/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * The old Memory Nodes that tasks in this cpuset took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have the tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};

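/*
 * Illustrative sketch (not part of the original file): the default-hierarchy
 * effective-mask rule described in the struct comment above, written out in
 * code.  It mirrors the non-partition branch of compute_effective_cpumask()
 * further below; the helper name is made up for illustration only.
 */
static inline void __maybe_unused
example_compute_effective(struct cpumask *effective,
			  const struct cpumask *configured,
			  const struct cpumask *parent_effective)
{
	/* effective_mask == configured_mask & parent's effective_mask */
	cpumask_and(effective, configured, parent_effective);
	/* if it ends up empty, inherit the parent's effective mask */
	if (cpumask_empty(effective))
		cpumask_copy(effective, parent_effective);
}
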
/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus. In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

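/*
 * Illustrative sketch (not part of the original file): typical use of
 * cpuset_for_each_child().  validate_change() below walks children the same
 * way; the function name and the @pred callback here are hypothetical.
 */
static bool __maybe_unused
example_all_children_match(struct cpuset *parent,
			   bool (*pred)(struct cpuset *))
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *child;
	bool ok = true;

	rcu_read_lock();	/* the macro must run under RCU read lock */
	cpuset_for_each_child(child, pos_css, parent)
		if (!pred(child)) {
			ok = false;
			break;
		}
	rcu_read_unlock();
	return ok;
}
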
/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

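/*
 * Illustrative sketch (not part of the original file): a pre-order walk
 * that prunes empty subtrees, the same pattern used below by
 * update_domain_attr_tree() and update_cpumasks_hier().  The function name
 * is hypothetical.
 */
static void __maybe_unused example_walk_descendants(struct cpuset *root)
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *cp;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root) {
		/* skip @cp's whole subtree if it has no CPUs */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}
		/* ... visit @cp here ... */
	}
	rcu_read_unlock();
}
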
/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed by
 * another task, so we use alloc_lock in the task_struct to protect them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}

static DEFINE_SPINLOCK(callback_lock);

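/*
 * Illustrative sketch (not part of the original file): the writer-side
 * locking pattern the big comment above describes.  In this version of the
 * file the "cpuset_mutex" role is played by the percpu rwsem cpuset_rwsem
 * defined just above; update_nodemask() later in this file follows exactly
 * this shape.  The function name is hypothetical.
 */
static void __maybe_unused example_modify_cpuset(struct cpuset *cs,
						 const nodemask_t *newmems)
{
	percpu_down_write(&cpuset_rwsem);	/* serialize all modifiers */

	/* validate, allocate memory, etc. while only cpuset_rwsem is held */

	spin_lock_irq(&callback_lock);		/* block readers briefly */
	cs->mems_allowed = *newmems;
	spin_unlock_irq(&callback_lock);

	percpu_up_write(&cpuset_rwsem);
}
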
static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used when on default hierarchy or the
 * cgroup_v2_mode flag is set.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs:  the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a cpuset or tmpmasks structure
 * @cs:  the cpuset whose cpumasks are to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

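/*
 * Illustrative sketch (not part of the original file): the intended
 * trial-cpuset life cycle.  The real callers (the cgroup write handlers
 * later in this file) follow this pattern around update_cpumask() and
 * update_nodemask(); the function name here is hypothetical.
 */
static int __maybe_unused example_trial_update(struct cpuset *cs,
					       const char *buf)
{
	struct cpuset *trialcs;
	int retval = 0;

	trialcs = alloc_trial_cpuset(cs);	/* duplicate of @cs */
	if (!trialcs)
		return -ENOMEM;

	/*
	 * Apply the change described by @buf to @trialcs, validate it
	 * against @cs, and commit it only if validation succeeds, e.g.
	 * retval = update_cpumask(cs, trialcs, buf);
	 */

	free_cpuset(trialcs);
	return retval;
}
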
/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective_cpus masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held.  */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    cp - cpuset pointer, used (together with pos_css) to perform a
 *	   top-down scan of all cpusets. For our purposes, rebuilding
 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 *	   balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't yet share the same 'pn' partition
 *	number, and assigns them the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
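 *
 *	Worked example (illustrative, not from the original comment):
 *	suppose csa[] holds three load-balanced cpusets A (cpus 0-1),
 *	B (cpus 1-2) and C (cpus 4-5), initially numbered pn = 0, 1, 2.
 *	A and B overlap, so every cpuset with pn == 1 is renumbered to
 *	pn == 0 and ndoms drops from 3 to 2.  No remaining pair overlaps,
 *	so the scan terminates with two sched domains: {0-2} (A union B)
 *	and {4-5} (C).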
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_root(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

static void update_tasks_root_domain(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);

	while ((task = css_task_iter_next(&it)))
		dl_add_task_root_domain(task);

	css_task_iter_end(&it);
}

static void rebuild_root_domains(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	percpu_rwsem_assert_held(&cpuset_rwsem);
	lockdep_assert_cpus_held();
	lockdep_assert_held(&sched_domains_mutex);

	rcu_read_lock();

	/*
	 * Clear default root domain DL accounting, it will be computed again
	 * if a task belongs to it.
	 */
	dl_clear_root_domain(&def_root_domain);

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {

		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		css_get(&cs->css);

		rcu_read_unlock();

		update_tasks_root_domain(cs);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
}

static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	rebuild_root_domains();
	mutex_unlock(&sched_domains_mutex);
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_cpus_held();
	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with an offlined cpu to partition_sched_domains().
	 * Anyway, the hotplug work item will rebuild the sched domains.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	if (top_cpuset.nr_subparts_cpus &&
	   !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	get_online_cpus();
	percpu_down_write(&cpuset_rwsem);
	rebuild_sched_domains_locked();
	percpu_up_write(&cpuset_rwsem);
	put_online_cpus();
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}

/**
 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
 * @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs to recompute the new effective_cpus mask
 * @parent: the parent cpuset
 *
 * If the parent has subpartition CPUs, include them in the list of
 * allowable CPUs in computing the new effective_cpus mask. Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root	 */
	partcmd_disable,	/* Disable partition root	 */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root. The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus. The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus. 0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask. Otherwise,
 * cpus_allowed is assumed to remain the same. The cpuset should either
 * be a partition root or an invalid partition root. The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent. The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * An error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate(). The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change(). The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
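 *
 * Worked example (illustrative, not from the original comment): if the
 * parent's effective_cpus is 0-7 and the child's cpus_allowed is 0-1,
 * partcmd_enable moves CPUs 0-1 into the parent's subparts_cpus, leaving
 * the parent's effective_cpus as 2-7.  partcmd_disable reverses this.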
 */
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cpuset);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	bool part_error = false;	/* Partition error? */

	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_root(parent) ||
	   (newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
		return -EINVAL;

	/*
	 * Enabling/disabling partition root is not allowed if there are
	 * online children.
	 */
	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
		return -EBUSY;

	/*
	 * Enabling partition root is not allowed if not all the CPUs
	 * can be granted from parent's effective_cpus or at least one
	 * CPU will be left after that.
	 */
	if ((cmd == partcmd_enable) &&
	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
		return -EINVAL;

	/*
	 * A cpumask update cannot make parent's effective_cpus become empty.
	 */
	adding = deleting = false;
	if (cmd == partcmd_enable) {
		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->effective_cpus
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Return error if the new effective_cpus could become empty.
		 */
		if (adding &&
		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
			if (!deleting)
				return -EINVAL;
			/*
			 * As some of the CPUs in subparts_cpus might have
			 * been offlined, we need to compute the real delmask
			 * to confirm that.
			 */
			if (!cpumask_and(tmp->addmask, tmp->delmask,
					 cpu_active_mask))
				return -EINVAL;
			cpumask_copy(tmp->addmask, parent->effective_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * addmask = cpus_allowed & parent->effective_cpus
		 *
		 * Note that parent's subparts_cpus may have been
		 * pre-shrunk in case there is a change in the cpu list.
		 * So no deletion is needed.
		 */
		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
				     parent->effective_cpus);
		part_error = cpumask_equal(tmp->addmask,
					   parent->effective_cpus);
	}

	if (cmd == partcmd_update) {
		int prev_prs = cpuset->partition_root_state;

		/*
		 * Check for possible transition between PRS_ENABLED
		 * and PRS_ERROR.
		 */
		switch (cpuset->partition_root_state) {
		case PRS_ENABLED:
			if (part_error)
				cpuset->partition_root_state = PRS_ERROR;
			break;
		case PRS_ERROR:
			if (!part_error)
				cpuset->partition_root_state = PRS_ENABLED;
			break;
		}
		/*
		 * Set part_error if previously in invalid state.
		 */
		part_error = (prev_prs == PRS_ERROR);
	}

	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
		return 0;	/* Nothing needs to be done */

	if (cpuset->partition_root_state == PRS_ERROR) {
		/*
		 * Remove all its cpus from parent's subparts_cpus.
		 */
		adding = false;
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	}

	if (!adding && !deleting)
		return 0;

	/*
	 * Change the parent's subparts_cpus.
	 * Newly added CPUs will be removed from effective_cpus and
	 * newly deleted ones will be added back to effective_cpus.
	 */
	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		/*
		 * Some of the CPUs in subparts_cpus might have been offlined.
		 */
		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}

	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
	spin_unlock_irq(&callback_lock);

	return cmd == partcmd_update;
}

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs:  the cpuset to consider
 * @tmp: temp variables for calculating effective_cpus & partition setup
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		compute_effective_cpumask(tmp->new_cpus, cp, parent);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
			if (!cp->use_parent_ecpus) {
				cp->use_parent_ecpus = true;
				parent->child_ecpus_count++;
			}
		} else if (cp->use_parent_ecpus) {
			cp->use_parent_ecpus = false;
			WARN_ON_ONCE(!parent->child_ecpus_count);
			parent->child_ecpus_count--;
		}

		/*
		 * Skip the whole subtree if the cpumask remains the same
		 * and has no partition root state.
		 */
		if (!cp->partition_root_state &&
		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/*
		 * update_parent_subparts_cpumask() should have been called
		 * for cs already in update_cpumask(). We should also call
		 * update_tasks_cpumask() again for tasks in the parent
		 * cpuset if the parent's subparts_cpus changes.
		 */
		if ((cp != cs) && cp->partition_root_state) {
			switch (parent->partition_root_state) {
			case PRS_DISABLED:
				/*
				 * If parent is not a partition root or an
				 * invalid partition root, clear the state
				 * and the CS_CPU_EXCLUSIVE flag.
				 */
				WARN_ON_ONCE(cp->partition_root_state
					     != PRS_ERROR);
				cp->partition_root_state = 0;

				/*
				 * clear_bit() is an atomic operation and
				 * readers aren't interested in the state
				 * of CS_CPU_EXCLUSIVE anyway. So we can
				 * just update the flag without holding
				 * the callback_lock.
				 */
				clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
				break;

			case PRS_ENABLED:
				if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
					update_tasks_cpumask(parent);
				break;

			case PRS_ERROR:
				/*
				 * When the parent is an invalid partition
				 * root, the child has to be one too.
				 */
				cp->partition_root_state = PRS_ERROR;
				if (cp->nr_subparts_cpus) {
					cp->nr_subparts_cpus = 0;
					cpumask_clear(cp->subparts_cpus);
				}
				break;
			}
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);

		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
		if (cp->nr_subparts_cpus &&
		   (cp->partition_root_state != PRS_ENABLED)) {
			cp->nr_subparts_cpus = 0;
			cpumask_clear(cp->subparts_cpus);
		} else if (cp->nr_subparts_cpus) {
			/*
			 * Make sure that effective_cpus & subparts_cpus
			 * are mutually exclusive.
			 *
			 * In the unlikely event that effective_cpus
			 * becomes empty, we clear cp->nr_subparts_cpus and
			 * let its child partition roots compete for the
			 * CPUs again.
			 */
			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
				       cp->subparts_cpus);
			if (cpumask_empty(cp->effective_cpus)) {
				cpumask_copy(cp->effective_cpus, tmp->new_cpus);
				cpumask_clear(cp->subparts_cpus);
				cp->nr_subparts_cpus = 0;
			} else if (!cpumask_subset(cp->subparts_cpus,
						   tmp->new_cpus)) {
				cpumask_andnot(cp->subparts_cpus,
					cp->subparts_cpus, tmp->new_cpus);
				cp->nr_subparts_cpus
					= cpumask_weight(cp->subparts_cpus);
			}
		}
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp);

		/*
		 * On legacy hierarchy, if the effective cpumask of any non-
		 * empty cpuset is changed, we need to rebuild sched domains.
		 * On default hierarchy, the cpuset needs to be a partition
		 * root as well.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp) &&
		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
		    is_partition_root(cp)))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_sibling_cpumasks - Update siblings' cpumasks
 * @parent:  Parent cpuset
 * @cs:      Current cpuset
 * @tmp:     Temp variables
 */
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
				    struct tmpmasks *tmp)
{
	struct cpuset *sibling;
	struct cgroup_subsys_state *pos_css;

	/*
	 * Check all its siblings and call update_cpumasks_hier()
	 * if their use_parent_ecpus flag is set in order for them
	 * to use the right effective_cpus value.
	 */
	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;

		update_cpumasks_hier(sibling, tmp);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;
	struct tmpmasks tmp;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/*
	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
	 * to allocated cpumasks.
	 */
	tmp.addmask  = trialcs->subparts_cpus;
	tmp.delmask  = trialcs->effective_cpus;
	tmp.new_cpus = trialcs->cpus_allowed;
#endif

	if (cs->partition_root_state) {
		/* Cpumask of a partition root cannot be empty */
		if (cpumask_empty(trialcs->cpus_allowed))
			return -EINVAL;
		if (update_parent_subparts_cpumask(cs, partcmd_update,
					trialcs->cpus_allowed, &tmp) < 0)
			return -EINVAL;
	}

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

	/*
	 * Make sure that subparts_cpus is a subset of cpus_allowed.
	 */
	if (cs->nr_subparts_cpus) {
		cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
			       cs->cpus_allowed);
		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
	}
	spin_unlock_irq(&callback_lock);

	update_cpumasks_hier(cs, &tmp);

	if (cs->partition_root_state) {
		struct cpuset *parent = parent_cs(cs);

		/*
		 * For partition root, update the cpumasks of sibling
		 * cpusets if they use parent's effective_cpus.
		 */
		if (parent->child_ecpus_count)
			update_sibling_cpumasks(parent, cs, &tmp);
	}
	return 0;
}

/*
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from the process migration
 * path holding locks involved in process management.  All mm migrations
 * are performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
 */

struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct cpuset_migrate_mm_work *mwork;

	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}

static void cpuset_post_attach(void)
{
	flush_workqueue(cpuset_migrate_mm_wq);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: the new nodes that the task will be allowed to use
 *
 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
 * and rebind the task's mempolicy, if it has one. If the task is allocating
 * in parallel, it might temporarily see an empty intersection, which results
 * in a seqlock check and retry before OOM or allocation failure.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	task_lock(tsk);

	local_irq_disable();
	write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems);
	tsk->mems_allowed = *newmems;

	write_seqcount_end(&tsk->mems_allowed_seq);
	local_irq_enable();

	task_unlock(tsk);
}

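/*
 * Illustrative sketch (not part of the original file): the reader side of
 * the mems_allowed_seq seqcount described above.  Page-allocator paths use
 * this pattern via read_mems_allowed_begin()/read_mems_allowed_retry() from
 * <linux/cpuset.h>; the do_allocation() callback here is hypothetical.
 */
static struct page *__maybe_unused
example_alloc_respecting_mems(struct page *(*do_allocation)(nodemask_t *))
{
	struct page *page;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		page = do_allocation(&current->mems_allowed);
		/* retry if mems_allowed changed under us and we failed */
	} while (!page && read_mems_allowed_retry(seq));

	return page;
}
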
static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_nodemask(struct cpuset *cs)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct css_task_iter it;
	struct task_struct *task;

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	guarantee_online_mems(cs, &newmems);

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		struct mm_struct *mm;
		bool migrate;

		cpuset_change_task_nodemask(task, &newmems);

		mm = get_task_mm(task);
		if (!mm)
			continue;

		migrate = is_memory_migrate(cs);

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
		else
			mmput(mm);
	}
	css_task_iter_end(&it);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

1694 /*
1695  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1696  * @cs: the cpuset to consider
1697  * @new_mems: a temp variable for calculating new effective_mems
1698  *
1699  * When configured nodemask is changed, the effective nodemasks of this cpuset
1700  * and all its descendants need to be updated.
1701  *
1702  * On legacy hiearchy, effective_mems will be the same with mems_allowed.
1703  *
1704  * Called with cpuset_mutex held
1705  */
1706 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1707 {
1708 	struct cpuset *cp;
1709 	struct cgroup_subsys_state *pos_css;
1710 
1711 	rcu_read_lock();
1712 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1713 		struct cpuset *parent = parent_cs(cp);
1714 
1715 		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1716 
1717 		/*
1718 		 * If it becomes empty, inherit the effective mask of the
1719 		 * parent, which is guaranteed to have some MEMs.
1720 		 */
1721 		if (is_in_v2_mode() && nodes_empty(*new_mems))
1722 			*new_mems = parent->effective_mems;
1723 
1724 		/* Skip the whole subtree if the nodemask remains the same. */
1725 		if (nodes_equal(*new_mems, cp->effective_mems)) {
1726 			pos_css = css_rightmost_descendant(pos_css);
1727 			continue;
1728 		}
1729 
1730 		if (!css_tryget_online(&cp->css))
1731 			continue;
1732 		rcu_read_unlock();
1733 
1734 		spin_lock_irq(&callback_lock);
1735 		cp->effective_mems = *new_mems;
1736 		spin_unlock_irq(&callback_lock);
1737 
1738 		WARN_ON(!is_in_v2_mode() &&
1739 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
1740 
1741 		update_tasks_nodemask(cp);
1742 
1743 		rcu_read_lock();
1744 		css_put(&cp->css);
1745 	}
1746 	rcu_read_unlock();
1747 }
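
/*
 * Worked example: if a parent's effective_mems is 0-1 and a child is
 * configured with mems 1-2, the child's effective_mems becomes node 1.
 * Were the child configured with 2-3 instead, the intersection would
 * be empty; on the default hierarchy the child then inherits the
 * parent's 0-1, while on the legacy hierarchy validate_change()
 * refuses such a non-subset configuration up front.
 */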
1748 
1749 /*
1750  * Handle user request to change the 'mems' memory placement
1751  * of a cpuset.  Needs to validate the request, update the
1752  * cpuset's mems_allowed, and for each task in the cpuset,
1753  * update mems_allowed and rebind the task's mempolicy and any vma
1754  * mempolicies and, if the cpuset is marked 'memory_migrate',
1755  * migrate the tasks' pages to the new memory.
1756  *
1757  * Call with cpuset_mutex held. May take callback_lock during call.
1758  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1759  * lock each such task's mm->mmap_sem, scan its vmas and rebind
1760  * their mempolicies to the cpuset's new mems_allowed.
1761  */
1762 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1763 			   const char *buf)
1764 {
1765 	int retval;
1766 
1767 	/*
1768 	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1769 	 * it's read-only
1770 	 */
1771 	if (cs == &top_cpuset) {
1772 		retval = -EACCES;
1773 		goto done;
1774 	}
1775 
1776 	/*
1777 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1778 	 * Since nodelist_parse() fails on an empty mask, we special case
1779 	 * that parsing.  The validate_change() call ensures that cpusets
1780 	 * with tasks have memory.
1781 	 */
1782 	if (!*buf) {
1783 		nodes_clear(trialcs->mems_allowed);
1784 	} else {
1785 		retval = nodelist_parse(buf, trialcs->mems_allowed);
1786 		if (retval < 0)
1787 			goto done;
1788 
1789 		if (!nodes_subset(trialcs->mems_allowed,
1790 				  top_cpuset.mems_allowed)) {
1791 			retval = -EINVAL;
1792 			goto done;
1793 		}
1794 	}
1795 
1796 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1797 		retval = 0;		/* Too easy - nothing to do */
1798 		goto done;
1799 	}
1800 	retval = validate_change(cs, trialcs);
1801 	if (retval < 0)
1802 		goto done;
1803 
1804 	spin_lock_irq(&callback_lock);
1805 	cs->mems_allowed = trialcs->mems_allowed;
1806 	spin_unlock_irq(&callback_lock);
1807 
1808 	/* use trialcs->mems_allowed as a temp variable */
1809 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
1810 done:
1811 	return retval;
1812 }
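
/*
 * From user space this path is reached by writing a nodelist to the
 * "mems" control file.  A hedged sketch, assuming a legacy cpuset
 * hierarchy mounted at /sys/fs/cgroup/cpuset with an existing child
 * directory "mycpuset" (both path and name are illustrative):
 *
 *	int fd = open("/sys/fs/cgroup/cpuset/mycpuset/cpuset.mems",
 *		      O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0-1", 3);	// parsed by nodelist_parse()
 *		close(fd);
 *	}
 *
 * An empty write clears mems_allowed, which only succeeds while the
 * cpuset has no tasks.
 */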
1813 
1814 bool current_cpuset_is_being_rebound(void)
1815 {
1816 	bool ret;
1817 
1818 	rcu_read_lock();
1819 	ret = task_cs(current) == cpuset_being_rebound;
1820 	rcu_read_unlock();
1821 
1822 	return ret;
1823 }
1824 
1825 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1826 {
1827 #ifdef CONFIG_SMP
1828 	if (val < -1 || val >= sched_domain_level_max)
1829 		return -EINVAL;
1830 #endif
1831 
1832 	if (val != cs->relax_domain_level) {
1833 		cs->relax_domain_level = val;
1834 		if (!cpumask_empty(cs->cpus_allowed) &&
1835 		    is_sched_load_balance(cs))
1836 			rebuild_sched_domains_locked();
1837 	}
1838 
1839 	return 0;
1840 }
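
/*
 * For reference, the accepted levels as documented in
 * Documentation/admin-guide/cgroup-v1/cpusets.rst:
 *
 *	-1: no request. use system default or follow request of others.
 *	 0: no search.
 *	 1: search siblings (hyperthreads in a core).
 *	 2: search cores in a package.
 *	 3: search cpus in a node [= system wide on non-NUMA system]
 *	 4: search nodes in a chunk of node [on NUMA system]
 *	 5: search system wide [on NUMA system]
 */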
1841 
1842 /**
1843  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1844  * @cs: the cpuset in which each task's spread flags needs to be changed
1845  *
1846  * Iterate through each task of @cs updating its spread flags.  As this
1847  * function is called with cpuset_mutex held, cpuset membership stays
1848  * stable.
1849  */
1850 static void update_tasks_flags(struct cpuset *cs)
1851 {
1852 	struct css_task_iter it;
1853 	struct task_struct *task;
1854 
1855 	css_task_iter_start(&cs->css, 0, &it);
1856 	while ((task = css_task_iter_next(&it)))
1857 		cpuset_update_task_spread_flag(cs, task);
1858 	css_task_iter_end(&it);
1859 }
1860 
1861 /*
1862  * update_flag - read a 0 or a 1 in a file and update associated flag
1863  * bit:		the bit to update (see cpuset_flagbits_t)
1864  * cs:		the cpuset to update
1865  * turning_on: 	whether the flag is being set or cleared
1866  *
1867  * Call with cpuset_mutex held.
1868  */
1869 
1870 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1871 		       int turning_on)
1872 {
1873 	struct cpuset *trialcs;
1874 	int balance_flag_changed;
1875 	int spread_flag_changed;
1876 	int err;
1877 
1878 	trialcs = alloc_trial_cpuset(cs);
1879 	if (!trialcs)
1880 		return -ENOMEM;
1881 
1882 	if (turning_on)
1883 		set_bit(bit, &trialcs->flags);
1884 	else
1885 		clear_bit(bit, &trialcs->flags);
1886 
1887 	err = validate_change(cs, trialcs);
1888 	if (err < 0)
1889 		goto out;
1890 
1891 	balance_flag_changed = (is_sched_load_balance(cs) !=
1892 				is_sched_load_balance(trialcs));
1893 
1894 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1895 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1896 
1897 	spin_lock_irq(&callback_lock);
1898 	cs->flags = trialcs->flags;
1899 	spin_unlock_irq(&callback_lock);
1900 
1901 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1902 		rebuild_sched_domains_locked();
1903 
1904 	if (spread_flag_changed)
1905 		update_tasks_flags(cs);
1906 out:
1907 	free_cpuset(trialcs);
1908 	return err;
1909 }
1910 
1911 /*
1912  * update_prstate - update partition_root_state
1913  * cs:	the cpuset to update
1914  * val: 0 - disabled, 1 - enabled
1915  *
1916  * Call with cpuset_mutex held.
1917  */
1918 static int update_prstate(struct cpuset *cs, int val)
1919 {
1920 	int err;
1921 	struct cpuset *parent = parent_cs(cs);
1922 	struct tmpmasks tmp;
1923 
1924 	if ((val != 0) && (val != 1))
1925 		return -EINVAL;
1926 	if (val == cs->partition_root_state)
1927 		return 0;
1928 
1929 	/*
1930 	 * Cannot force a partial or invalid partition root to a full
1931 	 * partition root.
1932 	 */
1933 	if (val && cs->partition_root_state)
1934 		return -EINVAL;
1935 
1936 	if (alloc_cpumasks(NULL, &tmp))
1937 		return -ENOMEM;
1938 
1939 	err = -EINVAL;
1940 	if (!cs->partition_root_state) {
1941 		/*
1942 		 * Turning on partition root requires setting the
1943 		 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
1944 		 * cannot be empty.
1945 		 */
1946 		if (cpumask_empty(cs->cpus_allowed))
1947 			goto out;
1948 
1949 		err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
1950 		if (err)
1951 			goto out;
1952 
1953 		err = update_parent_subparts_cpumask(cs, partcmd_enable,
1954 						     NULL, &tmp);
1955 		if (err) {
1956 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1957 			goto out;
1958 		}
1959 		cs->partition_root_state = PRS_ENABLED;
1960 	} else {
1961 		/*
1962 		 * Turning off partition root will clear the
1963 		 * CS_CPU_EXCLUSIVE bit.
1964 		 */
1965 		if (cs->partition_root_state == PRS_ERROR) {
1966 			cs->partition_root_state = 0;
1967 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1968 			err = 0;
1969 			goto out;
1970 		}
1971 
1972 		err = update_parent_subparts_cpumask(cs, partcmd_disable,
1973 						     NULL, &tmp);
1974 		if (err)
1975 			goto out;
1976 
1977 		cs->partition_root_state = 0;
1978 
1979 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1980 		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1981 	}
1982 
1983 	/*
1984 	 * Update cpumask of parent's tasks except when it is the top
1985 	 * cpuset as some system daemons cannot be mapped to other CPUs.
1986 	 */
1987 	if (parent != &top_cpuset)
1988 		update_tasks_cpumask(parent);
1989 
1990 	if (parent->child_ecpus_count)
1991 		update_sibling_cpumasks(parent, cs, &tmp);
1992 
1993 	rebuild_sched_domains_locked();
1994 out:
1995 	free_cpumasks(NULL, &tmp);
1996 	return err;
1997 }
1998 
1999 /*
2000  * Frequency meter - How fast is some event occurring?
2001  *
2002  * These routines manage a digitally filtered, constant time based,
2003  * event frequency meter.  There are four routines:
2004  *   fmeter_init() - initialize a frequency meter.
2005  *   fmeter_markevent() - called each time the event happens.
2006  *   fmeter_getrate() - returns the recent rate of such events.
2007  *   fmeter_update() - internal routine used to update fmeter.
2008  *
2009  * A common data structure is passed to each of these routines,
2010  * which is used to keep track of the state required to manage the
2011  * frequency meter and its digital filter.
2012  *
2013  * The filter works on the number of events marked per unit time.
2014  * The filter is single-pole low-pass recursive (IIR).  The time unit
2015  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2016  * simulate 3 decimal digits of precision (multiplied by 1000).
2017  *
2018  * With an FM_COEF of 933, and a time base of 1 second, the filter
2019  * has a half-life of 10 seconds, meaning that if the events quit
2020  * happening, then the rate returned from the fmeter_getrate()
2021  * will be cut in half each 10 seconds, until it converges to zero.
2022  *
2023  * It is not worth doing a real infinitely recursive filter.  If more
2024  * than FM_MAXTICKS ticks have elapsed since the last filter event,
2025  * just compute FM_MAXTICKS ticks worth, by which point the level
2026  * will be stable.
2027  *
2028  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2029  * arithmetic overflow in the fmeter_update() routine.
2030  *
2031  * Given the simple 32 bit integer arithmetic used, this meter works
2032  * best for reporting rates between one per millisecond (msec) and
2033  * one per 32 (approx) seconds.  At constant rates faster than one
2034  * per msec it maxes out at values just under 1,000,000.  At constant
2035  * rates between one per msec, and one per second it will stabilize
2036  * to a value N*1000, where N is the rate of events per second.
2037  * At constant rates between one per second and one per 32 seconds,
2038  * it will be choppy, moving up on the seconds that have an event,
2039  * and then decaying until the next event.  At rates slower than
2040  * about one in 32 seconds, it decays all the way back to zero between
2041  * each event.
2042  */
2043 
2044 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
2045 #define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
2046 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
2047 #define FM_SCALE 1000		/* faux fixed point scale */
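
/*
 * Worked example of the constants above: each elapsed second
 * multiplies the level by FM_COEF/FM_SCALE = 0.933, so after ten idle
 * seconds a level of 1000 decays to about 1000 * 0.933^10 ~= 500,
 * which is the ten second half-life quoted above.  Each marked event
 * adds (FM_SCALE - FM_COEF) = 67 to the level, so a steady rate of
 * one event per second converges on val = 67 / (1 - 0.933) ~= 1000;
 * more generally, N events per second converge on roughly N * 1000.
 */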
2048 
2049 /* Initialize a frequency meter */
2050 static void fmeter_init(struct fmeter *fmp)
2051 {
2052 	fmp->cnt = 0;
2053 	fmp->val = 0;
2054 	fmp->time = 0;
2055 	spin_lock_init(&fmp->lock);
2056 }
2057 
2058 /* Internal meter update - process cnt events and update value */
2059 static void fmeter_update(struct fmeter *fmp)
2060 {
2061 	time64_t now;
2062 	u32 ticks;
2063 
2064 	now = ktime_get_seconds();
2065 	ticks = now - fmp->time;
2066 
2067 	if (ticks == 0)
2068 		return;
2069 
2070 	ticks = min(FM_MAXTICKS, ticks);
2071 	while (ticks-- > 0)
2072 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2073 	fmp->time = now;
2074 
2075 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2076 	fmp->cnt = 0;
2077 }
2078 
2079 /* Process any previous ticks, then bump cnt by one (times scale). */
2080 static void fmeter_markevent(struct fmeter *fmp)
2081 {
2082 	spin_lock(&fmp->lock);
2083 	fmeter_update(fmp);
2084 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2085 	spin_unlock(&fmp->lock);
2086 }
2087 
2088 /* Process any previous ticks, then return current value. */
2089 static int fmeter_getrate(struct fmeter *fmp)
2090 {
2091 	int val;
2092 
2093 	spin_lock(&fmp->lock);
2094 	fmeter_update(fmp);
2095 	val = fmp->val;
2096 	spin_unlock(&fmp->lock);
2097 	return val;
2098 }
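
/*
 * The value read back through the "memory_pressure" control file is
 * exactly this rate: once collection is enabled via the root cpuset's
 * "memory_pressure_enabled" file (see below), a cpuset whose tasks
 * enter direct reclaim twice a second will settle near 2000, and the
 * reading halves every ten seconds after the reclaim activity stops.
 */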
2099 
2100 static struct cpuset *cpuset_attach_old_cs;
2101 
2102 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2103 static int cpuset_can_attach(struct cgroup_taskset *tset)
2104 {
2105 	struct cgroup_subsys_state *css;
2106 	struct cpuset *cs;
2107 	struct task_struct *task;
2108 	int ret;
2109 
2110 	/* used later by cpuset_attach() */
2111 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2112 	cs = css_cs(css);
2113 
2114 	percpu_down_write(&cpuset_rwsem);
2115 
2116 	/* allow moving tasks into an empty cpuset if on default hierarchy */
2117 	ret = -ENOSPC;
2118 	if (!is_in_v2_mode() &&
2119 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
2120 		goto out_unlock;
2121 
2122 	cgroup_taskset_for_each(task, css, tset) {
2123 		ret = task_can_attach(task, cs->cpus_allowed);
2124 		if (ret)
2125 			goto out_unlock;
2126 		ret = security_task_setscheduler(task);
2127 		if (ret)
2128 			goto out_unlock;
2129 	}
2130 
2131 	/*
2132 	 * Mark that an attach is in progress.  This makes validate_change() fail
2133 	 * changes which zero cpus/mems_allowed.
2134 	 */
2135 	cs->attach_in_progress++;
2136 	ret = 0;
2137 out_unlock:
2138 	percpu_up_write(&cpuset_rwsem);
2139 	return ret;
2140 }
2141 
2142 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2143 {
2144 	struct cgroup_subsys_state *css;
2145 
2146 	cgroup_taskset_first(tset, &css);
2147 
2148 	percpu_down_write(&cpuset_rwsem);
2149 	css_cs(css)->attach_in_progress--;
2150 	percpu_up_write(&cpuset_rwsem);
2151 }
2152 
2153 /*
2154  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
2155  * but we can't allocate it dynamically there.  Define it global and
2156  * allocate from cpuset_init().
2157  */
2158 static cpumask_var_t cpus_attach;
2159 
2160 static void cpuset_attach(struct cgroup_taskset *tset)
2161 {
2162 	/* static buf protected by cpuset_mutex */
2163 	static nodemask_t cpuset_attach_nodemask_to;
2164 	struct task_struct *task;
2165 	struct task_struct *leader;
2166 	struct cgroup_subsys_state *css;
2167 	struct cpuset *cs;
2168 	struct cpuset *oldcs = cpuset_attach_old_cs;
2169 
2170 	cgroup_taskset_first(tset, &css);
2171 	cs = css_cs(css);
2172 
2173 	percpu_down_write(&cpuset_rwsem);
2174 
2175 	/* prepare for attach */
2176 	if (cs == &top_cpuset)
2177 		cpumask_copy(cpus_attach, cpu_possible_mask);
2178 	else
2179 		guarantee_online_cpus(cs, cpus_attach);
2180 
2181 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2182 
2183 	cgroup_taskset_for_each(task, css, tset) {
2184 		/*
2185 		 * can_attach beforehand should guarantee that this doesn't
2186 		 * fail.  TODO: have a better way to handle failure here
2187 		 */
2188 		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2189 
2190 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2191 		cpuset_update_task_spread_flag(cs, task);
2192 	}
2193 
2194 	/*
2195 	 * Change mm for all threadgroup leaders. This is expensive and may
2196 	 * sleep and should be moved outside the migration path proper.
2197 	 */
2198 	cpuset_attach_nodemask_to = cs->effective_mems;
2199 	cgroup_taskset_for_each_leader(leader, css, tset) {
2200 		struct mm_struct *mm = get_task_mm(leader);
2201 
2202 		if (mm) {
2203 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2204 
2205 			/*
2206 			 * old_mems_allowed is the same as mems_allowed
2207 			 * here, except if this task is being moved
2208 			 * automatically due to hotplug.  In that case
2209 			 * @mems_allowed has been updated and is empty, so
2210 			 * @old_mems_allowed is the right nodemask to
2211 			 * migrate the mm from.
2212 			 */
2213 			if (is_memory_migrate(cs))
2214 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2215 						  &cpuset_attach_nodemask_to);
2216 			else
2217 				mmput(mm);
2218 		}
2219 	}
2220 
2221 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
2222 
2223 	cs->attach_in_progress--;
2224 	if (!cs->attach_in_progress)
2225 		wake_up(&cpuset_attach_wq);
2226 
2227 	percpu_up_write(&cpuset_rwsem);
2228 }
2229 
2230 /* The various types of files and directories in a cpuset file system */
2231 
2232 typedef enum {
2233 	FILE_MEMORY_MIGRATE,
2234 	FILE_CPULIST,
2235 	FILE_MEMLIST,
2236 	FILE_EFFECTIVE_CPULIST,
2237 	FILE_EFFECTIVE_MEMLIST,
2238 	FILE_SUBPARTS_CPULIST,
2239 	FILE_CPU_EXCLUSIVE,
2240 	FILE_MEM_EXCLUSIVE,
2241 	FILE_MEM_HARDWALL,
2242 	FILE_SCHED_LOAD_BALANCE,
2243 	FILE_PARTITION_ROOT,
2244 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
2245 	FILE_MEMORY_PRESSURE_ENABLED,
2246 	FILE_MEMORY_PRESSURE,
2247 	FILE_SPREAD_PAGE,
2248 	FILE_SPREAD_SLAB,
2249 } cpuset_filetype_t;
2250 
2251 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2252 			    u64 val)
2253 {
2254 	struct cpuset *cs = css_cs(css);
2255 	cpuset_filetype_t type = cft->private;
2256 	int retval = 0;
2257 
2258 	get_online_cpus();
2259 	percpu_down_write(&cpuset_rwsem);
2260 	if (!is_cpuset_online(cs)) {
2261 		retval = -ENODEV;
2262 		goto out_unlock;
2263 	}
2264 
2265 	switch (type) {
2266 	case FILE_CPU_EXCLUSIVE:
2267 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2268 		break;
2269 	case FILE_MEM_EXCLUSIVE:
2270 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2271 		break;
2272 	case FILE_MEM_HARDWALL:
2273 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
2274 		break;
2275 	case FILE_SCHED_LOAD_BALANCE:
2276 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2277 		break;
2278 	case FILE_MEMORY_MIGRATE:
2279 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2280 		break;
2281 	case FILE_MEMORY_PRESSURE_ENABLED:
2282 		cpuset_memory_pressure_enabled = !!val;
2283 		break;
2284 	case FILE_SPREAD_PAGE:
2285 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
2286 		break;
2287 	case FILE_SPREAD_SLAB:
2288 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
2289 		break;
2290 	default:
2291 		retval = -EINVAL;
2292 		break;
2293 	}
2294 out_unlock:
2295 	percpu_up_write(&cpuset_rwsem);
2296 	put_online_cpus();
2297 	return retval;
2298 }
2299 
2300 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2301 			    s64 val)
2302 {
2303 	struct cpuset *cs = css_cs(css);
2304 	cpuset_filetype_t type = cft->private;
2305 	int retval = -ENODEV;
2306 
2307 	get_online_cpus();
2308 	percpu_down_write(&cpuset_rwsem);
2309 	if (!is_cpuset_online(cs))
2310 		goto out_unlock;
2311 
2312 	switch (type) {
2313 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2314 		retval = update_relax_domain_level(cs, val);
2315 		break;
2316 	default:
2317 		retval = -EINVAL;
2318 		break;
2319 	}
2320 out_unlock:
2321 	percpu_up_write(&cpuset_rwsem);
2322 	put_online_cpus();
2323 	return retval;
2324 }
2325 
2326 /*
2327  * Common handling for a write to a "cpus" or "mems" file.
2328  */
2329 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2330 				    char *buf, size_t nbytes, loff_t off)
2331 {
2332 	struct cpuset *cs = css_cs(of_css(of));
2333 	struct cpuset *trialcs;
2334 	int retval = -ENODEV;
2335 
2336 	buf = strstrip(buf);
2337 
2338 	/*
2339 	 * CPU or memory hotunplug may leave @cs w/o any execution
2340 	 * resources, in which case the hotplug code asynchronously updates
2341 	 * configuration and transfers all tasks to the nearest ancestor
2342 	 * which can execute.
2343 	 *
2344 	 * As writes to "cpus" or "mems" may restore @cs's execution
2345 	 * resources, wait for the previously scheduled operations before
2346 	 * proceeding, so that we don't end up keep removing tasks added
2347 	 * proceeding, so that we don't keep removing tasks added
2348 	 *
2349 	 * cpuset_hotplug_work calls back into cgroup core via
2350 	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2351 	 * operation like this one can lead to a deadlock through kernfs
2352 	 * active_ref protection.  Let's break the protection.  Losing the
2353 	 * protection is okay as we check whether @cs is online after
2354 	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
2355 	 * hierarchies.
2356 	 */
2357 	css_get(&cs->css);
2358 	kernfs_break_active_protection(of->kn);
2359 	flush_work(&cpuset_hotplug_work);
2360 
2361 	get_online_cpus();
2362 	percpu_down_write(&cpuset_rwsem);
2363 	if (!is_cpuset_online(cs))
2364 		goto out_unlock;
2365 
2366 	trialcs = alloc_trial_cpuset(cs);
2367 	if (!trialcs) {
2368 		retval = -ENOMEM;
2369 		goto out_unlock;
2370 	}
2371 
2372 	switch (of_cft(of)->private) {
2373 	case FILE_CPULIST:
2374 		retval = update_cpumask(cs, trialcs, buf);
2375 		break;
2376 	case FILE_MEMLIST:
2377 		retval = update_nodemask(cs, trialcs, buf);
2378 		break;
2379 	default:
2380 		retval = -EINVAL;
2381 		break;
2382 	}
2383 
2384 	free_cpuset(trialcs);
2385 out_unlock:
2386 	percpu_up_write(&cpuset_rwsem);
2387 	put_online_cpus();
2388 	kernfs_unbreak_active_protection(of->kn);
2389 	css_put(&cs->css);
2390 	flush_workqueue(cpuset_migrate_mm_wq);
2391 	return retval ?: nbytes;
2392 }
2393 
2394 /*
2395  * These ASCII lists should be read in a single call, by using a user
2396  * buffer large enough to hold the entire map.  If read in smaller
2397  * chunks, there is no guarantee of atomicity.  Since the display format
2398  * used (a list of ranges of sequential numbers) is variable length,
2399  * and since these maps can change value dynamically, one could read
2400  * gibberish by doing partial reads while a list was changing.
2401  */
2402 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2403 {
2404 	struct cpuset *cs = css_cs(seq_css(sf));
2405 	cpuset_filetype_t type = seq_cft(sf)->private;
2406 	int ret = 0;
2407 
2408 	spin_lock_irq(&callback_lock);
2409 
2410 	switch (type) {
2411 	case FILE_CPULIST:
2412 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2413 		break;
2414 	case FILE_MEMLIST:
2415 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2416 		break;
2417 	case FILE_EFFECTIVE_CPULIST:
2418 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2419 		break;
2420 	case FILE_EFFECTIVE_MEMLIST:
2421 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2422 		break;
2423 	case FILE_SUBPARTS_CPULIST:
2424 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2425 		break;
2426 	default:
2427 		ret = -EINVAL;
2428 	}
2429 
2430 	spin_unlock_irq(&callback_lock);
2431 	return ret;
2432 }
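
/*
 * A hedged user-space sketch of the single-call read recommended
 * above (mount point and cpuset name are illustrative):
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/sys/fs/cgroup/cpuset/mycpuset/cpuset.cpus",
 *		      O_RDONLY);
 *
 *	n = read(fd, buf, sizeof(buf) - 1);	// one read(2), whole list
 *	if (n >= 0)
 *		buf[n] = '\0';
 *	close(fd);
 */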
2433 
2434 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2435 {
2436 	struct cpuset *cs = css_cs(css);
2437 	cpuset_filetype_t type = cft->private;
2438 	switch (type) {
2439 	case FILE_CPU_EXCLUSIVE:
2440 		return is_cpu_exclusive(cs);
2441 	case FILE_MEM_EXCLUSIVE:
2442 		return is_mem_exclusive(cs);
2443 	case FILE_MEM_HARDWALL:
2444 		return is_mem_hardwall(cs);
2445 	case FILE_SCHED_LOAD_BALANCE:
2446 		return is_sched_load_balance(cs);
2447 	case FILE_MEMORY_MIGRATE:
2448 		return is_memory_migrate(cs);
2449 	case FILE_MEMORY_PRESSURE_ENABLED:
2450 		return cpuset_memory_pressure_enabled;
2451 	case FILE_MEMORY_PRESSURE:
2452 		return fmeter_getrate(&cs->fmeter);
2453 	case FILE_SPREAD_PAGE:
2454 		return is_spread_page(cs);
2455 	case FILE_SPREAD_SLAB:
2456 		return is_spread_slab(cs);
2457 	default:
2458 		BUG();
2459 	}
2460 
2461 	/* Unreachable but makes gcc happy */
2462 	return 0;
2463 }
2464 
2465 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2466 {
2467 	struct cpuset *cs = css_cs(css);
2468 	cpuset_filetype_t type = cft->private;
2469 	switch (type) {
2470 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2471 		return cs->relax_domain_level;
2472 	default:
2473 		BUG();
2474 	}
2475 
2476 	/* Unreachable but makes gcc happy */
2477 	return 0;
2478 }
2479 
2480 static int sched_partition_show(struct seq_file *seq, void *v)
2481 {
2482 	struct cpuset *cs = css_cs(seq_css(seq));
2483 
2484 	switch (cs->partition_root_state) {
2485 	case PRS_ENABLED:
2486 		seq_puts(seq, "root\n");
2487 		break;
2488 	case PRS_DISABLED:
2489 		seq_puts(seq, "member\n");
2490 		break;
2491 	case PRS_ERROR:
2492 		seq_puts(seq, "root invalid\n");
2493 		break;
2494 	}
2495 	return 0;
2496 }
2497 
2498 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2499 				     size_t nbytes, loff_t off)
2500 {
2501 	struct cpuset *cs = css_cs(of_css(of));
2502 	int val;
2503 	int retval = -ENODEV;
2504 
2505 	buf = strstrip(buf);
2506 
2507 	/*
2508 	 * Convert "root" to ENABLED, and convert "member" to DISABLED.
2509 	 */
2510 	if (!strcmp(buf, "root"))
2511 		val = PRS_ENABLED;
2512 	else if (!strcmp(buf, "member"))
2513 		val = PRS_DISABLED;
2514 	else
2515 		return -EINVAL;
2516 
2517 	css_get(&cs->css);
2518 	get_online_cpus();
2519 	percpu_down_write(&cpuset_rwsem);
2520 	if (!is_cpuset_online(cs))
2521 		goto out_unlock;
2522 
2523 	retval = update_prstate(cs, val);
2524 out_unlock:
2525 	percpu_up_write(&cpuset_rwsem);
2526 	put_online_cpus();
2527 	css_put(&cs->css);
2528 	return retval ?: nbytes;
2529 }
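
/*
 * A hedged sketch of driving this file from user space on the default
 * hierarchy, assuming cgroup2 is mounted at /sys/fs/cgroup and "part0"
 * is an existing child with the cpuset controller enabled (both names
 * are illustrative):
 *
 *	int fd = open("/sys/fs/cgroup/part0/cpuset.cpus.partition",
 *		      O_WRONLY);
 *
 *	write(fd, "root", 4);	// ends up in update_prstate(cs, PRS_ENABLED)
 *	close(fd);
 *
 * Writing "member" instead requests PRS_DISABLED.
 */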
2530 
2531 /*
2532  * for the common functions, 'private' gives the type of file
2533  */
2534 
2535 static struct cftype legacy_files[] = {
2536 	{
2537 		.name = "cpus",
2538 		.seq_show = cpuset_common_seq_show,
2539 		.write = cpuset_write_resmask,
2540 		.max_write_len = (100U + 6 * NR_CPUS),
2541 		.private = FILE_CPULIST,
2542 	},
2543 
2544 	{
2545 		.name = "mems",
2546 		.seq_show = cpuset_common_seq_show,
2547 		.write = cpuset_write_resmask,
2548 		.max_write_len = (100U + 6 * MAX_NUMNODES),
2549 		.private = FILE_MEMLIST,
2550 	},
2551 
2552 	{
2553 		.name = "effective_cpus",
2554 		.seq_show = cpuset_common_seq_show,
2555 		.private = FILE_EFFECTIVE_CPULIST,
2556 	},
2557 
2558 	{
2559 		.name = "effective_mems",
2560 		.seq_show = cpuset_common_seq_show,
2561 		.private = FILE_EFFECTIVE_MEMLIST,
2562 	},
2563 
2564 	{
2565 		.name = "cpu_exclusive",
2566 		.read_u64 = cpuset_read_u64,
2567 		.write_u64 = cpuset_write_u64,
2568 		.private = FILE_CPU_EXCLUSIVE,
2569 	},
2570 
2571 	{
2572 		.name = "mem_exclusive",
2573 		.read_u64 = cpuset_read_u64,
2574 		.write_u64 = cpuset_write_u64,
2575 		.private = FILE_MEM_EXCLUSIVE,
2576 	},
2577 
2578 	{
2579 		.name = "mem_hardwall",
2580 		.read_u64 = cpuset_read_u64,
2581 		.write_u64 = cpuset_write_u64,
2582 		.private = FILE_MEM_HARDWALL,
2583 	},
2584 
2585 	{
2586 		.name = "sched_load_balance",
2587 		.read_u64 = cpuset_read_u64,
2588 		.write_u64 = cpuset_write_u64,
2589 		.private = FILE_SCHED_LOAD_BALANCE,
2590 	},
2591 
2592 	{
2593 		.name = "sched_relax_domain_level",
2594 		.read_s64 = cpuset_read_s64,
2595 		.write_s64 = cpuset_write_s64,
2596 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2597 	},
2598 
2599 	{
2600 		.name = "memory_migrate",
2601 		.read_u64 = cpuset_read_u64,
2602 		.write_u64 = cpuset_write_u64,
2603 		.private = FILE_MEMORY_MIGRATE,
2604 	},
2605 
2606 	{
2607 		.name = "memory_pressure",
2608 		.read_u64 = cpuset_read_u64,
2609 		.private = FILE_MEMORY_PRESSURE,
2610 	},
2611 
2612 	{
2613 		.name = "memory_spread_page",
2614 		.read_u64 = cpuset_read_u64,
2615 		.write_u64 = cpuset_write_u64,
2616 		.private = FILE_SPREAD_PAGE,
2617 	},
2618 
2619 	{
2620 		.name = "memory_spread_slab",
2621 		.read_u64 = cpuset_read_u64,
2622 		.write_u64 = cpuset_write_u64,
2623 		.private = FILE_SPREAD_SLAB,
2624 	},
2625 
2626 	{
2627 		.name = "memory_pressure_enabled",
2628 		.flags = CFTYPE_ONLY_ON_ROOT,
2629 		.read_u64 = cpuset_read_u64,
2630 		.write_u64 = cpuset_write_u64,
2631 		.private = FILE_MEMORY_PRESSURE_ENABLED,
2632 	},
2633 
2634 	{ }	/* terminate */
2635 };
2636 
2637 /*
2638  * This is currently a minimal set for the default hierarchy. It can be
2639  * expanded later on by migrating more features and control files from v1.
2640  */
2641 static struct cftype dfl_files[] = {
2642 	{
2643 		.name = "cpus",
2644 		.seq_show = cpuset_common_seq_show,
2645 		.write = cpuset_write_resmask,
2646 		.max_write_len = (100U + 6 * NR_CPUS),
2647 		.private = FILE_CPULIST,
2648 		.flags = CFTYPE_NOT_ON_ROOT,
2649 	},
2650 
2651 	{
2652 		.name = "mems",
2653 		.seq_show = cpuset_common_seq_show,
2654 		.write = cpuset_write_resmask,
2655 		.max_write_len = (100U + 6 * MAX_NUMNODES),
2656 		.private = FILE_MEMLIST,
2657 		.flags = CFTYPE_NOT_ON_ROOT,
2658 	},
2659 
2660 	{
2661 		.name = "cpus.effective",
2662 		.seq_show = cpuset_common_seq_show,
2663 		.private = FILE_EFFECTIVE_CPULIST,
2664 	},
2665 
2666 	{
2667 		.name = "mems.effective",
2668 		.seq_show = cpuset_common_seq_show,
2669 		.private = FILE_EFFECTIVE_MEMLIST,
2670 	},
2671 
2672 	{
2673 		.name = "cpus.partition",
2674 		.seq_show = sched_partition_show,
2675 		.write = sched_partition_write,
2676 		.private = FILE_PARTITION_ROOT,
2677 		.flags = CFTYPE_NOT_ON_ROOT,
2678 	},
2679 
2680 	{
2681 		.name = "cpus.subpartitions",
2682 		.seq_show = cpuset_common_seq_show,
2683 		.private = FILE_SUBPARTS_CPULIST,
2684 		.flags = CFTYPE_DEBUG,
2685 	},
2686 
2687 	{ }	/* terminate */
2688 };
2689 
2690 
2691 /*
2692  *	cpuset_css_alloc - allocate a cpuset css
2693  *	parent_css: css of the parent cpuset, or NULL when setting up top_cpuset
2694  */
2695 
2696 static struct cgroup_subsys_state *
2697 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
2698 {
2699 	struct cpuset *cs;
2700 
2701 	if (!parent_css)
2702 		return &top_cpuset.css;
2703 
2704 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
2705 	if (!cs)
2706 		return ERR_PTR(-ENOMEM);
2707 
2708 	if (alloc_cpumasks(cs, NULL)) {
2709 		kfree(cs);
2710 		return ERR_PTR(-ENOMEM);
2711 	}
2712 
2713 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
2714 	nodes_clear(cs->mems_allowed);
2715 	nodes_clear(cs->effective_mems);
2716 	fmeter_init(&cs->fmeter);
2717 	cs->relax_domain_level = -1;
2718 
2719 	return &cs->css;
2720 }
2721 
2722 static int cpuset_css_online(struct cgroup_subsys_state *css)
2723 {
2724 	struct cpuset *cs = css_cs(css);
2725 	struct cpuset *parent = parent_cs(cs);
2726 	struct cpuset *tmp_cs;
2727 	struct cgroup_subsys_state *pos_css;
2728 
2729 	if (!parent)
2730 		return 0;
2731 
2732 	get_online_cpus();
2733 	percpu_down_write(&cpuset_rwsem);
2734 
2735 	set_bit(CS_ONLINE, &cs->flags);
2736 	if (is_spread_page(parent))
2737 		set_bit(CS_SPREAD_PAGE, &cs->flags);
2738 	if (is_spread_slab(parent))
2739 		set_bit(CS_SPREAD_SLAB, &cs->flags);
2740 
2741 	cpuset_inc();
2742 
2743 	spin_lock_irq(&callback_lock);
2744 	if (is_in_v2_mode()) {
2745 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2746 		cs->effective_mems = parent->effective_mems;
2747 		cs->use_parent_ecpus = true;
2748 		parent->child_ecpus_count++;
2749 	}
2750 	spin_unlock_irq(&callback_lock);
2751 
2752 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
2753 		goto out_unlock;
2754 
2755 	/*
2756 	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2757 	 * set.  This flag handling is implemented in cgroup core for
2758 	 * historical reasons - the flag may be specified during mount.
2759 	 *
2760 	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
2761 	 * refuse to clone the configuration - thereby refusing to let the
2762 	 * task enter, and as a result failing the sys_unshare() or
2763 	 * clone() which initiated it.  If this becomes a problem for some
2764 	 * users who wish to allow that scenario, then this could be
2765 	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2766 	 * (and likewise for mems) to the new cgroup.
2767 	 */
2768 	rcu_read_lock();
2769 	cpuset_for_each_child(tmp_cs, pos_css, parent) {
2770 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2771 			rcu_read_unlock();
2772 			goto out_unlock;
2773 		}
2774 	}
2775 	rcu_read_unlock();
2776 
2777 	spin_lock_irq(&callback_lock);
2778 	cs->mems_allowed = parent->mems_allowed;
2779 	cs->effective_mems = parent->mems_allowed;
2780 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2781 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2782 	spin_unlock_irq(&callback_lock);
2783 out_unlock:
2784 	percpu_up_write(&cpuset_rwsem);
2785 	put_online_cpus();
2786 	return 0;
2787 }
2788 
2789 /*
2790  * If the cpuset being removed has its flag 'sched_load_balance'
2791  * enabled, then simulate turning sched_load_balance off, which
2792  * will call rebuild_sched_domains_locked(). That is not needed
2793  * in the default hierarchy where only changes in partition
2794  * will cause repartitioning.
2795  *
2796  * If the cpuset has the 'sched.partition' flag enabled, simulate
2797  * turning 'sched.partition' off.
2798  */
2799 
2800 static void cpuset_css_offline(struct cgroup_subsys_state *css)
2801 {
2802 	struct cpuset *cs = css_cs(css);
2803 
2804 	get_online_cpus();
2805 	percpu_down_write(&cpuset_rwsem);
2806 
2807 	if (is_partition_root(cs))
2808 		update_prstate(cs, 0);
2809 
2810 	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2811 	    is_sched_load_balance(cs))
2812 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2813 
2814 	if (cs->use_parent_ecpus) {
2815 		struct cpuset *parent = parent_cs(cs);
2816 
2817 		cs->use_parent_ecpus = false;
2818 		parent->child_ecpus_count--;
2819 	}
2820 
2821 	cpuset_dec();
2822 	clear_bit(CS_ONLINE, &cs->flags);
2823 
2824 	percpu_up_write(&cpuset_rwsem);
2825 	put_online_cpus();
2826 }
2827 
2828 static void cpuset_css_free(struct cgroup_subsys_state *css)
2829 {
2830 	struct cpuset *cs = css_cs(css);
2831 
2832 	free_cpuset(cs);
2833 }
2834 
2835 static void cpuset_bind(struct cgroup_subsys_state *root_css)
2836 {
2837 	percpu_down_write(&cpuset_rwsem);
2838 	spin_lock_irq(&callback_lock);
2839 
2840 	if (is_in_v2_mode()) {
2841 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2842 		top_cpuset.mems_allowed = node_possible_map;
2843 	} else {
2844 		cpumask_copy(top_cpuset.cpus_allowed,
2845 			     top_cpuset.effective_cpus);
2846 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
2847 	}
2848 
2849 	spin_unlock_irq(&callback_lock);
2850 	percpu_up_write(&cpuset_rwsem);
2851 }
2852 
2853 /*
2854  * Make sure the new task conforms to the current state of its parent,
2855  * which could have been changed by cpuset just after it inherits the
2856  * state from the parent and before it sits on the cgroup's task list.
2857  */
2858 static void cpuset_fork(struct task_struct *task)
2859 {
2860 	if (task_css_is_root(task, cpuset_cgrp_id))
2861 		return;
2862 
2863 	set_cpus_allowed_ptr(task, current->cpus_ptr);
2864 	task->mems_allowed = current->mems_allowed;
2865 }
2866 
2867 struct cgroup_subsys cpuset_cgrp_subsys = {
2868 	.css_alloc	= cpuset_css_alloc,
2869 	.css_online	= cpuset_css_online,
2870 	.css_offline	= cpuset_css_offline,
2871 	.css_free	= cpuset_css_free,
2872 	.can_attach	= cpuset_can_attach,
2873 	.cancel_attach	= cpuset_cancel_attach,
2874 	.attach		= cpuset_attach,
2875 	.post_attach	= cpuset_post_attach,
2876 	.bind		= cpuset_bind,
2877 	.fork		= cpuset_fork,
2878 	.legacy_cftypes	= legacy_files,
2879 	.dfl_cftypes	= dfl_files,
2880 	.early_init	= true,
2881 	.threaded	= true,
2882 };
2883 
2884 /**
2885  * cpuset_init - initialize cpusets at system boot
2886  *
2887  * Description: Initialize top_cpuset
2888  **/
2889 
2890 int __init cpuset_init(void)
2891 {
2892 	BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
2893 
2894 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2895 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
2896 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
2897 
2898 	cpumask_setall(top_cpuset.cpus_allowed);
2899 	nodes_setall(top_cpuset.mems_allowed);
2900 	cpumask_setall(top_cpuset.effective_cpus);
2901 	nodes_setall(top_cpuset.effective_mems);
2902 
2903 	fmeter_init(&top_cpuset.fmeter);
2904 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2905 	top_cpuset.relax_domain_level = -1;
2906 
2907 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
2908 
2909 	return 0;
2910 }
2911 
2912 /*
2913  * If the CPU and/or memory hotplug handlers, below, unplug any CPUs
2914  * or memory nodes, we need to walk over the cpuset hierarchy,
2915  * removing that CPU or node from all cpusets.  If this removes the
2916  * last CPU or node from a cpuset, then move the tasks in the empty
2917  * cpuset to its next-highest non-empty parent.
2918  */
2919 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2920 {
2921 	struct cpuset *parent;
2922 
2923 	/*
2924 	 * Find its next-highest non-empty parent (the top cpuset
2925 	 * has online cpus, so it can't be empty).
2926 	 */
2927 	parent = parent_cs(cs);
2928 	while (cpumask_empty(parent->cpus_allowed) ||
2929 			nodes_empty(parent->mems_allowed))
2930 		parent = parent_cs(parent);
2931 
2932 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2933 		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
2934 		pr_cont_cgroup_name(cs->css.cgroup);
2935 		pr_cont("\n");
2936 	}
2937 }
2938 
2939 static void
2940 hotplug_update_tasks_legacy(struct cpuset *cs,
2941 			    struct cpumask *new_cpus, nodemask_t *new_mems,
2942 			    bool cpus_updated, bool mems_updated)
2943 {
2944 	bool is_empty;
2945 
2946 	spin_lock_irq(&callback_lock);
2947 	cpumask_copy(cs->cpus_allowed, new_cpus);
2948 	cpumask_copy(cs->effective_cpus, new_cpus);
2949 	cs->mems_allowed = *new_mems;
2950 	cs->effective_mems = *new_mems;
2951 	spin_unlock_irq(&callback_lock);
2952 
2953 	/*
2954 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2955 	 * as the tasks will be migrated to an ancestor.
2956 	 */
2957 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
2958 		update_tasks_cpumask(cs);
2959 	if (mems_updated && !nodes_empty(cs->mems_allowed))
2960 		update_tasks_nodemask(cs);
2961 
2962 	is_empty = cpumask_empty(cs->cpus_allowed) ||
2963 		   nodes_empty(cs->mems_allowed);
2964 
2965 	percpu_up_write(&cpuset_rwsem);
2966 
2967 	/*
2968 	 * Move tasks to the nearest ancestor with execution resources.
2969 	 * This is a full cgroup operation which will also call back into
2970 	 * cpuset. Should be done outside any lock.
2971 	 */
2972 	if (is_empty)
2973 		remove_tasks_in_empty_cpuset(cs);
2974 
2975 	percpu_down_write(&cpuset_rwsem);
2976 }
2977 
2978 static void
2979 hotplug_update_tasks(struct cpuset *cs,
2980 		     struct cpumask *new_cpus, nodemask_t *new_mems,
2981 		     bool cpus_updated, bool mems_updated)
2982 {
2983 	if (cpumask_empty(new_cpus))
2984 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
2985 	if (nodes_empty(*new_mems))
2986 		*new_mems = parent_cs(cs)->effective_mems;
2987 
2988 	spin_lock_irq(&callback_lock);
2989 	cpumask_copy(cs->effective_cpus, new_cpus);
2990 	cs->effective_mems = *new_mems;
2991 	spin_unlock_irq(&callback_lock);
2992 
2993 	if (cpus_updated)
2994 		update_tasks_cpumask(cs);
2995 	if (mems_updated)
2996 		update_tasks_nodemask(cs);
2997 }
2998 
2999 static bool force_rebuild;
3000 
3001 void cpuset_force_rebuild(void)
3002 {
3003 	force_rebuild = true;
3004 }
3005 
3006 /**
3007  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3008  * @cs: the cpuset of interest
3009  * @tmp: the tmpmasks structure pointer
3010  *
3011  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3012  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3013  * all its tasks are moved to the nearest ancestor with both resources.
3014  */
3015 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3016 {
3017 	static cpumask_t new_cpus;
3018 	static nodemask_t new_mems;
3019 	bool cpus_updated;
3020 	bool mems_updated;
3021 	struct cpuset *parent;
3022 retry:
3023 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3024 
3025 	percpu_down_write(&cpuset_rwsem);
3026 
3027 	/*
3028 	 * We have raced with task attaching. We wait until attaching
3029 	 * is finished, so we won't attach a task to an empty cpuset.
3030 	 */
3031 	if (cs->attach_in_progress) {
3032 		percpu_up_write(&cpuset_rwsem);
3033 		goto retry;
3034 	}
3035 
3036 	parent = parent_cs(cs);
3037 	compute_effective_cpumask(&new_cpus, cs, parent);
3038 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3039 
3040 	if (cs->nr_subparts_cpus)
3041 		/*
3042 		 * Make sure that CPUs allocated to child partitions
3043 		 * do not show up in effective_cpus.
3044 		 */
3045 		cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3046 
3047 	if (!tmp || !cs->partition_root_state)
3048 		goto update_tasks;
3049 
3050 	/*
3051 	 * In the unlikely event that a partition root has empty
3052 	 * effective_cpus or its parent becomes erroneous, we have to
3053 	 * transition it to the erroneous state.
3054 	 */
3055 	if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
3056 	   (parent->partition_root_state == PRS_ERROR))) {
3057 		if (cs->nr_subparts_cpus) {
3058 			cs->nr_subparts_cpus = 0;
3059 			cpumask_clear(cs->subparts_cpus);
3060 			compute_effective_cpumask(&new_cpus, cs, parent);
3061 		}
3062 
3063 		/*
3064 		 * If the effective_cpus is empty because the child
3065 		 * partitions take away all the CPUs, we can keep
3066 		 * the current partition and let the child partitions
3067 		 * fight for available CPUs.
3068 		 */
3069 		if ((parent->partition_root_state == PRS_ERROR) ||
3070 		     cpumask_empty(&new_cpus)) {
3071 			update_parent_subparts_cpumask(cs, partcmd_disable,
3072 						       NULL, tmp);
3073 			cs->partition_root_state = PRS_ERROR;
3074 		}
3075 		cpuset_force_rebuild();
3076 	}
3077 
3078 	/*
3079 	 * On the other hand, an erroneous partition root may be transitioned
3080 	 * back to a regular one, or a partition root with no CPU allocated
3081 	 * from the parent may change to erroneous.
3082 	 */
3083 	if (is_partition_root(parent) &&
3084 	   ((cs->partition_root_state == PRS_ERROR) ||
3085 	    !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3086 	     update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
3087 		cpuset_force_rebuild();
3088 
3089 update_tasks:
3090 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3091 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3092 
3093 	if (is_in_v2_mode())
3094 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3095 				     cpus_updated, mems_updated);
3096 	else
3097 		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3098 					    cpus_updated, mems_updated);
3099 
3100 	percpu_up_write(&cpuset_rwsem);
3101 }
3102 
3103 /**
3104  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3105  *
3106  * This function is called after either CPU or memory configuration has
3107  * changed and updates cpuset accordingly.  The top_cpuset is always
3108  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3109  * order to make cpusets transparent (of no effect) on systems that are
3110  * actively using CPU hotplug but making no active use of cpusets.
3111  *
3112  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3113  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3114  * all descendants.
3115  *
3116  * Note that CPU offlining during suspend is ignored.  We don't modify
3117  * cpusets across suspend/resume cycles at all.
3118  */
3119 static void cpuset_hotplug_workfn(struct work_struct *work)
3120 {
3121 	static cpumask_t new_cpus;
3122 	static nodemask_t new_mems;
3123 	bool cpus_updated, mems_updated;
3124 	bool on_dfl = is_in_v2_mode();
3125 	struct tmpmasks tmp, *ptmp = NULL;
3126 
3127 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3128 		ptmp = &tmp;
3129 
3130 	percpu_down_write(&cpuset_rwsem);
3131 
3132 	/* fetch the available cpus/mems and find out which changed how */
3133 	cpumask_copy(&new_cpus, cpu_active_mask);
3134 	new_mems = node_states[N_MEMORY];
3135 
3136 	/*
3137 	 * If subparts_cpus is populated, it is likely that the check below
3138 	 * will produce a false positive on cpus_updated when the cpu list
3139 	 * isn't changed. It is extra work, but it is better to be safe.
3140 	 */
3141 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3142 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3143 
3144 	/* synchronize cpus_allowed to cpu_active_mask */
3145 	if (cpus_updated) {
3146 		spin_lock_irq(&callback_lock);
3147 		if (!on_dfl)
3148 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3149 		/*
3150 		 * Make sure that CPUs allocated to child partitions
3151 		 * do not show up in effective_cpus. If no CPU is left,
3152 		 * we clear the subparts_cpus & let the child partitions
3153 		 * fight for the CPUs again.
3154 		 */
3155 		if (top_cpuset.nr_subparts_cpus) {
3156 			if (cpumask_subset(&new_cpus,
3157 					   top_cpuset.subparts_cpus)) {
3158 				top_cpuset.nr_subparts_cpus = 0;
3159 				cpumask_clear(top_cpuset.subparts_cpus);
3160 			} else {
3161 				cpumask_andnot(&new_cpus, &new_cpus,
3162 					       top_cpuset.subparts_cpus);
3163 			}
3164 		}
3165 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3166 		spin_unlock_irq(&callback_lock);
3167 		/* we don't mess with cpumasks of tasks in top_cpuset */
3168 	}
3169 
3170 	/* synchronize mems_allowed to N_MEMORY */
3171 	if (mems_updated) {
3172 		spin_lock_irq(&callback_lock);
3173 		if (!on_dfl)
3174 			top_cpuset.mems_allowed = new_mems;
3175 		top_cpuset.effective_mems = new_mems;
3176 		spin_unlock_irq(&callback_lock);
3177 		update_tasks_nodemask(&top_cpuset);
3178 	}
3179 
3180 	percpu_up_write(&cpuset_rwsem);
3181 
3182 	/* if cpus or mems changed, we need to propagate to descendants */
3183 	if (cpus_updated || mems_updated) {
3184 		struct cpuset *cs;
3185 		struct cgroup_subsys_state *pos_css;
3186 
3187 		rcu_read_lock();
3188 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3189 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3190 				continue;
3191 			rcu_read_unlock();
3192 
3193 			cpuset_hotplug_update_tasks(cs, ptmp);
3194 
3195 			rcu_read_lock();
3196 			css_put(&cs->css);
3197 		}
3198 		rcu_read_unlock();
3199 	}
3200 
3201 	/* rebuild sched domains if cpus_allowed has changed */
3202 	if (cpus_updated || force_rebuild) {
3203 		force_rebuild = false;
3204 		rebuild_sched_domains();
3205 	}
3206 
3207 	free_cpumasks(NULL, ptmp);
3208 }
3209 
3210 void cpuset_update_active_cpus(void)
3211 {
3212 	/*
3213 	 * We're inside a cpu hotplug critical region which usually nests
3214 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3215 	 * to a work item to avoid reverse locking order.
3216 	 */
3217 	schedule_work(&cpuset_hotplug_work);
3218 }
3219 
3220 void cpuset_wait_for_hotplug(void)
3221 {
3222 	flush_work(&cpuset_hotplug_work);
3223 }
3224 
3225 /*
3226  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3227  * Call this routine anytime after node_states[N_MEMORY] changes.
3228  * See cpuset_update_active_cpus() for CPU hotplug handling.
3229  */
3230 static int cpuset_track_online_nodes(struct notifier_block *self,
3231 				unsigned long action, void *arg)
3232 {
3233 	schedule_work(&cpuset_hotplug_work);
3234 	return NOTIFY_OK;
3235 }
3236 
3237 static struct notifier_block cpuset_track_online_nodes_nb = {
3238 	.notifier_call = cpuset_track_online_nodes,
3239 	.priority = 10,		/* ??! */
3240 };
3241 
3242 /**
3243  * cpuset_init_smp - initialize cpus_allowed
3244  *
3245  * Description: Finish top cpuset after cpu, node maps are initialized
3246  */
3247 void __init cpuset_init_smp(void)
3248 {
3249 	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
3250 	top_cpuset.mems_allowed = node_states[N_MEMORY];
3251 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3252 
3253 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3254 	top_cpuset.effective_mems = node_states[N_MEMORY];
3255 
3256 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
3257 
3258 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3259 	BUG_ON(!cpuset_migrate_mm_wq);
3260 }
3261 
3262 /**
3263  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3264  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3265  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3266  *
3267  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3268  * attached to the specified @tsk.  Guaranteed to return some non-empty
3269  * subset of cpu_online_mask, even if this means going outside the
3270  * task's cpuset.
3271  **/
3272 
3273 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3274 {
3275 	unsigned long flags;
3276 
3277 	spin_lock_irqsave(&callback_lock, flags);
3278 	rcu_read_lock();
3279 	guarantee_online_cpus(task_cs(tsk), pmask);
3280 	rcu_read_unlock();
3281 	spin_unlock_irqrestore(&callback_lock, flags);
3282 }
3283 
3284 /**
3285  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3286  * @tsk: pointer to task_struct with which the scheduler is struggling
3287  *
3288  * Description: In the case that the scheduler cannot find an allowed cpu in
3289  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3290  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3291  * which will not contain a sane cpumask during cases such as cpu hotplugging.
3292  * This is the absolute last resort for the scheduler and it is only used if
3293  * _every_ other avenue has been traveled.
3294  **/
3295 
3296 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3297 {
3298 	rcu_read_lock();
3299 	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
3300 		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
3301 	rcu_read_unlock();
3302 
3303 	/*
3304 	 * We own tsk->cpus_allowed, nobody can change it under us.
3305 	 *
3306 	 * But we used cs && cs->cpus_allowed lockless and thus can
3307 	 * race with cgroup_attach_task() or update_cpumask() and get
3308 	 * the wrong tsk->cpus_allowed. However, both cases imply the
3309 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3310 	 * which takes task_rq_lock().
3311 	 *
3312 	 * If we are called after it dropped the lock we must see all
3313 	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
3314 	 * set any mask even if it is not right from the task_cs() pov, and
3315 	 * the pending set_cpus_allowed_ptr() will fix things.
3316 	 *
3317 	 * select_fallback_rq() will fix things up and set cpu_possible_mask
3318 	 * if required.
3319 	 */
3320 }
3321 
3322 void __init cpuset_init_current_mems_allowed(void)
3323 {
3324 	nodes_setall(current->mems_allowed);
3325 }
3326 
3327 /**
3328  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
3329  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3330  *
3331  * Description: Returns the nodemask_t mems_allowed of the cpuset
3332  * attached to the specified @tsk.  Guaranteed to return some non-empty
3333  * subset of node_states[N_MEMORY], even if this means going outside the
3334  * task's cpuset.
3335  **/
3336 
3337 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3338 {
3339 	nodemask_t mask;
3340 	unsigned long flags;
3341 
3342 	spin_lock_irqsave(&callback_lock, flags);
3343 	rcu_read_lock();
3344 	guarantee_online_mems(task_cs(tsk), &mask);
3345 	rcu_read_unlock();
3346 	spin_unlock_irqrestore(&callback_lock, flags);
3347 
3348 	return mask;
3349 }
3350 
3351 /**
3352  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3353  * @nodemask: the nodemask to be checked
3354  *
3355  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3356  */
3357 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3358 {
3359 	return nodes_intersects(*nodemask, current->mems_allowed);
3360 }
3361 
3362 /*
3363  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3364  * mem_hardwall ancestor to the specified cpuset.  Call holding
3365  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
3366  * (an unusual configuration), then returns the root cpuset.
3367  */
3368 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3369 {
3370 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3371 		cs = parent_cs(cs);
3372 	return cs;
3373 }
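
/*
 * For example, in a hierarchy A/B/C where only A is mem_exclusive or
 * mem_hardwall, calling this on C walks up through B and returns A;
 * if no ancestor has either flag set, the walk stops at the root
 * cpuset.
 */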
3374 
3375 /**
3376  * cpuset_node_allowed - Can we allocate on a memory node?
3377  * @node: is this an allowed node?
3378  * @gfp_mask: memory allocation flags
3379  *
3380  * If we're in interrupt, yes, we can always allocate.  If @node is set in
3381  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
3382  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3383  * yes.  If current has access to memory reserves as an oom victim, yes.
3384  * Otherwise, no.
3385  *
3386  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3387  * and do not allow allocations outside the current tasks cpuset
3388  * unless the task has been OOM killed.
3389  * GFP_KERNEL allocations are not so marked, so can escape to the
3390  * nearest enclosing hardwalled ancestor cpuset.
3391  *
3392  * Scanning up parent cpusets requires callback_lock.  The
3393  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3394  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3395  * current tasks mems_allowed came up empty on the first pass over
3396  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
3397  * cpuset are short of memory, might require taking the callback_lock.
3398  *
3399  * The first call here from mm/page_alloc:get_page_from_freelist()
3400  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3401  * so no allocation on a node outside the cpuset is allowed (unless
3402  * in interrupt, of course).
3403  *
3404  * The second pass through get_page_from_freelist() doesn't even call
3405  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
3406  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3407  * in alloc_flags.  That logic and the checks below have the combined
3408  * affect that:
3409  *	in_interrupt - any node ok (current task context irrelevant)
3410  *	GFP_ATOMIC   - any node ok
3411  *	tsk_is_oom_victim   - any node ok
3412  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
3413  *	GFP_USER     - only nodes in current tasks mems allowed ok.
3414  */
3415 bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
3416 {
3417 	struct cpuset *cs;		/* current cpuset ancestors */
3418 	int allowed;			/* is allocation on this node allowed? */
3419 	unsigned long flags;
3420 
3421 	if (in_interrupt())
3422 		return true;
3423 	if (node_isset(node, current->mems_allowed))
3424 		return true;
3425 	/*
3426 	 * Allow tasks that have access to memory reserves because they have
3427 	 * been OOM killed to get memory anywhere.
3428 	 */
3429 	if (unlikely(tsk_is_oom_victim(current)))
3430 		return true;
3431 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
3432 		return false;
3433 
3434 	if (current->flags & PF_EXITING) /* Let dying task have memory */
3435 		return true;
3436 
3437 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
3438 	spin_lock_irqsave(&callback_lock, flags);
3439 
3440 	rcu_read_lock();
3441 	cs = nearest_hardwall_ancestor(task_cs(current));
3442 	allowed = node_isset(node, cs->mems_allowed);
3443 	rcu_read_unlock();
3444 
3445 	spin_unlock_irqrestore(&callback_lock, flags);
3446 	return allowed;
3447 }
3448 
3449 /**
3450  * cpuset_mem_spread_node() - On which node to begin search for a file page
3451  * cpuset_slab_spread_node() - On which node to begin search for a slab page
3452  *
3453  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3454  * tasks in a cpuset with is_spread_page or is_spread_slab set),
3455  * and if the memory allocation used cpuset_mem_spread_node()
3456  * to determine on which node to start looking, as it will for
3457  * certain page cache or slab cache pages such as used for file
3458  * system buffers and inode caches, then instead of starting the
3459  * search for a free page on the local node, spread the starting
3460  * node around the task's mems_allowed nodes.
3461  *
3462  * We don't have to worry about the returned node being offline
3463  * because "it can't happen", and even if it did, it would be ok.
3464  *
3465  * The routines calling guarantee_online_mems() are careful to
3466  * only set nodes in task->mems_allowed that are online.  So it
3467  * should not be possible for the following code to return an
3468  * offline node.  But if it did, that would be ok, as this routine
3469  * is not returning the node where the allocation must be, only
3470  * the node where the search should start.  The zonelist passed to
3471  * __alloc_pages() will include all nodes.  If the slab allocator
3472  * is passed an offline node, it will fall back to the local node.
3473  * See kmem_cache_alloc_node().
3474  */
3475 
3476 static int cpuset_spread_node(int *rotor)
3477 {
3478 	return *rotor = next_node_in(*rotor, current->mems_allowed);
3479 }
3480 
3481 int cpuset_mem_spread_node(void)
3482 {
3483 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
3484 		current->cpuset_mem_spread_rotor =
3485 			node_random(&current->mems_allowed);
3486 
3487 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3488 }
3489 
3490 int cpuset_slab_spread_node(void)
3491 {
3492 	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
3493 		current->cpuset_slab_spread_rotor =
3494 			node_random(&current->mems_allowed);
3495 
3496 	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3497 }
3498 
3499 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
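
/*
 * Illustrative sketch only, not part of this file, modeled on the
 * page-cache allocation path: pick the starting node with
 * cpuset_mem_spread_node() when the task has PF_SPREAD_PAGE set
 * (tested via cpuset_do_page_mem_spread()), otherwise start on the
 * local node.  The helper name demo_spread_page_alloc() is
 * hypothetical.
 */
static struct page *demo_spread_page_alloc(gfp_t gfp)
{
	int nid = numa_node_id();

	if (cpuset_do_page_mem_spread())
		nid = cpuset_mem_spread_node();

	return alloc_pages_node(nid, gfp, 0);
}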
3500 
3501 /**
3502  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3503  * @tsk1: pointer to task_struct of some task.
3504  * @tsk2: pointer to task_struct of some other task.
3505  *
3506  * Description: Return true if @tsk1's mems_allowed intersects the
3507  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
3508  * one task's memory usage might impact the memory available
3509  * to the other.
3510  **/
3511 
3512 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
3513 				   const struct task_struct *tsk2)
3514 {
3515 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
3516 }
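
/*
 * Illustrative sketch only, not part of this file, following the
 * OOM-killer use described above: killing a candidate task on behalf
 * of current's failed allocation only helps if the two allowed node
 * sets overlap.  The helper name demo_oom_candidate() is hypothetical.
 */
static bool demo_oom_candidate(struct task_struct *tsk)
{
	/* Overlapping mems_allowed: killing tsk could free useful memory. */
	return cpuset_mems_allowed_intersects(current, tsk);
}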
3517 
3518 /**
3519  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3520  *
3521  * Description: Appends current's cpuset name and a cached copy of its
3522  * mems_allowed to the kernel log line being built (via pr_cont()).
3523  */
3524 void cpuset_print_current_mems_allowed(void)
3525 {
3526 	struct cgroup *cgrp;
3527 
3528 	rcu_read_lock();
3529 
3530 	cgrp = task_cs(current)->css.cgroup;
3531 	pr_cont(",cpuset=");
3532 	pr_cont_cgroup_name(cgrp);
3533 	pr_cont(",mems_allowed=%*pbl",
3534 		nodemask_pr_args(&current->mems_allowed));
3535 
3536 	rcu_read_unlock();
3537 }
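
/*
 * Illustrative output only (names and masks vary): the fragment
 * appended via pr_cont() to an in-progress log line, e.g. for a task
 * in cpuset "foo" allowed nodes 0-1:
 *
 *	,cpuset=foo,mems_allowed=0-1
 */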
3538 
3539 /*
3540  * Collection of memory_pressure is suppressed unless
3541  * this flag is enabled by writing "1" to the special
3542  * cpuset file 'memory_pressure_enabled' in the root cpuset.
3543  */
3544 
3545 int cpuset_memory_pressure_enabled __read_mostly;
3546 
3547 /**
3548  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3549  *
3550  * Keep a running average of the rate of synchronous (direct)
3551  * page reclaim efforts initiated by tasks in each cpuset.
3552  *
3553  * This represents the rate at which some task in the cpuset
3554  * ran low on memory on all nodes it was allowed to use, and
3555  * had to enter the kernel's page reclaim code in an effort to
3556  * create more free memory by tossing clean pages or swapping
3557  * or writing dirty pages.
3558  *
3559  * The result is shown to user space in the per-cpuset read-only
3560  * file "memory_pressure".  The value displayed is an integer
3561  * representing the recent rate of entry into the synchronous
3562  * (direct) page reclaim by any task attached to the cpuset.
3563  **/
3564 
3565 void __cpuset_memory_pressure_bump(void)
3566 {
3567 	rcu_read_lock();
3568 	fmeter_markevent(&task_cs(current)->fmeter);
3569 	rcu_read_unlock();
3570 }
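
/*
 * Illustrative call-site sketch only, not part of this file: callers
 * use the cpuset_memory_pressure_bump() wrapper from <linux/cpuset.h>,
 * which checks cpuset_memory_pressure_enabled before entering the
 * RCU-protected fmeter path above.  The surrounding function is
 * hypothetical; the real hook sits on the direct-reclaim entry path.
 */
static void demo_enter_direct_reclaim(void)
{
	/* Reduces to a flag test when collection is disabled. */
	cpuset_memory_pressure_bump();

	/* ... synchronous (direct) page reclaim would proceed here ... */
}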
3571 
3572 #ifdef CONFIG_PROC_PID_CPUSET
3573 /*
3574  * proc_cpuset_show()
3575  *  - Print task's cpuset path into seq_file.
3576  *  - Used for /proc/<pid>/cpuset.
3577  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3578  *    doesn't really matter if tsk->cpuset changes after we read it,
3579  *    and cgroup_path_ns() takes cgroup_mutex, keeping concurrent
3580  *    attaches from moving the task mid-read anyway.
3581  */
3582 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
3583 		     struct pid *pid, struct task_struct *tsk)
3584 {
3585 	char *buf;
3586 	struct cgroup_subsys_state *css;
3587 	int retval;
3588 
3589 	retval = -ENOMEM;
3590 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
3591 	if (!buf)
3592 		goto out;
3593 
3594 	css = task_get_css(tsk, cpuset_cgrp_id);
3595 	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
3596 				current->nsproxy->cgroup_ns);
3597 	css_put(css);
3598 	if (retval >= PATH_MAX)
3599 		retval = -ENAMETOOLONG;
3600 	if (retval < 0)
3601 		goto out_free;
3602 	seq_puts(m, buf);
3603 	seq_putc(m, '\n');
3604 	retval = 0;
3605 out_free:
3606 	kfree(buf);
3607 out:
3608 	return retval;
3609 }
3610 #endif /* CONFIG_PROC_PID_CPUSET */
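
/*
 * Illustrative example only (paths vary): for a task attached to
 * cpuset "foo" nested under "bar", reading /proc/<pid>/cpuset yields
 * the single line:
 *
 *	/bar/foo
 */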
3611 
3612 /* Display task mems_allowed in /proc/<pid>/status file. */
3613 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
3614 {
3615 	seq_printf(m, "Mems_allowed:\t%*pb\n",
3616 		   nodemask_pr_args(&task->mems_allowed));
3617 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
3618 		   nodemask_pr_args(&task->mems_allowed));
3619 }
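
/*
 * Illustrative /proc/<pid>/status output only (the mask width depends
 * on MAX_NUMNODES; shown for a task allowed nodes 0-1 with
 * MAX_NUMNODES == 64):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */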
3620