xref: /openbmc/linux/kernel/cgroup/cpuset.c (revision 9659281c)
1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/list.h>
37 #include <linux/mempolicy.h>
38 #include <linux/mm.h>
39 #include <linux/memory.h>
40 #include <linux/export.h>
41 #include <linux/mount.h>
42 #include <linux/fs_context.h>
43 #include <linux/namei.h>
44 #include <linux/pagemap.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
47 #include <linux/sched.h>
48 #include <linux/sched/deadline.h>
49 #include <linux/sched/mm.h>
50 #include <linux/sched/task.h>
51 #include <linux/seq_file.h>
52 #include <linux/security.h>
53 #include <linux/slab.h>
54 #include <linux/spinlock.h>
55 #include <linux/stat.h>
56 #include <linux/string.h>
57 #include <linux/time.h>
58 #include <linux/time64.h>
59 #include <linux/backing-dev.h>
60 #include <linux/sort.h>
61 #include <linux/oom.h>
62 #include <linux/sched/isolation.h>
63 #include <linux/uaccess.h>
64 #include <linux/atomic.h>
65 #include <linux/mutex.h>
66 #include <linux/cgroup.h>
67 #include <linux/wait.h>
68 
69 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
70 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
71 
72 /* See "Frequency meter" comments, below. */
73 
74 struct fmeter {
75 	int cnt;		/* unprocessed events count */
76 	int val;		/* most recent output value */
77 	time64_t time;		/* clock (secs) when val computed */
78 	spinlock_t lock;	/* guards read or write of above */
79 };
80 
81 struct cpuset {
82 	struct cgroup_subsys_state css;
83 
84 	unsigned long flags;		/* "unsigned long" so bitops work */
85 
86 	/*
87 	 * On default hierarchy:
88 	 *
89 	 * The user-configured masks can only be changed by writing to
90 	 * cpuset.cpus and cpuset.mems, and won't be limited by the
91 	 * parent masks.
92 	 *
93 	 * The effective masks is the real masks that apply to the tasks
94 	 * in the cpuset. They may be changed if the configured masks are
95 	 * changed or hotplug happens.
96 	 *
97 	 * effective_mask == configured_mask & parent's effective_mask,
98 	 * and if it ends up empty, it will inherit the parent's mask.
99 	 *
100 	 *
101 	 * On legacy hierarchy:
102 	 *
103 	 * The user-configured masks are always the same with effective masks.
104 	 */
105 
106 	/* user-configured CPUs and Memory Nodes allow to tasks */
107 	cpumask_var_t cpus_allowed;
108 	nodemask_t mems_allowed;
109 
110 	/* effective CPUs and Memory Nodes allow to tasks */
111 	cpumask_var_t effective_cpus;
112 	nodemask_t effective_mems;
113 
114 	/*
115 	 * CPUs allocated to child sub-partitions (default hierarchy only)
116 	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
117 	 * - effective_cpus and subparts_cpus are mutually exclusive.
118 	 *
119 	 * effective_cpus contains only onlined CPUs, but subparts_cpus
120 	 * may have offlined ones.
121 	 */
122 	cpumask_var_t subparts_cpus;
123 
124 	/*
125 	 * This is old Memory Nodes tasks took on.
126 	 *
127 	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
128 	 * - A new cpuset's old_mems_allowed is initialized when some
129 	 *   task is moved into it.
130 	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
131 	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
132 	 *   then old_mems_allowed is updated to mems_allowed.
133 	 */
134 	nodemask_t old_mems_allowed;
135 
136 	struct fmeter fmeter;		/* memory_pressure filter */
137 
138 	/*
139 	 * Tasks are being attached to this cpuset.  Used to prevent
140 	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
141 	 */
142 	int attach_in_progress;
143 
144 	/* partition number for rebuild_sched_domains() */
145 	int pn;
146 
147 	/* for custom sched domain */
148 	int relax_domain_level;
149 
150 	/* number of CPUs in subparts_cpus */
151 	int nr_subparts_cpus;
152 
153 	/* partition root state */
154 	int partition_root_state;
155 
156 	/*
157 	 * Default hierarchy only:
158 	 * use_parent_ecpus - set if using parent's effective_cpus
159 	 * child_ecpus_count - # of children with use_parent_ecpus set
160 	 */
161 	int use_parent_ecpus;
162 	int child_ecpus_count;
163 };
164 
165 /*
166  * Partition root states:
167  *
168  *   0 - not a partition root
169  *
170  *   1 - partition root
171  *
172  *  -1 - invalid partition root
173  *       None of the cpus in cpus_allowed can be put into the parent's
174  *       subparts_cpus. In this case, the cpuset is not a real partition
175  *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
176  *       and the cpuset can be restored back to a partition root if the
177  *       parent cpuset can give more CPUs back to this child cpuset.
178  */
179 #define PRS_DISABLED		0
180 #define PRS_ENABLED		1
181 #define PRS_ERROR		-1
182 
183 /*
184  * Temporary cpumasks for working with partitions that are passed among
185  * functions to avoid memory allocation in inner functions.
186  */
187 struct tmpmasks {
188 	cpumask_var_t addmask, delmask;	/* For partition root */
189 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
190 };
191 
192 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
193 {
194 	return css ? container_of(css, struct cpuset, css) : NULL;
195 }
196 
197 /* Retrieve the cpuset for a task */
198 static inline struct cpuset *task_cs(struct task_struct *task)
199 {
200 	return css_cs(task_css(task, cpuset_cgrp_id));
201 }
202 
203 static inline struct cpuset *parent_cs(struct cpuset *cs)
204 {
205 	return css_cs(cs->css.parent);
206 }
207 
208 /* bits in struct cpuset flags field */
209 typedef enum {
210 	CS_ONLINE,
211 	CS_CPU_EXCLUSIVE,
212 	CS_MEM_EXCLUSIVE,
213 	CS_MEM_HARDWALL,
214 	CS_MEMORY_MIGRATE,
215 	CS_SCHED_LOAD_BALANCE,
216 	CS_SPREAD_PAGE,
217 	CS_SPREAD_SLAB,
218 } cpuset_flagbits_t;
219 
220 /* convenient tests for these bits */
221 static inline bool is_cpuset_online(struct cpuset *cs)
222 {
223 	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
224 }
225 
226 static inline int is_cpu_exclusive(const struct cpuset *cs)
227 {
228 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
229 }
230 
231 static inline int is_mem_exclusive(const struct cpuset *cs)
232 {
233 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
234 }
235 
236 static inline int is_mem_hardwall(const struct cpuset *cs)
237 {
238 	return test_bit(CS_MEM_HARDWALL, &cs->flags);
239 }
240 
241 static inline int is_sched_load_balance(const struct cpuset *cs)
242 {
243 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
244 }
245 
246 static inline int is_memory_migrate(const struct cpuset *cs)
247 {
248 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
249 }
250 
251 static inline int is_spread_page(const struct cpuset *cs)
252 {
253 	return test_bit(CS_SPREAD_PAGE, &cs->flags);
254 }
255 
256 static inline int is_spread_slab(const struct cpuset *cs)
257 {
258 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
259 }
260 
261 static inline int is_partition_root(const struct cpuset *cs)
262 {
263 	return cs->partition_root_state > 0;
264 }
265 
266 static struct cpuset top_cpuset = {
267 	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
268 		  (1 << CS_MEM_EXCLUSIVE)),
269 	.partition_root_state = PRS_ENABLED,
270 };
271 
272 /**
273  * cpuset_for_each_child - traverse online children of a cpuset
274  * @child_cs: loop cursor pointing to the current child
275  * @pos_css: used for iteration
276  * @parent_cs: target cpuset to walk children of
277  *
278  * Walk @child_cs through the online children of @parent_cs.  Must be used
279  * with RCU read locked.
280  */
281 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
282 	css_for_each_child((pos_css), &(parent_cs)->css)		\
283 		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
284 
285 /**
286  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
287  * @des_cs: loop cursor pointing to the current descendant
288  * @pos_css: used for iteration
289  * @root_cs: target cpuset to walk ancestor of
290  *
291  * Walk @des_cs through the online descendants of @root_cs.  Must be used
292  * with RCU read locked.  The caller may modify @pos_css by calling
293  * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
294  * iteration and the first node to be visited.
295  */
296 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
297 	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
298 		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
299 
300 /*
301  * There are two global locks guarding cpuset structures - cpuset_mutex and
302  * callback_lock. We also require taking task_lock() when dereferencing a
303  * task's cpuset pointer. See "The task_lock() exception", at the end of this
304  * comment.
305  *
306  * A task must hold both locks to modify cpusets.  If a task holds
307  * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
308  * is the only task able to also acquire callback_lock and be able to
309  * modify cpusets.  It can perform various checks on the cpuset structure
310  * first, knowing nothing will change.  It can also allocate memory while
311  * just holding cpuset_mutex.  While it is performing these checks, various
312  * callback routines can briefly acquire callback_lock to query cpusets.
313  * Once it is ready to make the changes, it takes callback_lock, blocking
314  * everyone else.
315  *
316  * Calls to the kernel memory allocator can not be made while holding
317  * callback_lock, as that would risk double tripping on callback_lock
318  * from one of the callbacks into the cpuset code from within
319  * __alloc_pages().
320  *
321  * If a task is only holding callback_lock, then it has read-only
322  * access to cpusets.
323  *
324  * Now, the task_struct fields mems_allowed and mempolicy may be changed
325  * by other task, we use alloc_lock in the task_struct fields to protect
326  * them.
327  *
328  * The cpuset_common_file_read() handlers only hold callback_lock across
329  * small pieces of code, such as when reading out possibly multi-word
330  * cpumasks and nodemasks.
331  *
332  * Accessing a task's cpuset should be done in accordance with the
333  * guidelines for accessing subsystem state in kernel/cgroup.c
334  */
335 
336 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
337 
338 void cpuset_read_lock(void)
339 {
340 	percpu_down_read(&cpuset_rwsem);
341 }
342 
343 void cpuset_read_unlock(void)
344 {
345 	percpu_up_read(&cpuset_rwsem);
346 }
347 
348 static DEFINE_SPINLOCK(callback_lock);
349 
350 static struct workqueue_struct *cpuset_migrate_mm_wq;
351 
352 /*
353  * CPU / memory hotplug is handled asynchronously.
354  */
355 static void cpuset_hotplug_workfn(struct work_struct *work);
356 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
357 
358 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
359 
360 /*
361  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
362  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
363  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
364  * With v2 behavior, "cpus" and "mems" are always what the users have
365  * requested and won't be changed by hotplug events. Only the effective
366  * cpus or mems will be affected.
367  */
368 static inline bool is_in_v2_mode(void)
369 {
370 	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
371 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
372 }
373 
374 /*
375  * Return in pmask the portion of a cpusets's cpus_allowed that
376  * are online.  If none are online, walk up the cpuset hierarchy
377  * until we find one that does have some online cpus.
378  *
379  * One way or another, we guarantee to return some non-empty subset
380  * of cpu_online_mask.
381  *
382  * Call with callback_lock or cpuset_mutex held.
383  */
384 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
385 {
386 	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
387 		cs = parent_cs(cs);
388 		if (unlikely(!cs)) {
389 			/*
390 			 * The top cpuset doesn't have any online cpu as a
391 			 * consequence of a race between cpuset_hotplug_work
392 			 * and cpu hotplug notifier.  But we know the top
393 			 * cpuset's effective_cpus is on its way to be
394 			 * identical to cpu_online_mask.
395 			 */
396 			cpumask_copy(pmask, cpu_online_mask);
397 			return;
398 		}
399 	}
400 	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
401 }
402 
403 /*
404  * Return in *pmask the portion of a cpusets's mems_allowed that
405  * are online, with memory.  If none are online with memory, walk
406  * up the cpuset hierarchy until we find one that does have some
407  * online mems.  The top cpuset always has some mems online.
408  *
409  * One way or another, we guarantee to return some non-empty subset
410  * of node_states[N_MEMORY].
411  *
412  * Call with callback_lock or cpuset_mutex held.
413  */
414 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
415 {
416 	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
417 		cs = parent_cs(cs);
418 	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
419 }
420 
421 /*
422  * update task's spread flag if cpuset's page/slab spread flag is set
423  *
424  * Call with callback_lock or cpuset_mutex held.
425  */
426 static void cpuset_update_task_spread_flag(struct cpuset *cs,
427 					struct task_struct *tsk)
428 {
429 	if (is_spread_page(cs))
430 		task_set_spread_page(tsk);
431 	else
432 		task_clear_spread_page(tsk);
433 
434 	if (is_spread_slab(cs))
435 		task_set_spread_slab(tsk);
436 	else
437 		task_clear_spread_slab(tsk);
438 }
439 
440 /*
441  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
442  *
443  * One cpuset is a subset of another if all its allowed CPUs and
444  * Memory Nodes are a subset of the other, and its exclusive flags
445  * are only set if the other's are set.  Call holding cpuset_mutex.
446  */
447 
448 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
449 {
450 	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
451 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
452 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
453 		is_mem_exclusive(p) <= is_mem_exclusive(q);
454 }
455 
456 /**
457  * alloc_cpumasks - allocate three cpumasks for cpuset
458  * @cs:  the cpuset that have cpumasks to be allocated.
459  * @tmp: the tmpmasks structure pointer
460  * Return: 0 if successful, -ENOMEM otherwise.
461  *
462  * Only one of the two input arguments should be non-NULL.
463  */
464 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
465 {
466 	cpumask_var_t *pmask1, *pmask2, *pmask3;
467 
468 	if (cs) {
469 		pmask1 = &cs->cpus_allowed;
470 		pmask2 = &cs->effective_cpus;
471 		pmask3 = &cs->subparts_cpus;
472 	} else {
473 		pmask1 = &tmp->new_cpus;
474 		pmask2 = &tmp->addmask;
475 		pmask3 = &tmp->delmask;
476 	}
477 
478 	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
479 		return -ENOMEM;
480 
481 	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
482 		goto free_one;
483 
484 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
485 		goto free_two;
486 
487 	return 0;
488 
489 free_two:
490 	free_cpumask_var(*pmask2);
491 free_one:
492 	free_cpumask_var(*pmask1);
493 	return -ENOMEM;
494 }
495 
496 /**
497  * free_cpumasks - free cpumasks in a tmpmasks structure
498  * @cs:  the cpuset that have cpumasks to be free.
499  * @tmp: the tmpmasks structure pointer
500  */
501 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
502 {
503 	if (cs) {
504 		free_cpumask_var(cs->cpus_allowed);
505 		free_cpumask_var(cs->effective_cpus);
506 		free_cpumask_var(cs->subparts_cpus);
507 	}
508 	if (tmp) {
509 		free_cpumask_var(tmp->new_cpus);
510 		free_cpumask_var(tmp->addmask);
511 		free_cpumask_var(tmp->delmask);
512 	}
513 }
514 
515 /**
516  * alloc_trial_cpuset - allocate a trial cpuset
517  * @cs: the cpuset that the trial cpuset duplicates
518  */
519 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
520 {
521 	struct cpuset *trial;
522 
523 	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
524 	if (!trial)
525 		return NULL;
526 
527 	if (alloc_cpumasks(trial, NULL)) {
528 		kfree(trial);
529 		return NULL;
530 	}
531 
532 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
533 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
534 	return trial;
535 }
536 
537 /**
538  * free_cpuset - free the cpuset
539  * @cs: the cpuset to be freed
540  */
541 static inline void free_cpuset(struct cpuset *cs)
542 {
543 	free_cpumasks(cs, NULL);
544 	kfree(cs);
545 }
546 
547 /*
548  * validate_change() - Used to validate that any proposed cpuset change
549  *		       follows the structural rules for cpusets.
550  *
551  * If we replaced the flag and mask values of the current cpuset
552  * (cur) with those values in the trial cpuset (trial), would
553  * our various subset and exclusive rules still be valid?  Presumes
554  * cpuset_mutex held.
555  *
556  * 'cur' is the address of an actual, in-use cpuset.  Operations
557  * such as list traversal that depend on the actual address of the
558  * cpuset in the list must use cur below, not trial.
559  *
560  * 'trial' is the address of bulk structure copy of cur, with
561  * perhaps one or more of the fields cpus_allowed, mems_allowed,
562  * or flags changed to new, trial values.
563  *
564  * Return 0 if valid, -errno if not.
565  */
566 
567 static int validate_change(struct cpuset *cur, struct cpuset *trial)
568 {
569 	struct cgroup_subsys_state *css;
570 	struct cpuset *c, *par;
571 	int ret;
572 
573 	rcu_read_lock();
574 
575 	/* Each of our child cpusets must be a subset of us */
576 	ret = -EBUSY;
577 	cpuset_for_each_child(c, css, cur)
578 		if (!is_cpuset_subset(c, trial))
579 			goto out;
580 
581 	/* Remaining checks don't apply to root cpuset */
582 	ret = 0;
583 	if (cur == &top_cpuset)
584 		goto out;
585 
586 	par = parent_cs(cur);
587 
588 	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
589 	ret = -EACCES;
590 	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
591 		goto out;
592 
593 	/*
594 	 * If either I or some sibling (!= me) is exclusive, we can't
595 	 * overlap
596 	 */
597 	ret = -EINVAL;
598 	cpuset_for_each_child(c, css, par) {
599 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
600 		    c != cur &&
601 		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
602 			goto out;
603 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
604 		    c != cur &&
605 		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
606 			goto out;
607 	}
608 
609 	/*
610 	 * Cpusets with tasks - existing or newly being attached - can't
611 	 * be changed to have empty cpus_allowed or mems_allowed.
612 	 */
613 	ret = -ENOSPC;
614 	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
615 		if (!cpumask_empty(cur->cpus_allowed) &&
616 		    cpumask_empty(trial->cpus_allowed))
617 			goto out;
618 		if (!nodes_empty(cur->mems_allowed) &&
619 		    nodes_empty(trial->mems_allowed))
620 			goto out;
621 	}
622 
623 	/*
624 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
625 	 * tasks.
626 	 */
627 	ret = -EBUSY;
628 	if (is_cpu_exclusive(cur) &&
629 	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
630 				       trial->cpus_allowed))
631 		goto out;
632 
633 	ret = 0;
634 out:
635 	rcu_read_unlock();
636 	return ret;
637 }
638 
639 #ifdef CONFIG_SMP
640 /*
641  * Helper routine for generate_sched_domains().
642  * Do cpusets a, b have overlapping effective cpus_allowed masks?
643  */
644 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
645 {
646 	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
647 }
648 
649 static void
650 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
651 {
652 	if (dattr->relax_domain_level < c->relax_domain_level)
653 		dattr->relax_domain_level = c->relax_domain_level;
654 	return;
655 }
656 
657 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
658 				    struct cpuset *root_cs)
659 {
660 	struct cpuset *cp;
661 	struct cgroup_subsys_state *pos_css;
662 
663 	rcu_read_lock();
664 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
665 		/* skip the whole subtree if @cp doesn't have any CPU */
666 		if (cpumask_empty(cp->cpus_allowed)) {
667 			pos_css = css_rightmost_descendant(pos_css);
668 			continue;
669 		}
670 
671 		if (is_sched_load_balance(cp))
672 			update_domain_attr(dattr, cp);
673 	}
674 	rcu_read_unlock();
675 }
676 
677 /* Must be called with cpuset_mutex held.  */
678 static inline int nr_cpusets(void)
679 {
680 	/* jump label reference count + the top-level cpuset */
681 	return static_key_count(&cpusets_enabled_key.key) + 1;
682 }
683 
684 /*
685  * generate_sched_domains()
686  *
687  * This function builds a partial partition of the systems CPUs
688  * A 'partial partition' is a set of non-overlapping subsets whose
689  * union is a subset of that set.
690  * The output of this function needs to be passed to kernel/sched/core.c
691  * partition_sched_domains() routine, which will rebuild the scheduler's
692  * load balancing domains (sched domains) as specified by that partial
693  * partition.
694  *
695  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
696  * for a background explanation of this.
697  *
698  * Does not return errors, on the theory that the callers of this
699  * routine would rather not worry about failures to rebuild sched
700  * domains when operating in the severe memory shortage situations
701  * that could cause allocation failures below.
702  *
703  * Must be called with cpuset_mutex held.
704  *
705  * The three key local variables below are:
706  *    cp - cpuset pointer, used (together with pos_css) to perform a
707  *	   top-down scan of all cpusets. For our purposes, rebuilding
708  *	   the schedulers sched domains, we can ignore !is_sched_load_
709  *	   balance cpusets.
710  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
711  *	   that need to be load balanced, for convenient iterative
712  *	   access by the subsequent code that finds the best partition,
713  *	   i.e the set of domains (subsets) of CPUs such that the
714  *	   cpus_allowed of every cpuset marked is_sched_load_balance
715  *	   is a subset of one of these domains, while there are as
716  *	   many such domains as possible, each as small as possible.
717  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
718  *	   the kernel/sched/core.c routine partition_sched_domains() in a
719  *	   convenient format, that can be easily compared to the prior
720  *	   value to determine what partition elements (sched domains)
721  *	   were changed (added or removed.)
722  *
723  * Finding the best partition (set of domains):
724  *	The triple nested loops below over i, j, k scan over the
725  *	load balanced cpusets (using the array of cpuset pointers in
726  *	csa[]) looking for pairs of cpusets that have overlapping
727  *	cpus_allowed, but which don't have the same 'pn' partition
728  *	number and gives them in the same partition number.  It keeps
729  *	looping on the 'restart' label until it can no longer find
730  *	any such pairs.
731  *
732  *	The union of the cpus_allowed masks from the set of
733  *	all cpusets having the same 'pn' value then form the one
734  *	element of the partition (one sched domain) to be passed to
735  *	partition_sched_domains().
736  */
737 static int generate_sched_domains(cpumask_var_t **domains,
738 			struct sched_domain_attr **attributes)
739 {
740 	struct cpuset *cp;	/* top-down scan of cpusets */
741 	struct cpuset **csa;	/* array of all cpuset ptrs */
742 	int csn;		/* how many cpuset ptrs in csa so far */
743 	int i, j, k;		/* indices for partition finding loops */
744 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
745 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
746 	int ndoms = 0;		/* number of sched domains in result */
747 	int nslot;		/* next empty doms[] struct cpumask slot */
748 	struct cgroup_subsys_state *pos_css;
749 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
750 
751 	doms = NULL;
752 	dattr = NULL;
753 	csa = NULL;
754 
755 	/* Special case for the 99% of systems with one, full, sched domain */
756 	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
757 		ndoms = 1;
758 		doms = alloc_sched_domains(ndoms);
759 		if (!doms)
760 			goto done;
761 
762 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
763 		if (dattr) {
764 			*dattr = SD_ATTR_INIT;
765 			update_domain_attr_tree(dattr, &top_cpuset);
766 		}
767 		cpumask_and(doms[0], top_cpuset.effective_cpus,
768 			    housekeeping_cpumask(HK_FLAG_DOMAIN));
769 
770 		goto done;
771 	}
772 
773 	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
774 	if (!csa)
775 		goto done;
776 	csn = 0;
777 
778 	rcu_read_lock();
779 	if (root_load_balance)
780 		csa[csn++] = &top_cpuset;
781 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
782 		if (cp == &top_cpuset)
783 			continue;
784 		/*
785 		 * Continue traversing beyond @cp iff @cp has some CPUs and
786 		 * isn't load balancing.  The former is obvious.  The
787 		 * latter: All child cpusets contain a subset of the
788 		 * parent's cpus, so just skip them, and then we call
789 		 * update_domain_attr_tree() to calc relax_domain_level of
790 		 * the corresponding sched domain.
791 		 *
792 		 * If root is load-balancing, we can skip @cp if it
793 		 * is a subset of the root's effective_cpus.
794 		 */
795 		if (!cpumask_empty(cp->cpus_allowed) &&
796 		    !(is_sched_load_balance(cp) &&
797 		      cpumask_intersects(cp->cpus_allowed,
798 					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
799 			continue;
800 
801 		if (root_load_balance &&
802 		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
803 			continue;
804 
805 		if (is_sched_load_balance(cp) &&
806 		    !cpumask_empty(cp->effective_cpus))
807 			csa[csn++] = cp;
808 
809 		/* skip @cp's subtree if not a partition root */
810 		if (!is_partition_root(cp))
811 			pos_css = css_rightmost_descendant(pos_css);
812 	}
813 	rcu_read_unlock();
814 
815 	for (i = 0; i < csn; i++)
816 		csa[i]->pn = i;
817 	ndoms = csn;
818 
819 restart:
820 	/* Find the best partition (set of sched domains) */
821 	for (i = 0; i < csn; i++) {
822 		struct cpuset *a = csa[i];
823 		int apn = a->pn;
824 
825 		for (j = 0; j < csn; j++) {
826 			struct cpuset *b = csa[j];
827 			int bpn = b->pn;
828 
829 			if (apn != bpn && cpusets_overlap(a, b)) {
830 				for (k = 0; k < csn; k++) {
831 					struct cpuset *c = csa[k];
832 
833 					if (c->pn == bpn)
834 						c->pn = apn;
835 				}
836 				ndoms--;	/* one less element */
837 				goto restart;
838 			}
839 		}
840 	}
841 
842 	/*
843 	 * Now we know how many domains to create.
844 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
845 	 */
846 	doms = alloc_sched_domains(ndoms);
847 	if (!doms)
848 		goto done;
849 
850 	/*
851 	 * The rest of the code, including the scheduler, can deal with
852 	 * dattr==NULL case. No need to abort if alloc fails.
853 	 */
854 	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
855 			      GFP_KERNEL);
856 
857 	for (nslot = 0, i = 0; i < csn; i++) {
858 		struct cpuset *a = csa[i];
859 		struct cpumask *dp;
860 		int apn = a->pn;
861 
862 		if (apn < 0) {
863 			/* Skip completed partitions */
864 			continue;
865 		}
866 
867 		dp = doms[nslot];
868 
869 		if (nslot == ndoms) {
870 			static int warnings = 10;
871 			if (warnings) {
872 				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
873 					nslot, ndoms, csn, i, apn);
874 				warnings--;
875 			}
876 			continue;
877 		}
878 
879 		cpumask_clear(dp);
880 		if (dattr)
881 			*(dattr + nslot) = SD_ATTR_INIT;
882 		for (j = i; j < csn; j++) {
883 			struct cpuset *b = csa[j];
884 
885 			if (apn == b->pn) {
886 				cpumask_or(dp, dp, b->effective_cpus);
887 				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
888 				if (dattr)
889 					update_domain_attr_tree(dattr + nslot, b);
890 
891 				/* Done with this partition */
892 				b->pn = -1;
893 			}
894 		}
895 		nslot++;
896 	}
897 	BUG_ON(nslot != ndoms);
898 
899 done:
900 	kfree(csa);
901 
902 	/*
903 	 * Fallback to the default domain if kmalloc() failed.
904 	 * See comments in partition_sched_domains().
905 	 */
906 	if (doms == NULL)
907 		ndoms = 1;
908 
909 	*domains    = doms;
910 	*attributes = dattr;
911 	return ndoms;
912 }
913 
914 static void update_tasks_root_domain(struct cpuset *cs)
915 {
916 	struct css_task_iter it;
917 	struct task_struct *task;
918 
919 	css_task_iter_start(&cs->css, 0, &it);
920 
921 	while ((task = css_task_iter_next(&it)))
922 		dl_add_task_root_domain(task);
923 
924 	css_task_iter_end(&it);
925 }
926 
927 static void rebuild_root_domains(void)
928 {
929 	struct cpuset *cs = NULL;
930 	struct cgroup_subsys_state *pos_css;
931 
932 	percpu_rwsem_assert_held(&cpuset_rwsem);
933 	lockdep_assert_cpus_held();
934 	lockdep_assert_held(&sched_domains_mutex);
935 
936 	rcu_read_lock();
937 
938 	/*
939 	 * Clear default root domain DL accounting, it will be computed again
940 	 * if a task belongs to it.
941 	 */
942 	dl_clear_root_domain(&def_root_domain);
943 
944 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
945 
946 		if (cpumask_empty(cs->effective_cpus)) {
947 			pos_css = css_rightmost_descendant(pos_css);
948 			continue;
949 		}
950 
951 		css_get(&cs->css);
952 
953 		rcu_read_unlock();
954 
955 		update_tasks_root_domain(cs);
956 
957 		rcu_read_lock();
958 		css_put(&cs->css);
959 	}
960 	rcu_read_unlock();
961 }
962 
963 static void
964 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
965 				    struct sched_domain_attr *dattr_new)
966 {
967 	mutex_lock(&sched_domains_mutex);
968 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
969 	rebuild_root_domains();
970 	mutex_unlock(&sched_domains_mutex);
971 }
972 
973 /*
974  * Rebuild scheduler domains.
975  *
976  * If the flag 'sched_load_balance' of any cpuset with non-empty
977  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
978  * which has that flag enabled, or if any cpuset with a non-empty
979  * 'cpus' is removed, then call this routine to rebuild the
980  * scheduler's dynamic sched domains.
981  *
982  * Call with cpuset_mutex held.  Takes get_online_cpus().
983  */
984 static void rebuild_sched_domains_locked(void)
985 {
986 	struct cgroup_subsys_state *pos_css;
987 	struct sched_domain_attr *attr;
988 	cpumask_var_t *doms;
989 	struct cpuset *cs;
990 	int ndoms;
991 
992 	lockdep_assert_cpus_held();
993 	percpu_rwsem_assert_held(&cpuset_rwsem);
994 
995 	/*
996 	 * If we have raced with CPU hotplug, return early to avoid
997 	 * passing doms with offlined cpu to partition_sched_domains().
998 	 * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
999 	 *
1000 	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1001 	 * should be the same as the active CPUs, so checking only top_cpuset
1002 	 * is enough to detect racing CPU offlines.
1003 	 */
1004 	if (!top_cpuset.nr_subparts_cpus &&
1005 	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1006 		return;
1007 
1008 	/*
1009 	 * With subpartition CPUs, however, the effective CPUs of a partition
1010 	 * root should be only a subset of the active CPUs.  Since a CPU in any
1011 	 * partition root could be offlined, all must be checked.
1012 	 */
1013 	if (top_cpuset.nr_subparts_cpus) {
1014 		rcu_read_lock();
1015 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1016 			if (!is_partition_root(cs)) {
1017 				pos_css = css_rightmost_descendant(pos_css);
1018 				continue;
1019 			}
1020 			if (!cpumask_subset(cs->effective_cpus,
1021 					    cpu_active_mask)) {
1022 				rcu_read_unlock();
1023 				return;
1024 			}
1025 		}
1026 		rcu_read_unlock();
1027 	}
1028 
1029 	/* Generate domain masks and attrs */
1030 	ndoms = generate_sched_domains(&doms, &attr);
1031 
1032 	/* Have scheduler rebuild the domains */
1033 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
1034 }
1035 #else /* !CONFIG_SMP */
1036 static void rebuild_sched_domains_locked(void)
1037 {
1038 }
1039 #endif /* CONFIG_SMP */
1040 
1041 void rebuild_sched_domains(void)
1042 {
1043 	get_online_cpus();
1044 	percpu_down_write(&cpuset_rwsem);
1045 	rebuild_sched_domains_locked();
1046 	percpu_up_write(&cpuset_rwsem);
1047 	put_online_cpus();
1048 }
1049 
1050 /**
1051  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1052  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1053  *
1054  * Iterate through each task of @cs updating its cpus_allowed to the
1055  * effective cpuset's.  As this function is called with cpuset_mutex held,
1056  * cpuset membership stays stable.
1057  */
1058 static void update_tasks_cpumask(struct cpuset *cs)
1059 {
1060 	struct css_task_iter it;
1061 	struct task_struct *task;
1062 
1063 	css_task_iter_start(&cs->css, 0, &it);
1064 	while ((task = css_task_iter_next(&it)))
1065 		set_cpus_allowed_ptr(task, cs->effective_cpus);
1066 	css_task_iter_end(&it);
1067 }
1068 
1069 /**
1070  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1071  * @new_cpus: the temp variable for the new effective_cpus mask
1072  * @cs: the cpuset the need to recompute the new effective_cpus mask
1073  * @parent: the parent cpuset
1074  *
1075  * If the parent has subpartition CPUs, include them in the list of
1076  * allowable CPUs in computing the new effective_cpus mask. Since offlined
1077  * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1078  * to mask those out.
1079  */
1080 static void compute_effective_cpumask(struct cpumask *new_cpus,
1081 				      struct cpuset *cs, struct cpuset *parent)
1082 {
1083 	if (parent->nr_subparts_cpus) {
1084 		cpumask_or(new_cpus, parent->effective_cpus,
1085 			   parent->subparts_cpus);
1086 		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1087 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1088 	} else {
1089 		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1090 	}
1091 }
1092 
1093 /*
1094  * Commands for update_parent_subparts_cpumask
1095  */
1096 enum subparts_cmd {
1097 	partcmd_enable,		/* Enable partition root	 */
1098 	partcmd_disable,	/* Disable partition root	 */
1099 	partcmd_update,		/* Update parent's subparts_cpus */
1100 };
1101 
1102 /**
1103  * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1104  * @cpuset:  The cpuset that requests change in partition root state
1105  * @cmd:     Partition root state change command
1106  * @newmask: Optional new cpumask for partcmd_update
1107  * @tmp:     Temporary addmask and delmask
1108  * Return:   0, 1 or an error code
1109  *
1110  * For partcmd_enable, the cpuset is being transformed from a non-partition
1111  * root to a partition root. The cpus_allowed mask of the given cpuset will
1112  * be put into parent's subparts_cpus and taken away from parent's
1113  * effective_cpus. The function will return 0 if all the CPUs listed in
1114  * cpus_allowed can be granted or an error code will be returned.
1115  *
1116  * For partcmd_disable, the cpuset is being transofrmed from a partition
1117  * root back to a non-partition root. any CPUs in cpus_allowed that are in
1118  * parent's subparts_cpus will be taken away from that cpumask and put back
1119  * into parent's effective_cpus. 0 should always be returned.
1120  *
1121  * For partcmd_update, if the optional newmask is specified, the cpu
1122  * list is to be changed from cpus_allowed to newmask. Otherwise,
1123  * cpus_allowed is assumed to remain the same. The cpuset should either
1124  * be a partition root or an invalid partition root. The partition root
1125  * state may change if newmask is NULL and none of the requested CPUs can
1126  * be granted by the parent. The function will return 1 if changes to
1127  * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
1128  * Error code should only be returned when newmask is non-NULL.
1129  *
1130  * The partcmd_enable and partcmd_disable commands are used by
1131  * update_prstate(). The partcmd_update command is used by
1132  * update_cpumasks_hier() with newmask NULL and update_cpumask() with
1133  * newmask set.
1134  *
1135  * The checking is more strict when enabling partition root than the
1136  * other two commands.
1137  *
1138  * Because of the implicit cpu exclusive nature of a partition root,
1139  * cpumask changes that violates the cpu exclusivity rule will not be
1140  * permitted when checked by validate_change(). The validate_change()
1141  * function will also prevent any changes to the cpu list if it is not
1142  * a superset of children's cpu lists.
1143  */
1144 static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
1145 					  struct cpumask *newmask,
1146 					  struct tmpmasks *tmp)
1147 {
1148 	struct cpuset *parent = parent_cs(cpuset);
1149 	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
1150 	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
1151 	bool part_error = false;	/* Partition error? */
1152 
1153 	percpu_rwsem_assert_held(&cpuset_rwsem);
1154 
1155 	/*
1156 	 * The parent must be a partition root.
1157 	 * The new cpumask, if present, or the current cpus_allowed must
1158 	 * not be empty.
1159 	 */
1160 	if (!is_partition_root(parent) ||
1161 	   (newmask && cpumask_empty(newmask)) ||
1162 	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
1163 		return -EINVAL;
1164 
1165 	/*
1166 	 * Enabling/disabling partition root is not allowed if there are
1167 	 * online children.
1168 	 */
1169 	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
1170 		return -EBUSY;
1171 
1172 	/*
1173 	 * Enabling partition root is not allowed if not all the CPUs
1174 	 * can be granted from parent's effective_cpus or at least one
1175 	 * CPU will be left after that.
1176 	 */
1177 	if ((cmd == partcmd_enable) &&
1178 	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
1179 	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
1180 		return -EINVAL;
1181 
1182 	/*
1183 	 * A cpumask update cannot make parent's effective_cpus become empty.
1184 	 */
1185 	adding = deleting = false;
1186 	if (cmd == partcmd_enable) {
1187 		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
1188 		adding = true;
1189 	} else if (cmd == partcmd_disable) {
1190 		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
1191 				       parent->subparts_cpus);
1192 	} else if (newmask) {
1193 		/*
1194 		 * partcmd_update with newmask:
1195 		 *
1196 		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1197 		 * addmask = newmask & parent->effective_cpus
1198 		 *		     & ~parent->subparts_cpus
1199 		 */
1200 		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
1201 		deleting = cpumask_and(tmp->delmask, tmp->delmask,
1202 				       parent->subparts_cpus);
1203 
1204 		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
1205 		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1206 					parent->subparts_cpus);
1207 		/*
1208 		 * Return error if the new effective_cpus could become empty.
1209 		 */
1210 		if (adding &&
1211 		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
1212 			if (!deleting)
1213 				return -EINVAL;
1214 			/*
1215 			 * As some of the CPUs in subparts_cpus might have
1216 			 * been offlined, we need to compute the real delmask
1217 			 * to confirm that.
1218 			 */
1219 			if (!cpumask_and(tmp->addmask, tmp->delmask,
1220 					 cpu_active_mask))
1221 				return -EINVAL;
1222 			cpumask_copy(tmp->addmask, parent->effective_cpus);
1223 		}
1224 	} else {
1225 		/*
1226 		 * partcmd_update w/o newmask:
1227 		 *
1228 		 * addmask = cpus_allowed & parent->effectiveb_cpus
1229 		 *
1230 		 * Note that parent's subparts_cpus may have been
1231 		 * pre-shrunk in case there is a change in the cpu list.
1232 		 * So no deletion is needed.
1233 		 */
1234 		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
1235 				     parent->effective_cpus);
1236 		part_error = cpumask_equal(tmp->addmask,
1237 					   parent->effective_cpus);
1238 	}
1239 
1240 	if (cmd == partcmd_update) {
1241 		int prev_prs = cpuset->partition_root_state;
1242 
1243 		/*
1244 		 * Check for possible transition between PRS_ENABLED
1245 		 * and PRS_ERROR.
1246 		 */
1247 		switch (cpuset->partition_root_state) {
1248 		case PRS_ENABLED:
1249 			if (part_error)
1250 				cpuset->partition_root_state = PRS_ERROR;
1251 			break;
1252 		case PRS_ERROR:
1253 			if (!part_error)
1254 				cpuset->partition_root_state = PRS_ENABLED;
1255 			break;
1256 		}
1257 		/*
1258 		 * Set part_error if previously in invalid state.
1259 		 */
1260 		part_error = (prev_prs == PRS_ERROR);
1261 	}
1262 
1263 	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
1264 		return 0;	/* Nothing need to be done */
1265 
1266 	if (cpuset->partition_root_state == PRS_ERROR) {
1267 		/*
1268 		 * Remove all its cpus from parent's subparts_cpus.
1269 		 */
1270 		adding = false;
1271 		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
1272 				       parent->subparts_cpus);
1273 	}
1274 
1275 	if (!adding && !deleting)
1276 		return 0;
1277 
1278 	/*
1279 	 * Change the parent's subparts_cpus.
1280 	 * Newly added CPUs will be removed from effective_cpus and
1281 	 * newly deleted ones will be added back to effective_cpus.
1282 	 */
1283 	spin_lock_irq(&callback_lock);
1284 	if (adding) {
1285 		cpumask_or(parent->subparts_cpus,
1286 			   parent->subparts_cpus, tmp->addmask);
1287 		cpumask_andnot(parent->effective_cpus,
1288 			       parent->effective_cpus, tmp->addmask);
1289 	}
1290 	if (deleting) {
1291 		cpumask_andnot(parent->subparts_cpus,
1292 			       parent->subparts_cpus, tmp->delmask);
1293 		/*
1294 		 * Some of the CPUs in subparts_cpus might have been offlined.
1295 		 */
1296 		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1297 		cpumask_or(parent->effective_cpus,
1298 			   parent->effective_cpus, tmp->delmask);
1299 	}
1300 
1301 	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1302 	spin_unlock_irq(&callback_lock);
1303 
1304 	return cmd == partcmd_update;
1305 }
1306 
1307 /*
1308  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1309  * @cs:  the cpuset to consider
1310  * @tmp: temp variables for calculating effective_cpus & partition setup
1311  *
1312  * When configured cpumask is changed, the effective cpumasks of this cpuset
1313  * and all its descendants need to be updated.
1314  *
1315  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
1316  *
1317  * Called with cpuset_mutex held
1318  */
1319 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
1320 {
1321 	struct cpuset *cp;
1322 	struct cgroup_subsys_state *pos_css;
1323 	bool need_rebuild_sched_domains = false;
1324 
1325 	rcu_read_lock();
1326 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1327 		struct cpuset *parent = parent_cs(cp);
1328 
1329 		compute_effective_cpumask(tmp->new_cpus, cp, parent);
1330 
1331 		/*
1332 		 * If it becomes empty, inherit the effective mask of the
1333 		 * parent, which is guaranteed to have some CPUs.
1334 		 */
1335 		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1336 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1337 			if (!cp->use_parent_ecpus) {
1338 				cp->use_parent_ecpus = true;
1339 				parent->child_ecpus_count++;
1340 			}
1341 		} else if (cp->use_parent_ecpus) {
1342 			cp->use_parent_ecpus = false;
1343 			WARN_ON_ONCE(!parent->child_ecpus_count);
1344 			parent->child_ecpus_count--;
1345 		}
1346 
1347 		/*
1348 		 * Skip the whole subtree if the cpumask remains the same
1349 		 * and has no partition root state.
1350 		 */
1351 		if (!cp->partition_root_state &&
1352 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
1353 			pos_css = css_rightmost_descendant(pos_css);
1354 			continue;
1355 		}
1356 
1357 		/*
1358 		 * update_parent_subparts_cpumask() should have been called
1359 		 * for cs already in update_cpumask(). We should also call
1360 		 * update_tasks_cpumask() again for tasks in the parent
1361 		 * cpuset if the parent's subparts_cpus changes.
1362 		 */
1363 		if ((cp != cs) && cp->partition_root_state) {
1364 			switch (parent->partition_root_state) {
1365 			case PRS_DISABLED:
1366 				/*
1367 				 * If parent is not a partition root or an
1368 				 * invalid partition root, clear the state
1369 				 * state and the CS_CPU_EXCLUSIVE flag.
1370 				 */
1371 				WARN_ON_ONCE(cp->partition_root_state
1372 					     != PRS_ERROR);
1373 				cp->partition_root_state = 0;
1374 
1375 				/*
1376 				 * clear_bit() is an atomic operation and
1377 				 * readers aren't interested in the state
1378 				 * of CS_CPU_EXCLUSIVE anyway. So we can
1379 				 * just update the flag without holding
1380 				 * the callback_lock.
1381 				 */
1382 				clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
1383 				break;
1384 
1385 			case PRS_ENABLED:
1386 				if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
1387 					update_tasks_cpumask(parent);
1388 				break;
1389 
1390 			case PRS_ERROR:
1391 				/*
1392 				 * When parent is invalid, it has to be too.
1393 				 */
1394 				cp->partition_root_state = PRS_ERROR;
1395 				if (cp->nr_subparts_cpus) {
1396 					cp->nr_subparts_cpus = 0;
1397 					cpumask_clear(cp->subparts_cpus);
1398 				}
1399 				break;
1400 			}
1401 		}
1402 
1403 		if (!css_tryget_online(&cp->css))
1404 			continue;
1405 		rcu_read_unlock();
1406 
1407 		spin_lock_irq(&callback_lock);
1408 
1409 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1410 		if (cp->nr_subparts_cpus &&
1411 		   (cp->partition_root_state != PRS_ENABLED)) {
1412 			cp->nr_subparts_cpus = 0;
1413 			cpumask_clear(cp->subparts_cpus);
1414 		} else if (cp->nr_subparts_cpus) {
1415 			/*
1416 			 * Make sure that effective_cpus & subparts_cpus
1417 			 * are mutually exclusive.
1418 			 *
1419 			 * In the unlikely event that effective_cpus
1420 			 * becomes empty. we clear cp->nr_subparts_cpus and
1421 			 * let its child partition roots to compete for
1422 			 * CPUs again.
1423 			 */
1424 			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1425 				       cp->subparts_cpus);
1426 			if (cpumask_empty(cp->effective_cpus)) {
1427 				cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1428 				cpumask_clear(cp->subparts_cpus);
1429 				cp->nr_subparts_cpus = 0;
1430 			} else if (!cpumask_subset(cp->subparts_cpus,
1431 						   tmp->new_cpus)) {
1432 				cpumask_andnot(cp->subparts_cpus,
1433 					cp->subparts_cpus, tmp->new_cpus);
1434 				cp->nr_subparts_cpus
1435 					= cpumask_weight(cp->subparts_cpus);
1436 			}
1437 		}
1438 		spin_unlock_irq(&callback_lock);
1439 
1440 		WARN_ON(!is_in_v2_mode() &&
1441 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1442 
1443 		update_tasks_cpumask(cp);
1444 
1445 		/*
1446 		 * On legacy hierarchy, if the effective cpumask of any non-
1447 		 * empty cpuset is changed, we need to rebuild sched domains.
1448 		 * On default hierarchy, the cpuset needs to be a partition
1449 		 * root as well.
1450 		 */
1451 		if (!cpumask_empty(cp->cpus_allowed) &&
1452 		    is_sched_load_balance(cp) &&
1453 		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1454 		    is_partition_root(cp)))
1455 			need_rebuild_sched_domains = true;
1456 
1457 		rcu_read_lock();
1458 		css_put(&cp->css);
1459 	}
1460 	rcu_read_unlock();
1461 
1462 	if (need_rebuild_sched_domains)
1463 		rebuild_sched_domains_locked();
1464 }
1465 
1466 /**
1467  * update_sibling_cpumasks - Update siblings cpumasks
1468  * @parent:  Parent cpuset
1469  * @cs:      Current cpuset
1470  * @tmp:     Temp variables
1471  */
1472 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1473 				    struct tmpmasks *tmp)
1474 {
1475 	struct cpuset *sibling;
1476 	struct cgroup_subsys_state *pos_css;
1477 
1478 	/*
1479 	 * Check all its siblings and call update_cpumasks_hier()
1480 	 * if their use_parent_ecpus flag is set in order for them
1481 	 * to use the right effective_cpus value.
1482 	 */
1483 	rcu_read_lock();
1484 	cpuset_for_each_child(sibling, pos_css, parent) {
1485 		if (sibling == cs)
1486 			continue;
1487 		if (!sibling->use_parent_ecpus)
1488 			continue;
1489 
1490 		update_cpumasks_hier(sibling, tmp);
1491 	}
1492 	rcu_read_unlock();
1493 }
1494 
1495 /**
1496  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1497  * @cs: the cpuset to consider
1498  * @trialcs: trial cpuset
1499  * @buf: buffer of cpu numbers written to this cpuset
1500  */
1501 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1502 			  const char *buf)
1503 {
1504 	int retval;
1505 	struct tmpmasks tmp;
1506 
1507 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1508 	if (cs == &top_cpuset)
1509 		return -EACCES;
1510 
1511 	/*
1512 	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1513 	 * Since cpulist_parse() fails on an empty mask, we special case
1514 	 * that parsing.  The validate_change() call ensures that cpusets
1515 	 * with tasks have cpus.
1516 	 */
1517 	if (!*buf) {
1518 		cpumask_clear(trialcs->cpus_allowed);
1519 	} else {
1520 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
1521 		if (retval < 0)
1522 			return retval;
1523 
1524 		if (!cpumask_subset(trialcs->cpus_allowed,
1525 				    top_cpuset.cpus_allowed))
1526 			return -EINVAL;
1527 	}
1528 
1529 	/* Nothing to do if the cpus didn't change */
1530 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1531 		return 0;
1532 
1533 	retval = validate_change(cs, trialcs);
1534 	if (retval < 0)
1535 		return retval;
1536 
1537 #ifdef CONFIG_CPUMASK_OFFSTACK
1538 	/*
1539 	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
1540 	 * to allocated cpumasks.
1541 	 */
1542 	tmp.addmask  = trialcs->subparts_cpus;
1543 	tmp.delmask  = trialcs->effective_cpus;
1544 	tmp.new_cpus = trialcs->cpus_allowed;
1545 #endif
1546 
1547 	if (cs->partition_root_state) {
1548 		/* Cpumask of a partition root cannot be empty */
1549 		if (cpumask_empty(trialcs->cpus_allowed))
1550 			return -EINVAL;
1551 		if (update_parent_subparts_cpumask(cs, partcmd_update,
1552 					trialcs->cpus_allowed, &tmp) < 0)
1553 			return -EINVAL;
1554 	}
1555 
1556 	spin_lock_irq(&callback_lock);
1557 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1558 
1559 	/*
1560 	 * Make sure that subparts_cpus is a subset of cpus_allowed.
1561 	 */
1562 	if (cs->nr_subparts_cpus) {
1563 		cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
1564 			       cs->cpus_allowed);
1565 		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1566 	}
1567 	spin_unlock_irq(&callback_lock);
1568 
1569 	update_cpumasks_hier(cs, &tmp);
1570 
1571 	if (cs->partition_root_state) {
1572 		struct cpuset *parent = parent_cs(cs);
1573 
1574 		/*
1575 		 * For partition root, update the cpumasks of sibling
1576 		 * cpusets if they use parent's effective_cpus.
1577 		 */
1578 		if (parent->child_ecpus_count)
1579 			update_sibling_cpumasks(parent, cs, &tmp);
1580 	}
1581 	return 0;
1582 }
1583 
1584 /*
1585  * Migrate memory region from one set of nodes to another.  This is
1586  * performed asynchronously as it can be called from process migration path
1587  * holding locks involved in process management.  All mm migrations are
1588  * performed in the queued order and can be waited for by flushing
1589  * cpuset_migrate_mm_wq.
1590  */
1591 
1592 struct cpuset_migrate_mm_work {
1593 	struct work_struct	work;
1594 	struct mm_struct	*mm;
1595 	nodemask_t		from;
1596 	nodemask_t		to;
1597 };
1598 
1599 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1600 {
1601 	struct cpuset_migrate_mm_work *mwork =
1602 		container_of(work, struct cpuset_migrate_mm_work, work);
1603 
1604 	/* on a wq worker, no need to worry about %current's mems_allowed */
1605 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1606 	mmput(mwork->mm);
1607 	kfree(mwork);
1608 }
1609 
1610 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1611 							const nodemask_t *to)
1612 {
1613 	struct cpuset_migrate_mm_work *mwork;
1614 
1615 	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1616 	if (mwork) {
1617 		mwork->mm = mm;
1618 		mwork->from = *from;
1619 		mwork->to = *to;
1620 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1621 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
1622 	} else {
1623 		mmput(mm);
1624 	}
1625 }
1626 
1627 static void cpuset_post_attach(void)
1628 {
1629 	flush_workqueue(cpuset_migrate_mm_wq);
1630 }
1631 
1632 /*
1633  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1634  * @tsk: the task to change
1635  * @newmems: new nodes that the task will be set
1636  *
1637  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1638  * and rebind an eventual tasks' mempolicy. If the task is allocating in
1639  * parallel, it might temporarily see an empty intersection, which results in
1640  * a seqlock check and retry before OOM or allocation failure.
1641  */
1642 static void cpuset_change_task_nodemask(struct task_struct *tsk,
1643 					nodemask_t *newmems)
1644 {
1645 	task_lock(tsk);
1646 
1647 	local_irq_disable();
1648 	write_seqcount_begin(&tsk->mems_allowed_seq);
1649 
1650 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1651 	mpol_rebind_task(tsk, newmems);
1652 	tsk->mems_allowed = *newmems;
1653 
1654 	write_seqcount_end(&tsk->mems_allowed_seq);
1655 	local_irq_enable();
1656 
1657 	task_unlock(tsk);
1658 }
1659 
1660 static void *cpuset_being_rebound;
1661 
1662 /**
1663  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1664  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1665  *
1666  * Iterate through each task of @cs updating its mems_allowed to the
1667  * effective cpuset's.  As this function is called with cpuset_mutex held,
1668  * cpuset membership stays stable.
1669  */
1670 static void update_tasks_nodemask(struct cpuset *cs)
1671 {
1672 	static nodemask_t newmems;	/* protected by cpuset_mutex */
1673 	struct css_task_iter it;
1674 	struct task_struct *task;
1675 
1676 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1677 
1678 	guarantee_online_mems(cs, &newmems);
1679 
1680 	/*
1681 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
1682 	 * take while holding tasklist_lock.  Forks can happen - the
1683 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1684 	 * and rebind their vma mempolicies too.  Because we still hold
1685 	 * the global cpuset_mutex, we know that no other rebind effort
1686 	 * will be contending for the global variable cpuset_being_rebound.
1687 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1688 	 * is idempotent.  Also migrate pages in each mm to new nodes.
1689 	 */
1690 	css_task_iter_start(&cs->css, 0, &it);
1691 	while ((task = css_task_iter_next(&it))) {
1692 		struct mm_struct *mm;
1693 		bool migrate;
1694 
1695 		cpuset_change_task_nodemask(task, &newmems);
1696 
1697 		mm = get_task_mm(task);
1698 		if (!mm)
1699 			continue;
1700 
1701 		migrate = is_memory_migrate(cs);
1702 
1703 		mpol_rebind_mm(mm, &cs->mems_allowed);
1704 		if (migrate)
1705 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1706 		else
1707 			mmput(mm);
1708 	}
1709 	css_task_iter_end(&it);
1710 
1711 	/*
1712 	 * All the tasks' nodemasks have been updated, update
1713 	 * cs->old_mems_allowed.
1714 	 */
1715 	cs->old_mems_allowed = newmems;
1716 
1717 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1718 	cpuset_being_rebound = NULL;
1719 }
1720 
1721 /*
1722  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1723  * @cs: the cpuset to consider
1724  * @new_mems: a temp variable for calculating new effective_mems
1725  *
1726  * When configured nodemask is changed, the effective nodemasks of this cpuset
1727  * and all its descendants need to be updated.
1728  *
1729  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
1730  *
1731  * Called with cpuset_mutex held
1732  */
1733 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1734 {
1735 	struct cpuset *cp;
1736 	struct cgroup_subsys_state *pos_css;
1737 
1738 	rcu_read_lock();
1739 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1740 		struct cpuset *parent = parent_cs(cp);
1741 
1742 		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1743 
1744 		/*
1745 		 * If it becomes empty, inherit the effective mask of the
1746 		 * parent, which is guaranteed to have some MEMs.
1747 		 */
1748 		if (is_in_v2_mode() && nodes_empty(*new_mems))
1749 			*new_mems = parent->effective_mems;
1750 
1751 		/* Skip the whole subtree if the nodemask remains the same. */
1752 		if (nodes_equal(*new_mems, cp->effective_mems)) {
1753 			pos_css = css_rightmost_descendant(pos_css);
1754 			continue;
1755 		}
1756 
1757 		if (!css_tryget_online(&cp->css))
1758 			continue;
1759 		rcu_read_unlock();
1760 
1761 		spin_lock_irq(&callback_lock);
1762 		cp->effective_mems = *new_mems;
1763 		spin_unlock_irq(&callback_lock);
1764 
1765 		WARN_ON(!is_in_v2_mode() &&
1766 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
1767 
1768 		update_tasks_nodemask(cp);
1769 
1770 		rcu_read_lock();
1771 		css_put(&cp->css);
1772 	}
1773 	rcu_read_unlock();
1774 }
1775 
1776 /*
1777  * Handle user request to change the 'mems' memory placement
1778  * of a cpuset.  Needs to validate the request, update the
1779  * cpusets mems_allowed, and for each task in the cpuset,
1780  * update mems_allowed and rebind task's mempolicy and any vma
1781  * mempolicies and if the cpuset is marked 'memory_migrate',
1782  * migrate the tasks pages to the new memory.
1783  *
1784  * Call with cpuset_mutex held. May take callback_lock during call.
1785  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1786  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
1787  * their mempolicies to the cpusets new mems_allowed.
1788  */
1789 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1790 			   const char *buf)
1791 {
1792 	int retval;
1793 
1794 	/*
1795 	 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
1796 	 * it's read-only
1797 	 */
1798 	if (cs == &top_cpuset) {
1799 		retval = -EACCES;
1800 		goto done;
1801 	}
1802 
1803 	/*
1804 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1805 	 * Since nodelist_parse() fails on an empty mask, we special case
1806 	 * that parsing.  The validate_change() call ensures that cpusets
1807 	 * with tasks have memory.
1808 	 */
1809 	if (!*buf) {
1810 		nodes_clear(trialcs->mems_allowed);
1811 	} else {
1812 		retval = nodelist_parse(buf, trialcs->mems_allowed);
1813 		if (retval < 0)
1814 			goto done;
1815 
1816 		if (!nodes_subset(trialcs->mems_allowed,
1817 				  top_cpuset.mems_allowed)) {
1818 			retval = -EINVAL;
1819 			goto done;
1820 		}
1821 	}
1822 
1823 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1824 		retval = 0;		/* Too easy - nothing to do */
1825 		goto done;
1826 	}
1827 	retval = validate_change(cs, trialcs);
1828 	if (retval < 0)
1829 		goto done;
1830 
1831 	spin_lock_irq(&callback_lock);
1832 	cs->mems_allowed = trialcs->mems_allowed;
1833 	spin_unlock_irq(&callback_lock);
1834 
1835 	/* use trialcs->mems_allowed as a temp variable */
1836 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
1837 done:
1838 	return retval;
1839 }
1840 
1841 bool current_cpuset_is_being_rebound(void)
1842 {
1843 	bool ret;
1844 
1845 	rcu_read_lock();
1846 	ret = task_cs(current) == cpuset_being_rebound;
1847 	rcu_read_unlock();
1848 
1849 	return ret;
1850 }
1851 
1852 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1853 {
1854 #ifdef CONFIG_SMP
1855 	if (val < -1 || val >= sched_domain_level_max)
1856 		return -EINVAL;
1857 #endif
1858 
1859 	if (val != cs->relax_domain_level) {
1860 		cs->relax_domain_level = val;
1861 		if (!cpumask_empty(cs->cpus_allowed) &&
1862 		    is_sched_load_balance(cs))
1863 			rebuild_sched_domains_locked();
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 /**
1870  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1871  * @cs: the cpuset in which each task's spread flags needs to be changed
1872  *
1873  * Iterate through each task of @cs updating its spread flags.  As this
1874  * function is called with cpuset_mutex held, cpuset membership stays
1875  * stable.
1876  */
1877 static void update_tasks_flags(struct cpuset *cs)
1878 {
1879 	struct css_task_iter it;
1880 	struct task_struct *task;
1881 
1882 	css_task_iter_start(&cs->css, 0, &it);
1883 	while ((task = css_task_iter_next(&it)))
1884 		cpuset_update_task_spread_flag(cs, task);
1885 	css_task_iter_end(&it);
1886 }
1887 
1888 /*
1889  * update_flag - read a 0 or a 1 in a file and update associated flag
1890  * bit:		the bit to update (see cpuset_flagbits_t)
1891  * cs:		the cpuset to update
1892  * turning_on: 	whether the flag is being set or cleared
1893  *
1894  * Call with cpuset_mutex held.
1895  */
1896 
1897 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1898 		       int turning_on)
1899 {
1900 	struct cpuset *trialcs;
1901 	int balance_flag_changed;
1902 	int spread_flag_changed;
1903 	int err;
1904 
1905 	trialcs = alloc_trial_cpuset(cs);
1906 	if (!trialcs)
1907 		return -ENOMEM;
1908 
1909 	if (turning_on)
1910 		set_bit(bit, &trialcs->flags);
1911 	else
1912 		clear_bit(bit, &trialcs->flags);
1913 
1914 	err = validate_change(cs, trialcs);
1915 	if (err < 0)
1916 		goto out;
1917 
1918 	balance_flag_changed = (is_sched_load_balance(cs) !=
1919 				is_sched_load_balance(trialcs));
1920 
1921 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1922 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1923 
1924 	spin_lock_irq(&callback_lock);
1925 	cs->flags = trialcs->flags;
1926 	spin_unlock_irq(&callback_lock);
1927 
1928 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1929 		rebuild_sched_domains_locked();
1930 
1931 	if (spread_flag_changed)
1932 		update_tasks_flags(cs);
1933 out:
1934 	free_cpuset(trialcs);
1935 	return err;
1936 }
1937 
1938 /*
1939  * update_prstate - update partititon_root_state
1940  * cs:	the cpuset to update
1941  * val: 0 - disabled, 1 - enabled
1942  *
1943  * Call with cpuset_mutex held.
1944  */
1945 static int update_prstate(struct cpuset *cs, int val)
1946 {
1947 	int err;
1948 	struct cpuset *parent = parent_cs(cs);
1949 	struct tmpmasks tmp;
1950 
1951 	if ((val != 0) && (val != 1))
1952 		return -EINVAL;
1953 	if (val == cs->partition_root_state)
1954 		return 0;
1955 
1956 	/*
1957 	 * Cannot force a partial or invalid partition root to a full
1958 	 * partition root.
1959 	 */
1960 	if (val && cs->partition_root_state)
1961 		return -EINVAL;
1962 
1963 	if (alloc_cpumasks(NULL, &tmp))
1964 		return -ENOMEM;
1965 
1966 	err = -EINVAL;
1967 	if (!cs->partition_root_state) {
1968 		/*
1969 		 * Turning on partition root requires setting the
1970 		 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
1971 		 * cannot be NULL.
1972 		 */
1973 		if (cpumask_empty(cs->cpus_allowed))
1974 			goto out;
1975 
1976 		err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
1977 		if (err)
1978 			goto out;
1979 
1980 		err = update_parent_subparts_cpumask(cs, partcmd_enable,
1981 						     NULL, &tmp);
1982 		if (err) {
1983 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1984 			goto out;
1985 		}
1986 		cs->partition_root_state = PRS_ENABLED;
1987 	} else {
1988 		/*
1989 		 * Turning off partition root will clear the
1990 		 * CS_CPU_EXCLUSIVE bit.
1991 		 */
1992 		if (cs->partition_root_state == PRS_ERROR) {
1993 			cs->partition_root_state = 0;
1994 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1995 			err = 0;
1996 			goto out;
1997 		}
1998 
1999 		err = update_parent_subparts_cpumask(cs, partcmd_disable,
2000 						     NULL, &tmp);
2001 		if (err)
2002 			goto out;
2003 
2004 		cs->partition_root_state = 0;
2005 
2006 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
2007 		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2008 	}
2009 
2010 	/*
2011 	 * Update cpumask of parent's tasks except when it is the top
2012 	 * cpuset as some system daemons cannot be mapped to other CPUs.
2013 	 */
2014 	if (parent != &top_cpuset)
2015 		update_tasks_cpumask(parent);
2016 
2017 	if (parent->child_ecpus_count)
2018 		update_sibling_cpumasks(parent, cs, &tmp);
2019 
2020 	rebuild_sched_domains_locked();
2021 out:
2022 	free_cpumasks(NULL, &tmp);
2023 	return err;
2024 }
2025 
2026 /*
2027  * Frequency meter - How fast is some event occurring?
2028  *
2029  * These routines manage a digitally filtered, constant time based,
2030  * event frequency meter.  There are four routines:
2031  *   fmeter_init() - initialize a frequency meter.
2032  *   fmeter_markevent() - called each time the event happens.
2033  *   fmeter_getrate() - returns the recent rate of such events.
2034  *   fmeter_update() - internal routine used to update fmeter.
2035  *
2036  * A common data structure is passed to each of these routines,
2037  * which is used to keep track of the state required to manage the
2038  * frequency meter and its digital filter.
2039  *
2040  * The filter works on the number of events marked per unit time.
2041  * The filter is single-pole low-pass recursive (IIR).  The time unit
2042  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2043  * simulate 3 decimal digits of precision (multiplied by 1000).
2044  *
2045  * With an FM_COEF of 933, and a time base of 1 second, the filter
2046  * has a half-life of 10 seconds, meaning that if the events quit
2047  * happening, then the rate returned from the fmeter_getrate()
2048  * will be cut in half each 10 seconds, until it converges to zero.
2049  *
2050  * It is not worth doing a real infinitely recursive filter.  If more
2051  * than FM_MAXTICKS ticks have elapsed since the last filter event,
2052  * just compute FM_MAXTICKS ticks worth, by which point the level
2053  * will be stable.
2054  *
2055  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2056  * arithmetic overflow in the fmeter_update() routine.
2057  *
2058  * Given the simple 32 bit integer arithmetic used, this meter works
2059  * best for reporting rates between one per millisecond (msec) and
2060  * one per 32 (approx) seconds.  At constant rates faster than one
2061  * per msec it maxes out at values just under 1,000,000.  At constant
2062  * rates between one per msec, and one per second it will stabilize
2063  * to a value N*1000, where N is the rate of events per second.
2064  * At constant rates between one per second and one per 32 seconds,
2065  * it will be choppy, moving up on the seconds that have an event,
2066  * and then decaying until the next event.  At rates slower than
2067  * about one in 32 seconds, it decays all the way back to zero between
2068  * each event.
2069  */
2070 
2071 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
2072 #define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
2073 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
2074 #define FM_SCALE 1000		/* faux fixed point scale */
2075 
2076 /* Initialize a frequency meter */
2077 static void fmeter_init(struct fmeter *fmp)
2078 {
2079 	fmp->cnt = 0;
2080 	fmp->val = 0;
2081 	fmp->time = 0;
2082 	spin_lock_init(&fmp->lock);
2083 }
2084 
2085 /* Internal meter update - process cnt events and update value */
2086 static void fmeter_update(struct fmeter *fmp)
2087 {
2088 	time64_t now;
2089 	u32 ticks;
2090 
2091 	now = ktime_get_seconds();
2092 	ticks = now - fmp->time;
2093 
2094 	if (ticks == 0)
2095 		return;
2096 
2097 	ticks = min(FM_MAXTICKS, ticks);
2098 	while (ticks-- > 0)
2099 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2100 	fmp->time = now;
2101 
2102 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2103 	fmp->cnt = 0;
2104 }
2105 
2106 /* Process any previous ticks, then bump cnt by one (times scale). */
2107 static void fmeter_markevent(struct fmeter *fmp)
2108 {
2109 	spin_lock(&fmp->lock);
2110 	fmeter_update(fmp);
2111 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2112 	spin_unlock(&fmp->lock);
2113 }
2114 
2115 /* Process any previous ticks, then return current value. */
2116 static int fmeter_getrate(struct fmeter *fmp)
2117 {
2118 	int val;
2119 
2120 	spin_lock(&fmp->lock);
2121 	fmeter_update(fmp);
2122 	val = fmp->val;
2123 	spin_unlock(&fmp->lock);
2124 	return val;
2125 }
2126 
2127 static struct cpuset *cpuset_attach_old_cs;
2128 
2129 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2130 static int cpuset_can_attach(struct cgroup_taskset *tset)
2131 {
2132 	struct cgroup_subsys_state *css;
2133 	struct cpuset *cs;
2134 	struct task_struct *task;
2135 	int ret;
2136 
2137 	/* used later by cpuset_attach() */
2138 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2139 	cs = css_cs(css);
2140 
2141 	percpu_down_write(&cpuset_rwsem);
2142 
2143 	/* allow moving tasks into an empty cpuset if on default hierarchy */
2144 	ret = -ENOSPC;
2145 	if (!is_in_v2_mode() &&
2146 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
2147 		goto out_unlock;
2148 
2149 	cgroup_taskset_for_each(task, css, tset) {
2150 		ret = task_can_attach(task, cs->cpus_allowed);
2151 		if (ret)
2152 			goto out_unlock;
2153 		ret = security_task_setscheduler(task);
2154 		if (ret)
2155 			goto out_unlock;
2156 	}
2157 
2158 	/*
2159 	 * Mark attach is in progress.  This makes validate_change() fail
2160 	 * changes which zero cpus/mems_allowed.
2161 	 */
2162 	cs->attach_in_progress++;
2163 	ret = 0;
2164 out_unlock:
2165 	percpu_up_write(&cpuset_rwsem);
2166 	return ret;
2167 }
2168 
2169 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2170 {
2171 	struct cgroup_subsys_state *css;
2172 
2173 	cgroup_taskset_first(tset, &css);
2174 
2175 	percpu_down_write(&cpuset_rwsem);
2176 	css_cs(css)->attach_in_progress--;
2177 	percpu_up_write(&cpuset_rwsem);
2178 }
2179 
2180 /*
2181  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
2182  * but we can't allocate it dynamically there.  Define it global and
2183  * allocate from cpuset_init().
2184  */
2185 static cpumask_var_t cpus_attach;
2186 
2187 static void cpuset_attach(struct cgroup_taskset *tset)
2188 {
2189 	/* static buf protected by cpuset_mutex */
2190 	static nodemask_t cpuset_attach_nodemask_to;
2191 	struct task_struct *task;
2192 	struct task_struct *leader;
2193 	struct cgroup_subsys_state *css;
2194 	struct cpuset *cs;
2195 	struct cpuset *oldcs = cpuset_attach_old_cs;
2196 
2197 	cgroup_taskset_first(tset, &css);
2198 	cs = css_cs(css);
2199 
2200 	percpu_down_write(&cpuset_rwsem);
2201 
2202 	/* prepare for attach */
2203 	if (cs == &top_cpuset)
2204 		cpumask_copy(cpus_attach, cpu_possible_mask);
2205 	else
2206 		guarantee_online_cpus(cs, cpus_attach);
2207 
2208 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2209 
2210 	cgroup_taskset_for_each(task, css, tset) {
2211 		/*
2212 		 * can_attach beforehand should guarantee that this doesn't
2213 		 * fail.  TODO: have a better way to handle failure here
2214 		 */
2215 		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2216 
2217 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2218 		cpuset_update_task_spread_flag(cs, task);
2219 	}
2220 
2221 	/*
2222 	 * Change mm for all threadgroup leaders. This is expensive and may
2223 	 * sleep and should be moved outside migration path proper.
2224 	 */
2225 	cpuset_attach_nodemask_to = cs->effective_mems;
2226 	cgroup_taskset_for_each_leader(leader, css, tset) {
2227 		struct mm_struct *mm = get_task_mm(leader);
2228 
2229 		if (mm) {
2230 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2231 
2232 			/*
2233 			 * old_mems_allowed is the same with mems_allowed
2234 			 * here, except if this task is being moved
2235 			 * automatically due to hotplug.  In that case
2236 			 * @mems_allowed has been updated and is empty, so
2237 			 * @old_mems_allowed is the right nodesets that we
2238 			 * migrate mm from.
2239 			 */
2240 			if (is_memory_migrate(cs))
2241 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2242 						  &cpuset_attach_nodemask_to);
2243 			else
2244 				mmput(mm);
2245 		}
2246 	}
2247 
2248 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
2249 
2250 	cs->attach_in_progress--;
2251 	if (!cs->attach_in_progress)
2252 		wake_up(&cpuset_attach_wq);
2253 
2254 	percpu_up_write(&cpuset_rwsem);
2255 }
2256 
2257 /* The various types of files and directories in a cpuset file system */
2258 
2259 typedef enum {
2260 	FILE_MEMORY_MIGRATE,
2261 	FILE_CPULIST,
2262 	FILE_MEMLIST,
2263 	FILE_EFFECTIVE_CPULIST,
2264 	FILE_EFFECTIVE_MEMLIST,
2265 	FILE_SUBPARTS_CPULIST,
2266 	FILE_CPU_EXCLUSIVE,
2267 	FILE_MEM_EXCLUSIVE,
2268 	FILE_MEM_HARDWALL,
2269 	FILE_SCHED_LOAD_BALANCE,
2270 	FILE_PARTITION_ROOT,
2271 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
2272 	FILE_MEMORY_PRESSURE_ENABLED,
2273 	FILE_MEMORY_PRESSURE,
2274 	FILE_SPREAD_PAGE,
2275 	FILE_SPREAD_SLAB,
2276 } cpuset_filetype_t;
2277 
2278 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2279 			    u64 val)
2280 {
2281 	struct cpuset *cs = css_cs(css);
2282 	cpuset_filetype_t type = cft->private;
2283 	int retval = 0;
2284 
2285 	get_online_cpus();
2286 	percpu_down_write(&cpuset_rwsem);
2287 	if (!is_cpuset_online(cs)) {
2288 		retval = -ENODEV;
2289 		goto out_unlock;
2290 	}
2291 
2292 	switch (type) {
2293 	case FILE_CPU_EXCLUSIVE:
2294 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2295 		break;
2296 	case FILE_MEM_EXCLUSIVE:
2297 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2298 		break;
2299 	case FILE_MEM_HARDWALL:
2300 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
2301 		break;
2302 	case FILE_SCHED_LOAD_BALANCE:
2303 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2304 		break;
2305 	case FILE_MEMORY_MIGRATE:
2306 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2307 		break;
2308 	case FILE_MEMORY_PRESSURE_ENABLED:
2309 		cpuset_memory_pressure_enabled = !!val;
2310 		break;
2311 	case FILE_SPREAD_PAGE:
2312 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
2313 		break;
2314 	case FILE_SPREAD_SLAB:
2315 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
2316 		break;
2317 	default:
2318 		retval = -EINVAL;
2319 		break;
2320 	}
2321 out_unlock:
2322 	percpu_up_write(&cpuset_rwsem);
2323 	put_online_cpus();
2324 	return retval;
2325 }
2326 
2327 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2328 			    s64 val)
2329 {
2330 	struct cpuset *cs = css_cs(css);
2331 	cpuset_filetype_t type = cft->private;
2332 	int retval = -ENODEV;
2333 
2334 	get_online_cpus();
2335 	percpu_down_write(&cpuset_rwsem);
2336 	if (!is_cpuset_online(cs))
2337 		goto out_unlock;
2338 
2339 	switch (type) {
2340 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2341 		retval = update_relax_domain_level(cs, val);
2342 		break;
2343 	default:
2344 		retval = -EINVAL;
2345 		break;
2346 	}
2347 out_unlock:
2348 	percpu_up_write(&cpuset_rwsem);
2349 	put_online_cpus();
2350 	return retval;
2351 }
2352 
2353 /*
2354  * Common handling for a write to a "cpus" or "mems" file.
2355  */
2356 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2357 				    char *buf, size_t nbytes, loff_t off)
2358 {
2359 	struct cpuset *cs = css_cs(of_css(of));
2360 	struct cpuset *trialcs;
2361 	int retval = -ENODEV;
2362 
2363 	buf = strstrip(buf);
2364 
2365 	/*
2366 	 * CPU or memory hotunplug may leave @cs w/o any execution
2367 	 * resources, in which case the hotplug code asynchronously updates
2368 	 * configuration and transfers all tasks to the nearest ancestor
2369 	 * which can execute.
2370 	 *
2371 	 * As writes to "cpus" or "mems" may restore @cs's execution
2372 	 * resources, wait for the previously scheduled operations before
2373 	 * proceeding, so that we don't end up keep removing tasks added
2374 	 * after execution capability is restored.
2375 	 *
2376 	 * cpuset_hotplug_work calls back into cgroup core via
2377 	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2378 	 * operation like this one can lead to a deadlock through kernfs
2379 	 * active_ref protection.  Let's break the protection.  Losing the
2380 	 * protection is okay as we check whether @cs is online after
2381 	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
2382 	 * hierarchies.
2383 	 */
2384 	css_get(&cs->css);
2385 	kernfs_break_active_protection(of->kn);
2386 	flush_work(&cpuset_hotplug_work);
2387 
2388 	get_online_cpus();
2389 	percpu_down_write(&cpuset_rwsem);
2390 	if (!is_cpuset_online(cs))
2391 		goto out_unlock;
2392 
2393 	trialcs = alloc_trial_cpuset(cs);
2394 	if (!trialcs) {
2395 		retval = -ENOMEM;
2396 		goto out_unlock;
2397 	}
2398 
2399 	switch (of_cft(of)->private) {
2400 	case FILE_CPULIST:
2401 		retval = update_cpumask(cs, trialcs, buf);
2402 		break;
2403 	case FILE_MEMLIST:
2404 		retval = update_nodemask(cs, trialcs, buf);
2405 		break;
2406 	default:
2407 		retval = -EINVAL;
2408 		break;
2409 	}
2410 
2411 	free_cpuset(trialcs);
2412 out_unlock:
2413 	percpu_up_write(&cpuset_rwsem);
2414 	put_online_cpus();
2415 	kernfs_unbreak_active_protection(of->kn);
2416 	css_put(&cs->css);
2417 	flush_workqueue(cpuset_migrate_mm_wq);
2418 	return retval ?: nbytes;
2419 }
2420 
2421 /*
2422  * These ascii lists should be read in a single call, by using a user
2423  * buffer large enough to hold the entire map.  If read in smaller
2424  * chunks, there is no guarantee of atomicity.  Since the display format
2425  * used, list of ranges of sequential numbers, is variable length,
2426  * and since these maps can change value dynamically, one could read
2427  * gibberish by doing partial reads while a list was changing.
2428  */
2429 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2430 {
2431 	struct cpuset *cs = css_cs(seq_css(sf));
2432 	cpuset_filetype_t type = seq_cft(sf)->private;
2433 	int ret = 0;
2434 
2435 	spin_lock_irq(&callback_lock);
2436 
2437 	switch (type) {
2438 	case FILE_CPULIST:
2439 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2440 		break;
2441 	case FILE_MEMLIST:
2442 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2443 		break;
2444 	case FILE_EFFECTIVE_CPULIST:
2445 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2446 		break;
2447 	case FILE_EFFECTIVE_MEMLIST:
2448 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2449 		break;
2450 	case FILE_SUBPARTS_CPULIST:
2451 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2452 		break;
2453 	default:
2454 		ret = -EINVAL;
2455 	}
2456 
2457 	spin_unlock_irq(&callback_lock);
2458 	return ret;
2459 }
2460 
2461 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2462 {
2463 	struct cpuset *cs = css_cs(css);
2464 	cpuset_filetype_t type = cft->private;
2465 	switch (type) {
2466 	case FILE_CPU_EXCLUSIVE:
2467 		return is_cpu_exclusive(cs);
2468 	case FILE_MEM_EXCLUSIVE:
2469 		return is_mem_exclusive(cs);
2470 	case FILE_MEM_HARDWALL:
2471 		return is_mem_hardwall(cs);
2472 	case FILE_SCHED_LOAD_BALANCE:
2473 		return is_sched_load_balance(cs);
2474 	case FILE_MEMORY_MIGRATE:
2475 		return is_memory_migrate(cs);
2476 	case FILE_MEMORY_PRESSURE_ENABLED:
2477 		return cpuset_memory_pressure_enabled;
2478 	case FILE_MEMORY_PRESSURE:
2479 		return fmeter_getrate(&cs->fmeter);
2480 	case FILE_SPREAD_PAGE:
2481 		return is_spread_page(cs);
2482 	case FILE_SPREAD_SLAB:
2483 		return is_spread_slab(cs);
2484 	default:
2485 		BUG();
2486 	}
2487 
2488 	/* Unreachable but makes gcc happy */
2489 	return 0;
2490 }
2491 
2492 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2493 {
2494 	struct cpuset *cs = css_cs(css);
2495 	cpuset_filetype_t type = cft->private;
2496 	switch (type) {
2497 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2498 		return cs->relax_domain_level;
2499 	default:
2500 		BUG();
2501 	}
2502 
2503 	/* Unreachable but makes gcc happy */
2504 	return 0;
2505 }
2506 
2507 static int sched_partition_show(struct seq_file *seq, void *v)
2508 {
2509 	struct cpuset *cs = css_cs(seq_css(seq));
2510 
2511 	switch (cs->partition_root_state) {
2512 	case PRS_ENABLED:
2513 		seq_puts(seq, "root\n");
2514 		break;
2515 	case PRS_DISABLED:
2516 		seq_puts(seq, "member\n");
2517 		break;
2518 	case PRS_ERROR:
2519 		seq_puts(seq, "root invalid\n");
2520 		break;
2521 	}
2522 	return 0;
2523 }
2524 
2525 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2526 				     size_t nbytes, loff_t off)
2527 {
2528 	struct cpuset *cs = css_cs(of_css(of));
2529 	int val;
2530 	int retval = -ENODEV;
2531 
2532 	buf = strstrip(buf);
2533 
2534 	/*
2535 	 * Convert "root" to ENABLED, and convert "member" to DISABLED.
2536 	 */
2537 	if (!strcmp(buf, "root"))
2538 		val = PRS_ENABLED;
2539 	else if (!strcmp(buf, "member"))
2540 		val = PRS_DISABLED;
2541 	else
2542 		return -EINVAL;
2543 
2544 	css_get(&cs->css);
2545 	get_online_cpus();
2546 	percpu_down_write(&cpuset_rwsem);
2547 	if (!is_cpuset_online(cs))
2548 		goto out_unlock;
2549 
2550 	retval = update_prstate(cs, val);
2551 out_unlock:
2552 	percpu_up_write(&cpuset_rwsem);
2553 	put_online_cpus();
2554 	css_put(&cs->css);
2555 	return retval ?: nbytes;
2556 }
2557 
2558 /*
2559  * for the common functions, 'private' gives the type of file
2560  */
2561 
2562 static struct cftype legacy_files[] = {
2563 	{
2564 		.name = "cpus",
2565 		.seq_show = cpuset_common_seq_show,
2566 		.write = cpuset_write_resmask,
2567 		.max_write_len = (100U + 6 * NR_CPUS),
2568 		.private = FILE_CPULIST,
2569 	},
2570 
2571 	{
2572 		.name = "mems",
2573 		.seq_show = cpuset_common_seq_show,
2574 		.write = cpuset_write_resmask,
2575 		.max_write_len = (100U + 6 * MAX_NUMNODES),
2576 		.private = FILE_MEMLIST,
2577 	},
2578 
2579 	{
2580 		.name = "effective_cpus",
2581 		.seq_show = cpuset_common_seq_show,
2582 		.private = FILE_EFFECTIVE_CPULIST,
2583 	},
2584 
2585 	{
2586 		.name = "effective_mems",
2587 		.seq_show = cpuset_common_seq_show,
2588 		.private = FILE_EFFECTIVE_MEMLIST,
2589 	},
2590 
2591 	{
2592 		.name = "cpu_exclusive",
2593 		.read_u64 = cpuset_read_u64,
2594 		.write_u64 = cpuset_write_u64,
2595 		.private = FILE_CPU_EXCLUSIVE,
2596 	},
2597 
2598 	{
2599 		.name = "mem_exclusive",
2600 		.read_u64 = cpuset_read_u64,
2601 		.write_u64 = cpuset_write_u64,
2602 		.private = FILE_MEM_EXCLUSIVE,
2603 	},
2604 
2605 	{
2606 		.name = "mem_hardwall",
2607 		.read_u64 = cpuset_read_u64,
2608 		.write_u64 = cpuset_write_u64,
2609 		.private = FILE_MEM_HARDWALL,
2610 	},
2611 
2612 	{
2613 		.name = "sched_load_balance",
2614 		.read_u64 = cpuset_read_u64,
2615 		.write_u64 = cpuset_write_u64,
2616 		.private = FILE_SCHED_LOAD_BALANCE,
2617 	},
2618 
2619 	{
2620 		.name = "sched_relax_domain_level",
2621 		.read_s64 = cpuset_read_s64,
2622 		.write_s64 = cpuset_write_s64,
2623 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2624 	},
2625 
2626 	{
2627 		.name = "memory_migrate",
2628 		.read_u64 = cpuset_read_u64,
2629 		.write_u64 = cpuset_write_u64,
2630 		.private = FILE_MEMORY_MIGRATE,
2631 	},
2632 
2633 	{
2634 		.name = "memory_pressure",
2635 		.read_u64 = cpuset_read_u64,
2636 		.private = FILE_MEMORY_PRESSURE,
2637 	},
2638 
2639 	{
2640 		.name = "memory_spread_page",
2641 		.read_u64 = cpuset_read_u64,
2642 		.write_u64 = cpuset_write_u64,
2643 		.private = FILE_SPREAD_PAGE,
2644 	},
2645 
2646 	{
2647 		.name = "memory_spread_slab",
2648 		.read_u64 = cpuset_read_u64,
2649 		.write_u64 = cpuset_write_u64,
2650 		.private = FILE_SPREAD_SLAB,
2651 	},
2652 
2653 	{
2654 		.name = "memory_pressure_enabled",
2655 		.flags = CFTYPE_ONLY_ON_ROOT,
2656 		.read_u64 = cpuset_read_u64,
2657 		.write_u64 = cpuset_write_u64,
2658 		.private = FILE_MEMORY_PRESSURE_ENABLED,
2659 	},
2660 
2661 	{ }	/* terminate */
2662 };
2663 
2664 /*
2665  * This is currently a minimal set for the default hierarchy. It can be
2666  * expanded later on by migrating more features and control files from v1.
2667  */
2668 static struct cftype dfl_files[] = {
2669 	{
2670 		.name = "cpus",
2671 		.seq_show = cpuset_common_seq_show,
2672 		.write = cpuset_write_resmask,
2673 		.max_write_len = (100U + 6 * NR_CPUS),
2674 		.private = FILE_CPULIST,
2675 		.flags = CFTYPE_NOT_ON_ROOT,
2676 	},
2677 
2678 	{
2679 		.name = "mems",
2680 		.seq_show = cpuset_common_seq_show,
2681 		.write = cpuset_write_resmask,
2682 		.max_write_len = (100U + 6 * MAX_NUMNODES),
2683 		.private = FILE_MEMLIST,
2684 		.flags = CFTYPE_NOT_ON_ROOT,
2685 	},
2686 
2687 	{
2688 		.name = "cpus.effective",
2689 		.seq_show = cpuset_common_seq_show,
2690 		.private = FILE_EFFECTIVE_CPULIST,
2691 	},
2692 
2693 	{
2694 		.name = "mems.effective",
2695 		.seq_show = cpuset_common_seq_show,
2696 		.private = FILE_EFFECTIVE_MEMLIST,
2697 	},
2698 
2699 	{
2700 		.name = "cpus.partition",
2701 		.seq_show = sched_partition_show,
2702 		.write = sched_partition_write,
2703 		.private = FILE_PARTITION_ROOT,
2704 		.flags = CFTYPE_NOT_ON_ROOT,
2705 	},
2706 
2707 	{
2708 		.name = "cpus.subpartitions",
2709 		.seq_show = cpuset_common_seq_show,
2710 		.private = FILE_SUBPARTS_CPULIST,
2711 		.flags = CFTYPE_DEBUG,
2712 	},
2713 
2714 	{ }	/* terminate */
2715 };
2716 
2717 
2718 /*
2719  *	cpuset_css_alloc - allocate a cpuset css
2720  *	cgrp:	control group that the new cpuset will be part of
2721  */
2722 
2723 static struct cgroup_subsys_state *
2724 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
2725 {
2726 	struct cpuset *cs;
2727 
2728 	if (!parent_css)
2729 		return &top_cpuset.css;
2730 
2731 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
2732 	if (!cs)
2733 		return ERR_PTR(-ENOMEM);
2734 
2735 	if (alloc_cpumasks(cs, NULL)) {
2736 		kfree(cs);
2737 		return ERR_PTR(-ENOMEM);
2738 	}
2739 
2740 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
2741 	nodes_clear(cs->mems_allowed);
2742 	nodes_clear(cs->effective_mems);
2743 	fmeter_init(&cs->fmeter);
2744 	cs->relax_domain_level = -1;
2745 
2746 	return &cs->css;
2747 }
2748 
2749 static int cpuset_css_online(struct cgroup_subsys_state *css)
2750 {
2751 	struct cpuset *cs = css_cs(css);
2752 	struct cpuset *parent = parent_cs(cs);
2753 	struct cpuset *tmp_cs;
2754 	struct cgroup_subsys_state *pos_css;
2755 
2756 	if (!parent)
2757 		return 0;
2758 
2759 	get_online_cpus();
2760 	percpu_down_write(&cpuset_rwsem);
2761 
2762 	set_bit(CS_ONLINE, &cs->flags);
2763 	if (is_spread_page(parent))
2764 		set_bit(CS_SPREAD_PAGE, &cs->flags);
2765 	if (is_spread_slab(parent))
2766 		set_bit(CS_SPREAD_SLAB, &cs->flags);
2767 
2768 	cpuset_inc();
2769 
2770 	spin_lock_irq(&callback_lock);
2771 	if (is_in_v2_mode()) {
2772 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2773 		cs->effective_mems = parent->effective_mems;
2774 		cs->use_parent_ecpus = true;
2775 		parent->child_ecpus_count++;
2776 	}
2777 	spin_unlock_irq(&callback_lock);
2778 
2779 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
2780 		goto out_unlock;
2781 
2782 	/*
2783 	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2784 	 * set.  This flag handling is implemented in cgroup core for
2785 	 * histrical reasons - the flag may be specified during mount.
2786 	 *
2787 	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
2788 	 * refuse to clone the configuration - thereby refusing the task to
2789 	 * be entered, and as a result refusing the sys_unshare() or
2790 	 * clone() which initiated it.  If this becomes a problem for some
2791 	 * users who wish to allow that scenario, then this could be
2792 	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2793 	 * (and likewise for mems) to the new cgroup.
2794 	 */
2795 	rcu_read_lock();
2796 	cpuset_for_each_child(tmp_cs, pos_css, parent) {
2797 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2798 			rcu_read_unlock();
2799 			goto out_unlock;
2800 		}
2801 	}
2802 	rcu_read_unlock();
2803 
2804 	spin_lock_irq(&callback_lock);
2805 	cs->mems_allowed = parent->mems_allowed;
2806 	cs->effective_mems = parent->mems_allowed;
2807 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2808 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2809 	spin_unlock_irq(&callback_lock);
2810 out_unlock:
2811 	percpu_up_write(&cpuset_rwsem);
2812 	put_online_cpus();
2813 	return 0;
2814 }
2815 
2816 /*
2817  * If the cpuset being removed has its flag 'sched_load_balance'
2818  * enabled, then simulate turning sched_load_balance off, which
2819  * will call rebuild_sched_domains_locked(). That is not needed
2820  * in the default hierarchy where only changes in partition
2821  * will cause repartitioning.
2822  *
2823  * If the cpuset has the 'sched.partition' flag enabled, simulate
2824  * turning 'sched.partition" off.
2825  */
2826 
2827 static void cpuset_css_offline(struct cgroup_subsys_state *css)
2828 {
2829 	struct cpuset *cs = css_cs(css);
2830 
2831 	get_online_cpus();
2832 	percpu_down_write(&cpuset_rwsem);
2833 
2834 	if (is_partition_root(cs))
2835 		update_prstate(cs, 0);
2836 
2837 	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2838 	    is_sched_load_balance(cs))
2839 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2840 
2841 	if (cs->use_parent_ecpus) {
2842 		struct cpuset *parent = parent_cs(cs);
2843 
2844 		cs->use_parent_ecpus = false;
2845 		parent->child_ecpus_count--;
2846 	}
2847 
2848 	cpuset_dec();
2849 	clear_bit(CS_ONLINE, &cs->flags);
2850 
2851 	percpu_up_write(&cpuset_rwsem);
2852 	put_online_cpus();
2853 }
2854 
2855 static void cpuset_css_free(struct cgroup_subsys_state *css)
2856 {
2857 	struct cpuset *cs = css_cs(css);
2858 
2859 	free_cpuset(cs);
2860 }
2861 
2862 static void cpuset_bind(struct cgroup_subsys_state *root_css)
2863 {
2864 	percpu_down_write(&cpuset_rwsem);
2865 	spin_lock_irq(&callback_lock);
2866 
2867 	if (is_in_v2_mode()) {
2868 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2869 		top_cpuset.mems_allowed = node_possible_map;
2870 	} else {
2871 		cpumask_copy(top_cpuset.cpus_allowed,
2872 			     top_cpuset.effective_cpus);
2873 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
2874 	}
2875 
2876 	spin_unlock_irq(&callback_lock);
2877 	percpu_up_write(&cpuset_rwsem);
2878 }
2879 
2880 /*
2881  * Make sure the new task conform to the current state of its parent,
2882  * which could have been changed by cpuset just after it inherits the
2883  * state from the parent and before it sits on the cgroup's task list.
2884  */
2885 static void cpuset_fork(struct task_struct *task)
2886 {
2887 	if (task_css_is_root(task, cpuset_cgrp_id))
2888 		return;
2889 
2890 	set_cpus_allowed_ptr(task, current->cpus_ptr);
2891 	task->mems_allowed = current->mems_allowed;
2892 }
2893 
2894 struct cgroup_subsys cpuset_cgrp_subsys = {
2895 	.css_alloc	= cpuset_css_alloc,
2896 	.css_online	= cpuset_css_online,
2897 	.css_offline	= cpuset_css_offline,
2898 	.css_free	= cpuset_css_free,
2899 	.can_attach	= cpuset_can_attach,
2900 	.cancel_attach	= cpuset_cancel_attach,
2901 	.attach		= cpuset_attach,
2902 	.post_attach	= cpuset_post_attach,
2903 	.bind		= cpuset_bind,
2904 	.fork		= cpuset_fork,
2905 	.legacy_cftypes	= legacy_files,
2906 	.dfl_cftypes	= dfl_files,
2907 	.early_init	= true,
2908 	.threaded	= true,
2909 };
2910 
2911 /**
2912  * cpuset_init - initialize cpusets at system boot
2913  *
2914  * Description: Initialize top_cpuset
2915  **/
2916 
2917 int __init cpuset_init(void)
2918 {
2919 	BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
2920 
2921 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2922 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
2923 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
2924 
2925 	cpumask_setall(top_cpuset.cpus_allowed);
2926 	nodes_setall(top_cpuset.mems_allowed);
2927 	cpumask_setall(top_cpuset.effective_cpus);
2928 	nodes_setall(top_cpuset.effective_mems);
2929 
2930 	fmeter_init(&top_cpuset.fmeter);
2931 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2932 	top_cpuset.relax_domain_level = -1;
2933 
2934 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
2935 
2936 	return 0;
2937 }
2938 
2939 /*
2940  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2941  * or memory nodes, we need to walk over the cpuset hierarchy,
2942  * removing that CPU or node from all cpusets.  If this removes the
2943  * last CPU or node from a cpuset, then move the tasks in the empty
2944  * cpuset to its next-highest non-empty parent.
2945  */
2946 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2947 {
2948 	struct cpuset *parent;
2949 
2950 	/*
2951 	 * Find its next-highest non-empty parent, (top cpuset
2952 	 * has online cpus, so can't be empty).
2953 	 */
2954 	parent = parent_cs(cs);
2955 	while (cpumask_empty(parent->cpus_allowed) ||
2956 			nodes_empty(parent->mems_allowed))
2957 		parent = parent_cs(parent);
2958 
2959 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2960 		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
2961 		pr_cont_cgroup_name(cs->css.cgroup);
2962 		pr_cont("\n");
2963 	}
2964 }
2965 
2966 static void
2967 hotplug_update_tasks_legacy(struct cpuset *cs,
2968 			    struct cpumask *new_cpus, nodemask_t *new_mems,
2969 			    bool cpus_updated, bool mems_updated)
2970 {
2971 	bool is_empty;
2972 
2973 	spin_lock_irq(&callback_lock);
2974 	cpumask_copy(cs->cpus_allowed, new_cpus);
2975 	cpumask_copy(cs->effective_cpus, new_cpus);
2976 	cs->mems_allowed = *new_mems;
2977 	cs->effective_mems = *new_mems;
2978 	spin_unlock_irq(&callback_lock);
2979 
2980 	/*
2981 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2982 	 * as the tasks will be migratecd to an ancestor.
2983 	 */
2984 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
2985 		update_tasks_cpumask(cs);
2986 	if (mems_updated && !nodes_empty(cs->mems_allowed))
2987 		update_tasks_nodemask(cs);
2988 
2989 	is_empty = cpumask_empty(cs->cpus_allowed) ||
2990 		   nodes_empty(cs->mems_allowed);
2991 
2992 	percpu_up_write(&cpuset_rwsem);
2993 
2994 	/*
2995 	 * Move tasks to the nearest ancestor with execution resources,
2996 	 * This is full cgroup operation which will also call back into
2997 	 * cpuset. Should be done outside any lock.
2998 	 */
2999 	if (is_empty)
3000 		remove_tasks_in_empty_cpuset(cs);
3001 
3002 	percpu_down_write(&cpuset_rwsem);
3003 }
3004 
3005 static void
3006 hotplug_update_tasks(struct cpuset *cs,
3007 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3008 		     bool cpus_updated, bool mems_updated)
3009 {
3010 	if (cpumask_empty(new_cpus))
3011 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3012 	if (nodes_empty(*new_mems))
3013 		*new_mems = parent_cs(cs)->effective_mems;
3014 
3015 	spin_lock_irq(&callback_lock);
3016 	cpumask_copy(cs->effective_cpus, new_cpus);
3017 	cs->effective_mems = *new_mems;
3018 	spin_unlock_irq(&callback_lock);
3019 
3020 	if (cpus_updated)
3021 		update_tasks_cpumask(cs);
3022 	if (mems_updated)
3023 		update_tasks_nodemask(cs);
3024 }
3025 
3026 static bool force_rebuild;
3027 
3028 void cpuset_force_rebuild(void)
3029 {
3030 	force_rebuild = true;
3031 }
3032 
3033 /**
3034  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3035  * @cs: cpuset in interest
3036  * @tmp: the tmpmasks structure pointer
3037  *
3038  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3039  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3040  * all its tasks are moved to the nearest ancestor with both resources.
3041  */
3042 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3043 {
3044 	static cpumask_t new_cpus;
3045 	static nodemask_t new_mems;
3046 	bool cpus_updated;
3047 	bool mems_updated;
3048 	struct cpuset *parent;
3049 retry:
3050 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3051 
3052 	percpu_down_write(&cpuset_rwsem);
3053 
3054 	/*
3055 	 * We have raced with task attaching. We wait until attaching
3056 	 * is finished, so we won't attach a task to an empty cpuset.
3057 	 */
3058 	if (cs->attach_in_progress) {
3059 		percpu_up_write(&cpuset_rwsem);
3060 		goto retry;
3061 	}
3062 
3063 	parent =  parent_cs(cs);
3064 	compute_effective_cpumask(&new_cpus, cs, parent);
3065 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3066 
3067 	if (cs->nr_subparts_cpus)
3068 		/*
3069 		 * Make sure that CPUs allocated to child partitions
3070 		 * do not show up in effective_cpus.
3071 		 */
3072 		cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3073 
3074 	if (!tmp || !cs->partition_root_state)
3075 		goto update_tasks;
3076 
3077 	/*
3078 	 * In the unlikely event that a partition root has empty
3079 	 * effective_cpus or its parent becomes erroneous, we have to
3080 	 * transition it to the erroneous state.
3081 	 */
3082 	if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
3083 	   (parent->partition_root_state == PRS_ERROR))) {
3084 		if (cs->nr_subparts_cpus) {
3085 			cs->nr_subparts_cpus = 0;
3086 			cpumask_clear(cs->subparts_cpus);
3087 			compute_effective_cpumask(&new_cpus, cs, parent);
3088 		}
3089 
3090 		/*
3091 		 * If the effective_cpus is empty because the child
3092 		 * partitions take away all the CPUs, we can keep
3093 		 * the current partition and let the child partitions
3094 		 * fight for available CPUs.
3095 		 */
3096 		if ((parent->partition_root_state == PRS_ERROR) ||
3097 		     cpumask_empty(&new_cpus)) {
3098 			update_parent_subparts_cpumask(cs, partcmd_disable,
3099 						       NULL, tmp);
3100 			cs->partition_root_state = PRS_ERROR;
3101 		}
3102 		cpuset_force_rebuild();
3103 	}
3104 
3105 	/*
3106 	 * On the other hand, an erroneous partition root may be transitioned
3107 	 * back to a regular one or a partition root with no CPU allocated
3108 	 * from the parent may change to erroneous.
3109 	 */
3110 	if (is_partition_root(parent) &&
3111 	   ((cs->partition_root_state == PRS_ERROR) ||
3112 	    !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3113 	     update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
3114 		cpuset_force_rebuild();
3115 
3116 update_tasks:
3117 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3118 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3119 
3120 	if (is_in_v2_mode())
3121 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3122 				     cpus_updated, mems_updated);
3123 	else
3124 		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3125 					    cpus_updated, mems_updated);
3126 
3127 	percpu_up_write(&cpuset_rwsem);
3128 }
3129 
3130 /**
3131  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3132  *
3133  * This function is called after either CPU or memory configuration has
3134  * changed and updates cpuset accordingly.  The top_cpuset is always
3135  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3136  * order to make cpusets transparent (of no affect) on systems that are
3137  * actively using CPU hotplug but making no active use of cpusets.
3138  *
3139  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3140  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3141  * all descendants.
3142  *
3143  * Note that CPU offlining during suspend is ignored.  We don't modify
3144  * cpusets across suspend/resume cycles at all.
3145  */
3146 static void cpuset_hotplug_workfn(struct work_struct *work)
3147 {
3148 	static cpumask_t new_cpus;
3149 	static nodemask_t new_mems;
3150 	bool cpus_updated, mems_updated;
3151 	bool on_dfl = is_in_v2_mode();
3152 	struct tmpmasks tmp, *ptmp = NULL;
3153 
3154 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3155 		ptmp = &tmp;
3156 
3157 	percpu_down_write(&cpuset_rwsem);
3158 
3159 	/* fetch the available cpus/mems and find out which changed how */
3160 	cpumask_copy(&new_cpus, cpu_active_mask);
3161 	new_mems = node_states[N_MEMORY];
3162 
3163 	/*
3164 	 * If subparts_cpus is populated, it is likely that the check below
3165 	 * will produce a false positive on cpus_updated when the cpu list
3166 	 * isn't changed. It is extra work, but it is better to be safe.
3167 	 */
3168 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3169 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3170 
3171 	/* synchronize cpus_allowed to cpu_active_mask */
3172 	if (cpus_updated) {
3173 		spin_lock_irq(&callback_lock);
3174 		if (!on_dfl)
3175 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3176 		/*
3177 		 * Make sure that CPUs allocated to child partitions
3178 		 * do not show up in effective_cpus. If no CPU is left,
3179 		 * we clear the subparts_cpus & let the child partitions
3180 		 * fight for the CPUs again.
3181 		 */
3182 		if (top_cpuset.nr_subparts_cpus) {
3183 			if (cpumask_subset(&new_cpus,
3184 					   top_cpuset.subparts_cpus)) {
3185 				top_cpuset.nr_subparts_cpus = 0;
3186 				cpumask_clear(top_cpuset.subparts_cpus);
3187 			} else {
3188 				cpumask_andnot(&new_cpus, &new_cpus,
3189 					       top_cpuset.subparts_cpus);
3190 			}
3191 		}
3192 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3193 		spin_unlock_irq(&callback_lock);
3194 		/* we don't mess with cpumasks of tasks in top_cpuset */
3195 	}
3196 
3197 	/* synchronize mems_allowed to N_MEMORY */
3198 	if (mems_updated) {
3199 		spin_lock_irq(&callback_lock);
3200 		if (!on_dfl)
3201 			top_cpuset.mems_allowed = new_mems;
3202 		top_cpuset.effective_mems = new_mems;
3203 		spin_unlock_irq(&callback_lock);
3204 		update_tasks_nodemask(&top_cpuset);
3205 	}
3206 
3207 	percpu_up_write(&cpuset_rwsem);
3208 
3209 	/* if cpus or mems changed, we need to propagate to descendants */
3210 	if (cpus_updated || mems_updated) {
3211 		struct cpuset *cs;
3212 		struct cgroup_subsys_state *pos_css;
3213 
3214 		rcu_read_lock();
3215 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3216 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3217 				continue;
3218 			rcu_read_unlock();
3219 
3220 			cpuset_hotplug_update_tasks(cs, ptmp);
3221 
3222 			rcu_read_lock();
3223 			css_put(&cs->css);
3224 		}
3225 		rcu_read_unlock();
3226 	}
3227 
3228 	/* rebuild sched domains if cpus_allowed has changed */
3229 	if (cpus_updated || force_rebuild) {
3230 		force_rebuild = false;
3231 		rebuild_sched_domains();
3232 	}
3233 
3234 	free_cpumasks(NULL, ptmp);
3235 }
3236 
3237 void cpuset_update_active_cpus(void)
3238 {
3239 	/*
3240 	 * We're inside cpu hotplug critical region which usually nests
3241 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3242 	 * to a work item to avoid reverse locking order.
3243 	 */
3244 	schedule_work(&cpuset_hotplug_work);
3245 }
3246 
3247 void cpuset_wait_for_hotplug(void)
3248 {
3249 	flush_work(&cpuset_hotplug_work);
3250 }
3251 
3252 /*
3253  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3254  * Call this routine anytime after node_states[N_MEMORY] changes.
3255  * See cpuset_update_active_cpus() for CPU hotplug handling.
3256  */
3257 static int cpuset_track_online_nodes(struct notifier_block *self,
3258 				unsigned long action, void *arg)
3259 {
3260 	schedule_work(&cpuset_hotplug_work);
3261 	return NOTIFY_OK;
3262 }
3263 
3264 static struct notifier_block cpuset_track_online_nodes_nb = {
3265 	.notifier_call = cpuset_track_online_nodes,
3266 	.priority = 10,		/* ??! */
3267 };
3268 
3269 /**
3270  * cpuset_init_smp - initialize cpus_allowed
3271  *
3272  * Description: Finish top cpuset after cpu, node maps are initialized
3273  */
3274 void __init cpuset_init_smp(void)
3275 {
3276 	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
3277 	top_cpuset.mems_allowed = node_states[N_MEMORY];
3278 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3279 
3280 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3281 	top_cpuset.effective_mems = node_states[N_MEMORY];
3282 
3283 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
3284 
3285 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3286 	BUG_ON(!cpuset_migrate_mm_wq);
3287 }
3288 
3289 /**
3290  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3291  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3292  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3293  *
3294  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3295  * attached to the specified @tsk.  Guaranteed to return some non-empty
3296  * subset of cpu_online_mask, even if this means going outside the
3297  * tasks cpuset.
3298  **/
3299 
3300 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3301 {
3302 	unsigned long flags;
3303 
3304 	spin_lock_irqsave(&callback_lock, flags);
3305 	rcu_read_lock();
3306 	guarantee_online_cpus(task_cs(tsk), pmask);
3307 	rcu_read_unlock();
3308 	spin_unlock_irqrestore(&callback_lock, flags);
3309 }
3310 
3311 /**
3312  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3313  * @tsk: pointer to task_struct with which the scheduler is struggling
3314  *
3315  * Description: In the case that the scheduler cannot find an allowed cpu in
3316  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3317  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3318  * which will not contain a sane cpumask during cases such as cpu hotplugging.
3319  * This is the absolute last resort for the scheduler and it is only used if
3320  * _every_ other avenue has been traveled.
3321  **/
3322 
3323 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3324 {
3325 	rcu_read_lock();
3326 	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
3327 		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
3328 	rcu_read_unlock();
3329 
3330 	/*
3331 	 * We own tsk->cpus_allowed, nobody can change it under us.
3332 	 *
3333 	 * But we used cs && cs->cpus_allowed lockless and thus can
3334 	 * race with cgroup_attach_task() or update_cpumask() and get
3335 	 * the wrong tsk->cpus_allowed. However, both cases imply the
3336 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3337 	 * which takes task_rq_lock().
3338 	 *
3339 	 * If we are called after it dropped the lock we must see all
3340 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
3341 	 * set any mask even if it is not right from task_cs() pov,
3342 	 * the pending set_cpus_allowed_ptr() will fix things.
3343 	 *
3344 	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
3345 	 * if required.
3346 	 */
3347 }
3348 
3349 void __init cpuset_init_current_mems_allowed(void)
3350 {
3351 	nodes_setall(current->mems_allowed);
3352 }
3353 
3354 /**
3355  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3356  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3357  *
3358  * Description: Returns the nodemask_t mems_allowed of the cpuset
3359  * attached to the specified @tsk.  Guaranteed to return some non-empty
3360  * subset of node_states[N_MEMORY], even if this means going outside the
3361  * tasks cpuset.
3362  **/
3363 
3364 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3365 {
3366 	nodemask_t mask;
3367 	unsigned long flags;
3368 
3369 	spin_lock_irqsave(&callback_lock, flags);
3370 	rcu_read_lock();
3371 	guarantee_online_mems(task_cs(tsk), &mask);
3372 	rcu_read_unlock();
3373 	spin_unlock_irqrestore(&callback_lock, flags);
3374 
3375 	return mask;
3376 }
3377 
3378 /**
3379  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3380  * @nodemask: the nodemask to be checked
3381  *
3382  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3383  */
3384 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3385 {
3386 	return nodes_intersects(*nodemask, current->mems_allowed);
3387 }
3388 
3389 /*
3390  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3391  * mem_hardwall ancestor to the specified cpuset.  Call holding
3392  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
3393  * (an unusual configuration), then returns the root cpuset.
3394  */
3395 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3396 {
3397 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3398 		cs = parent_cs(cs);
3399 	return cs;
3400 }
3401 
3402 /**
3403  * cpuset_node_allowed - Can we allocate on a memory node?
3404  * @node: is this an allowed node?
3405  * @gfp_mask: memory allocation flags
3406  *
3407  * If we're in interrupt, yes, we can always allocate.  If @node is set in
3408  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
3409  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3410  * yes.  If current has access to memory reserves as an oom victim, yes.
3411  * Otherwise, no.
3412  *
3413  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3414  * and do not allow allocations outside the current tasks cpuset
3415  * unless the task has been OOM killed.
3416  * GFP_KERNEL allocations are not so marked, so can escape to the
3417  * nearest enclosing hardwalled ancestor cpuset.
3418  *
3419  * Scanning up parent cpusets requires callback_lock.  The
3420  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3421  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3422  * current tasks mems_allowed came up empty on the first pass over
3423  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
3424  * cpuset are short of memory, might require taking the callback_lock.
3425  *
3426  * The first call here from mm/page_alloc:get_page_from_freelist()
3427  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3428  * so no allocation on a node outside the cpuset is allowed (unless
3429  * in interrupt, of course).
3430  *
3431  * The second pass through get_page_from_freelist() doesn't even call
3432  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
3433  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3434  * in alloc_flags.  That logic and the checks below have the combined
3435  * affect that:
3436  *	in_interrupt - any node ok (current task context irrelevant)
3437  *	GFP_ATOMIC   - any node ok
3438  *	tsk_is_oom_victim   - any node ok
3439  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
3440  *	GFP_USER     - only nodes in current tasks mems allowed ok.
3441  */
3442 bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
3443 {
3444 	struct cpuset *cs;		/* current cpuset ancestors */
3445 	int allowed;			/* is allocation in zone z allowed? */
3446 	unsigned long flags;
3447 
3448 	if (in_interrupt())
3449 		return true;
3450 	if (node_isset(node, current->mems_allowed))
3451 		return true;
3452 	/*
3453 	 * Allow tasks that have access to memory reserves because they have
3454 	 * been OOM killed to get memory anywhere.
3455 	 */
3456 	if (unlikely(tsk_is_oom_victim(current)))
3457 		return true;
3458 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
3459 		return false;
3460 
3461 	if (current->flags & PF_EXITING) /* Let dying task have memory */
3462 		return true;
3463 
3464 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
3465 	spin_lock_irqsave(&callback_lock, flags);
3466 
3467 	rcu_read_lock();
3468 	cs = nearest_hardwall_ancestor(task_cs(current));
3469 	allowed = node_isset(node, cs->mems_allowed);
3470 	rcu_read_unlock();
3471 
3472 	spin_unlock_irqrestore(&callback_lock, flags);
3473 	return allowed;
3474 }
3475 
3476 /**
3477  * cpuset_mem_spread_node() - On which node to begin search for a file page
3478  * cpuset_slab_spread_node() - On which node to begin search for a slab page
3479  *
3480  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3481  * tasks in a cpuset with is_spread_page or is_spread_slab set),
3482  * and if the memory allocation used cpuset_mem_spread_node()
3483  * to determine on which node to start looking, as it will for
3484  * certain page cache or slab cache pages such as used for file
3485  * system buffers and inode caches, then instead of starting on the
3486  * local node to look for a free page, rather spread the starting
3487  * node around the tasks mems_allowed nodes.
3488  *
3489  * We don't have to worry about the returned node being offline
3490  * because "it can't happen", and even if it did, it would be ok.
3491  *
3492  * The routines calling guarantee_online_mems() are careful to
3493  * only set nodes in task->mems_allowed that are online.  So it
3494  * should not be possible for the following code to return an
3495  * offline node.  But if it did, that would be ok, as this routine
3496  * is not returning the node where the allocation must be, only
3497  * the node where the search should start.  The zonelist passed to
3498  * __alloc_pages() will include all nodes.  If the slab allocator
3499  * is passed an offline node, it will fall back to the local node.
3500  * See kmem_cache_alloc_node().
3501  */
3502 
3503 static int cpuset_spread_node(int *rotor)
3504 {
3505 	return *rotor = next_node_in(*rotor, current->mems_allowed);
3506 }
3507 
3508 int cpuset_mem_spread_node(void)
3509 {
3510 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
3511 		current->cpuset_mem_spread_rotor =
3512 			node_random(&current->mems_allowed);
3513 
3514 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3515 }
3516 
3517 int cpuset_slab_spread_node(void)
3518 {
3519 	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
3520 		current->cpuset_slab_spread_rotor =
3521 			node_random(&current->mems_allowed);
3522 
3523 	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3524 }
3525 
3526 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
3527 
3528 /**
3529  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3530  * @tsk1: pointer to task_struct of some task.
3531  * @tsk2: pointer to task_struct of some other task.
3532  *
3533  * Description: Return true if @tsk1's mems_allowed intersects the
3534  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
3535  * one of the task's memory usage might impact the memory available
3536  * to the other.
3537  **/
3538 
3539 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
3540 				   const struct task_struct *tsk2)
3541 {
3542 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
3543 }
3544 
3545 /**
3546  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3547  *
3548  * Description: Prints current's name, cpuset name, and cached copy of its
3549  * mems_allowed to the kernel log.
3550  */
3551 void cpuset_print_current_mems_allowed(void)
3552 {
3553 	struct cgroup *cgrp;
3554 
3555 	rcu_read_lock();
3556 
3557 	cgrp = task_cs(current)->css.cgroup;
3558 	pr_cont(",cpuset=");
3559 	pr_cont_cgroup_name(cgrp);
3560 	pr_cont(",mems_allowed=%*pbl",
3561 		nodemask_pr_args(&current->mems_allowed));
3562 
3563 	rcu_read_unlock();
3564 }
3565 
3566 /*
3567  * Collection of memory_pressure is suppressed unless
3568  * this flag is enabled by writing "1" to the special
3569  * cpuset file 'memory_pressure_enabled' in the root cpuset.
3570  */
3571 
3572 int cpuset_memory_pressure_enabled __read_mostly;
3573 
3574 /**
3575  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3576  *
3577  * Keep a running average of the rate of synchronous (direct)
3578  * page reclaim efforts initiated by tasks in each cpuset.
3579  *
3580  * This represents the rate at which some task in the cpuset
3581  * ran low on memory on all nodes it was allowed to use, and
3582  * had to enter the kernels page reclaim code in an effort to
3583  * create more free memory by tossing clean pages or swapping
3584  * or writing dirty pages.
3585  *
3586  * Display to user space in the per-cpuset read-only file
3587  * "memory_pressure".  Value displayed is an integer
3588  * representing the recent rate of entry into the synchronous
3589  * (direct) page reclaim by any task attached to the cpuset.
3590  **/
3591 
3592 void __cpuset_memory_pressure_bump(void)
3593 {
3594 	rcu_read_lock();
3595 	fmeter_markevent(&task_cs(current)->fmeter);
3596 	rcu_read_unlock();
3597 }
3598 
3599 #ifdef CONFIG_PROC_PID_CPUSET
3600 /*
3601  * proc_cpuset_show()
3602  *  - Print tasks cpuset path into seq_file.
3603  *  - Used for /proc/<pid>/cpuset.
3604  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3605  *    doesn't really matter if tsk->cpuset changes after we read it,
3606  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
3607  *    anyway.
3608  */
3609 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
3610 		     struct pid *pid, struct task_struct *tsk)
3611 {
3612 	char *buf;
3613 	struct cgroup_subsys_state *css;
3614 	int retval;
3615 
3616 	retval = -ENOMEM;
3617 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
3618 	if (!buf)
3619 		goto out;
3620 
3621 	css = task_get_css(tsk, cpuset_cgrp_id);
3622 	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
3623 				current->nsproxy->cgroup_ns);
3624 	css_put(css);
3625 	if (retval >= PATH_MAX)
3626 		retval = -ENAMETOOLONG;
3627 	if (retval < 0)
3628 		goto out_free;
3629 	seq_puts(m, buf);
3630 	seq_putc(m, '\n');
3631 	retval = 0;
3632 out_free:
3633 	kfree(buf);
3634 out:
3635 	return retval;
3636 }
3637 #endif /* CONFIG_PROC_PID_CPUSET */
3638 
3639 /* Display task mems_allowed in /proc/<pid>/status file. */
3640 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
3641 {
3642 	seq_printf(m, "Mems_allowed:\t%*pb\n",
3643 		   nodemask_pr_args(&task->mems_allowed));
3644 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
3645 		   nodemask_pr_args(&task->mems_allowed));
3646 }
3647