xref: /openbmc/linux/kernel/cpu.c (revision bc000245)
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(put_online_cpus);
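
/*
 * Illustrative sketch: a hypothetical reader section (the function below is
 * an example only, not a real kernel helper) showing how callers pin the
 * online map across a walk of the CPUs.
 */
static unsigned int __maybe_unused example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();		/* bump cpu_hotplug.refcount */
	for_each_online_cpu(cpu)	/* cpu_online_mask is stable here */
		n++;
	put_online_cpus();		/* drop the ref, may wake a waiting writer */

	return n;
}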

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
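
/*
 * Writer-side pairing, as used by the cpu_up()/cpu_down() paths below
 * (call-order sketch, not additional code):
 *
 *	cpu_maps_update_begin();	exclude other writers
 *	cpu_hotplug_begin();		wait for the readers to drain
 *	... update cpu_online_mask ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */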

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
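
/*
 * Illustrative sketch: a minimal hotplug notifier as a client subsystem might
 * write one.  The callback and notifier_block below are hypothetical examples,
 * not part of this file's API; a real user would register the block with
 * register_cpu_notifier() from an initcall and remove it again with
 * unregister_cpu_notifier().
 */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state; an error here cancels the bring-up */
		break;
	case CPU_ONLINE:
		pr_info("example: cpu%u is up\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* returning notifier_from_errno(-EBUSY) would veto the offline */
		break;
	case CPU_DEAD:
		/* free per-cpu state */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __maybe_unused = {
	.notifier_call = example_cpu_callback,
};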

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
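
/*
 * Typically clear_tasks_mm_cpumask() is invoked from the architecture's CPU
 * teardown code (for example from an __cpu_disable()/__cpu_die()
 * implementation) once the outgoing CPU has been marked offline; the exact
 * call site varies per architecture.
 */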

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
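
/*
 * take_cpu_down() above is run through __stop_machine() (see _cpu_down()
 * below): it executes on the CPU that is going away while every other online
 * CPU spins with interrupts disabled, so __cpu_disable() and the CPU_DYING
 * notifiers run without any concurrent activity on the system.
 */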

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per-cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call the CPU_ONLINE notifiers. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
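
/*
 * Note: cpu_up()/cpu_down() are normally reached through the sysfs 'online'
 * attribute rather than called directly, roughly:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	-> cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	-> cpu_up(1)
 */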

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
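
/*
 * Sketch of the callers: the suspend and hibernation cores call
 * disable_nonboot_cpus() after tasks have been frozen and devices quiesced,
 * and enable_nonboot_cpus() on the corresponding resume path, so that only
 * the boot CPU runs while the system image is created or the platform enters
 * the sleep state.
 */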

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug and thus avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
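
/*
 * Call-order sketch for a secondary-CPU bringup path (arch code; function
 * names vary per architecture):
 *
 *	... early per-cpu setup, interrupts still disabled ...
 *	notify_cpu_starting(cpu);
 *	set_cpu_online(cpu, true);
 *	local_irq_enable();
 *	cpu_startup_entry(CPUHP_ONLINE);
 */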

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr < NR_CPUS, the NR_CPUS-bit value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
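
/*
 * Worked example of how cpumask_of() consumes this table (sketch of the
 * get_cpu_mask() logic in <linux/cpumask.h>): for CPU 'cpu' it takes row
 * [1 + cpu % BITS_PER_LONG], whose word 0 has exactly bit
 * (cpu % BITS_PER_LONG) set, and then steps the pointer back by
 * cpu / BITS_PER_LONG words.  The set bit therefore shows up at word index
 * cpu / BITS_PER_LONG of the returned mask, i.e. at bit position 'cpu',
 * while the words in front of it are read from the tail of the previous row
 * (row 0 at worst), which is zero there.  E.g. with 64-bit longs,
 * cpumask_of(70) points one word before row 7, so word 1 of the mask is
 * 1UL << 6 and every other word reads as zero: only bit 70 is set.
 */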

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

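/*
 * The masks below form a hierarchy: cpu_possible_mask (CPUs that could ever
 * be plugged in, fixed at boot) contains cpu_present_mask (CPUs currently
 * populated), which in turn contains cpu_online_mask (CPUs available to the
 * scheduler).  cpu_active_mask tracks the CPUs that tasks may be migrated to;
 * it is normally kept in sync with the online mask by the scheduler's hotplug
 * callbacks.
 */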
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
723