/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

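/*
 * Framework-wide state: "enabled_devices" counts devices for which
 * cpuidle_enable_device() has succeeded, "off" mirrors the cpuidle.off=
 * boot parameter, and "initialized" gates the idle handler installed by
 * cpuidle_install_idle_handler().
 */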
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns an error code if there is no driver or no idle state with an
 * ->enter_dead() callback is available.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

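/*
 * find_deepest_state - find the deepest usable idle state
 * @drv: cpuidle driver for the given CPU
 * @dev: cpuidle device for the given CPU
 * @max_latency_ns: do not consider states with a higher exit latency
 * @forbidden_flags: skip states whose flags intersect this mask
 * @s2idle: if true, consider only states with an ->enter_s2idle callback
 *
 * Returns the index of the deepest matching state, or 0 if none is found.
 */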
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      u64 max_latency_ns,
			      unsigned int forbidden_flags,
			      bool s2idle)
{
	u64 latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable ||
		    s->exit_latency_ns <= latency_req ||
		    s->exit_latency_ns > max_latency_ns ||
		    (s->flags & forbidden_flags) ||
		    (s2idle && !s->enter_s2idle))
			continue;

		latency_req = s->exit_latency_ns;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/unset governor override mode.
 * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
 *
 * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
 * state with exit latency within @latency_limit_ns (overriding governors going
 * forward), or do not override governors if it is zero.
 */
void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->forced_idle_latency_limit_ns = latency_limit_ns;
	preempt_enable();
}
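
/*
 * Illustrative use of the override (a sketch, not a verbatim caller; the
 * actual user in the kernel is the scheduler's forced-idle injection path):
 *
 *	cpuidle_use_deepest_state(latency_limit_ns);
 *	... let the CPU go idle ...
 *	cpuidle_use_deepest_state(0);	(removes the override)
 */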

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @latency_limit_ns: Idle state exit latency limit.
 *
 * Return: the index of the deepest available idle state.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev,
			       u64 latency_limit_ns)
{
	return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	ktime_t time_start, time_end;

	time_start = ns_to_ktime(local_clock());

	/*
	 * trace_suspend_resume() called by tick_freeze() for the last CPU
	 * executing it contains RCU usage regarded as invalid in the idle
	 * context, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_freeze());
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	drv->states[index].enter_s2idle(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * first CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();

	time_end = ns_to_ktime(local_clock());

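	/* Note: s2idle statistics are accumulated in microseconds. */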
	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
	dev->states_usage[index].s2idle_usage++;
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_s2idle present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, U64_MAX, 0, true);
	if (index > 0)
		enter_s2idle_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
		broadcast = false;
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());

	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();

	sched_clock_idle_wakeup_event();
	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

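	/*
	 * "Coupled" states handle interrupt enabling themselves (see the
	 * comment in enter_s2idle_proper() above), so only re-enable
	 * interrupts here for non-coupled states.
	 */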
	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	if (entered_state >= 0) {
		s64 diff, delay = drv->states[entered_state].exit_latency_ns;
		int i;

		/*
		 * Update the cpuidle counters.  This could be done in each
		 * driver's enter routine instead, but that would duplicate
		 * the same code across drivers.
		 */
		diff = ktime_sub(time_end, time_start);

		dev->last_residency_ns = diff;
		dev->states_usage[entered_state].time_ns += diff;
		dev->states_usage[entered_state].usage++;

		if (diff < drv->states[entered_state].target_residency_ns) {
			for (i = entered_state - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				/* A shallower state is enabled, so update. */
				dev->states_usage[entered_state].above++;
				break;
			}
		} else if (diff > delay) {
			for (i = entered_state + 1; i < drv->state_count; i++) {
				if (dev->states_usage[i].disable)
					continue;

				/*
				 * Update if a deeper state would have been a
				 * better match for the observed idle duration.
				 */
				if (diff - delay >= drv->states[i].target_residency_ns)
					dev->states_usage[entered_state].below++;

				break;
			}
		}
	} else {
		dev->last_residency_ns = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The governor is expected to write 'false' to the memory location pointed to
 * by @stop_tick if the scheduler tick should not be stopped before entering
 * the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
{
	return cpuidle_curr_governor->select(drv, dev, stop_tick);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	int ret = 0;

	/*
	 * Store the next hrtimer, which becomes either the next tick or the
	 * next timer event, whichever expires first.  Additionally, to make
	 * this data useful for consumers outside cpuidle, we rely on the
	 * governor's ->select() callback having already decided whether or
	 * not to stop the tick.
	 */
	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

	if (cpuidle_state_is_coupled(drv, index))
		ret = cpuidle_enter_state_coupled(dev, drv, index);
	else
		ret = cpuidle_enter_state(dev, drv, index);

	WRITE_ONCE(dev->next_hrtimer, 0);
	return ret;
}

/**
 * cpuidle_reflect - tell the underlying governor about the state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 *
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}
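
/*
 * Typical sequence in the idle loop (a sketch of the caller in the
 * scheduler's idle path, not code from this file):
 *
 *	bool stop_tick = true;
 *	int next_state = cpuidle_select(drv, dev, &stop_tick);
 *	(stop the tick here if stop_tick allows it)
 *	next_state = cpuidle_enter(drv, dev, next_state);
 *	cpuidle_reflect(dev, next_state);
 */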

/**
 * cpuidle_poll_time - return the amount of time to poll for
 *
 * Governors can override dev->poll_limit_ns if necessary.
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 *
 */
u64 cpuidle_poll_time(struct cpuidle_driver *drv,
		      struct cpuidle_device *dev)
{
	int i;
	u64 limit_ns;

	if (dev->poll_limit_ns)
		return dev->poll_limit_ns;

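	/*
	 * Cap the polling limit at one tick, or at the target residency of
	 * the shallowest enabled non-polling state, and cache the result.
	 */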
	limit_ns = TICK_NSEC;
	for (i = 1; i < drv->state_count; i++) {
		if (dev->states_usage[i].disable)
			continue;

		limit_ns = drv->states[i].target_residency_ns;
		break;
	}

	dev->poll_limit_ns = limit_ns;

	return dev->poll_limit_ns;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the new idle. */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in the suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in the suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	if (!cpuidle_curr_governor)
		return -EIO;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

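	/* Order the setup above against the dev->enabled store below. */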
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}
EXPORT_SYMBOL_GPL(cpuidle_disable_device);

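/*
 * __cpuidle_unregister_device - undo __cpuidle_register_device()
 * @dev: the cpu
 *
 * Called with the cpuidle_lock mutex held.
 */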
static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency_ns = 0;
	dev->next_hrtimer = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i, ret;

	if (!try_module_get(drv->owner))
		return -EINVAL;

	for (i = 0; i < drv->state_count; i++) {
		if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;

		if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
	}

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and its devices
 * @drv: a valid pointer to a struct cpuidle_driver
 *
 * This function can be used only if the driver has been previously registered
 * through the cpuidle_register function.
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the cpu devices with the
 * coupled_cpus passed as parameter
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * This function handles the common initialization pattern found in the
 * arch-specific drivers.  The devices are globally defined in this file.
 *
 * Returns 0 on success, < 0 otherwise.
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, the coupled idle states could
		 * be enabled in the kernel even if the cpuidle driver does not
		 * use them.  Note that coupled_cpus is copied by value here.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
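
/*
 * Illustrative registration from an arch driver (a hedged sketch; the
 * driver name, state values and the my_enter_wfi() callback below are
 * made-up examples, not taken from an in-tree driver):
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name = "my_idle",
 *		.owner = THIS_MODULE,
 *		.states = {
 *			{
 *				.enter			= my_enter_wfi,
 *				.exit_latency		= 1,
 *				.target_residency	= 1,
 *				.name			= "WFI",
 *				.desc			= "wait for interrupt",
 *			},
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */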

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);