xref: /openbmc/linux/drivers/base/power/domain.c (revision 3e26a691)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22 
23 #include "power.h"
24 
25 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
26 
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
28 ({								\
29 	type (*__routine)(struct device *__d); 			\
30 	type __ret = (type)0;					\
31 								\
32 	__routine = genpd->dev_ops.callback; 			\
33 	if (__routine) {					\
34 		__ret = __routine(dev); 			\
35 	}							\
36 	__ret;							\
37 })
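
/*
 * For illustration: an invocation such as GENPD_DEV_CALLBACK(genpd, int, stop,
 * dev) expands roughly to the statement expression
 *
 *	({
 *		int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *		int __ret = 0;
 *
 *		if (__routine)
 *			__ret = __routine(dev);
 *		__ret;
 *	})
 *
 * so a domain that does not provide a given device callback simply gets a
 * successful no-op ((type)0) instead.
 */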
38 
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
41 
42 /*
43  * Get the generic PM domain for a particular struct device.
44  * This validates the struct device pointer, the PM domain pointer,
45  * and checks that the PM domain pointer is a real generic PM domain.
46  * Any failure results in NULL being returned.
47  */
48 struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
49 {
50 	struct generic_pm_domain *genpd = NULL, *gpd;
51 
52 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
53 		return NULL;
54 
55 	mutex_lock(&gpd_list_lock);
56 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
57 		if (&gpd->domain == dev->pm_domain) {
58 			genpd = gpd;
59 			break;
60 		}
61 	}
62 	mutex_unlock(&gpd_list_lock);
63 
64 	return genpd;
65 }
66 
67 /*
68  * This should only be used where we are certain that the pm_domain
69  * attached to the device is a genpd domain.
70  */
71 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
72 {
73 	if (IS_ERR_OR_NULL(dev->pm_domain))
74 		return ERR_PTR(-EINVAL);
75 
76 	return pd_to_genpd(dev->pm_domain);
77 }
78 
79 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
80 {
81 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
82 }
83 
84 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
85 {
86 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
87 }
88 
89 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
90 {
91 	bool ret = false;
92 
93 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
94 		ret = !!atomic_dec_and_test(&genpd->sd_count);
95 
96 	return ret;
97 }
98 
99 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
100 {
101 	atomic_inc(&genpd->sd_count);
102 	smp_mb__after_atomic();
103 }
104 
105 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
106 {
107 	unsigned int state_idx = genpd->state_idx;
108 	ktime_t time_start;
109 	s64 elapsed_ns;
110 	int ret;
111 
112 	if (!genpd->power_on)
113 		return 0;
114 
115 	if (!timed)
116 		return genpd->power_on(genpd);
117 
118 	time_start = ktime_get();
119 	ret = genpd->power_on(genpd);
120 	if (ret)
121 		return ret;
122 
123 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
124 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
125 		return ret;
126 
127 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
128 	genpd->max_off_time_changed = true;
129 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
130 		 genpd->name, "on", elapsed_ns);
131 
132 	return ret;
133 }
134 
135 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
136 {
137 	unsigned int state_idx = genpd->state_idx;
138 	ktime_t time_start;
139 	s64 elapsed_ns;
140 	int ret;
141 
142 	if (!genpd->power_off)
143 		return 0;
144 
145 	if (!timed)
146 		return genpd->power_off(genpd);
147 
148 	time_start = ktime_get();
149 	ret = genpd->power_off(genpd);
150 	if (ret == -EBUSY)
151 		return ret;
152 
153 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
154 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
155 		return ret;
156 
157 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
158 	genpd->max_off_time_changed = true;
159 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
160 		 genpd->name, "off", elapsed_ns);
161 
162 	return ret;
163 }
164 
165 /**
166  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
167  * @genpd: PM domain to power off.
168  *
169  * Queue up the execution of genpd_poweroff() on the pm_wq workqueue. If the
170  * work item is already pending, this is a no-op.
171  */
172 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
173 {
174 	queue_work(pm_wq, &genpd->power_off_work);
175 }
176 
177 /**
178  * genpd_poweron - Restore power to a given PM domain and its masters.
179  * @genpd: PM domain to power up.
180  * @depth: nesting count for lockdep.
181  *
182  * Restore power to @genpd and all of its masters so that it is possible to
183  * resume a device belonging to it.
184  */
185 static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
186 {
187 	struct gpd_link *link;
188 	int ret = 0;
189 
190 	if (genpd->status == GPD_STATE_ACTIVE
191 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
192 		return 0;
193 
194 	/*
195 	 * The list is guaranteed not to change while the loop below is being
196 	 * executed, unless one of the masters' .power_on() callbacks fiddles
197 	 * with it.
198 	 */
199 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
200 		struct generic_pm_domain *master = link->master;
201 
202 		genpd_sd_counter_inc(master);
203 
204 		mutex_lock_nested(&master->lock, depth + 1);
205 		ret = genpd_poweron(master, depth + 1);
206 		mutex_unlock(&master->lock);
207 
208 		if (ret) {
209 			genpd_sd_counter_dec(master);
210 			goto err;
211 		}
212 	}
213 
214 	ret = genpd_power_on(genpd, true);
215 	if (ret)
216 		goto err;
217 
218 	genpd->status = GPD_STATE_ACTIVE;
219 	return 0;
220 
221  err:
222 	list_for_each_entry_continue_reverse(link,
223 					&genpd->slave_links,
224 					slave_node) {
225 		genpd_sd_counter_dec(link->master);
226 		genpd_queue_power_off_work(link->master);
227 	}
228 
229 	return ret;
230 }
231 
232 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
233 {
234 	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
235 }
236 
237 static int genpd_restore_dev(struct generic_pm_domain *genpd,
238 			struct device *dev)
239 {
240 	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
241 }
242 
243 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
244 				     unsigned long val, void *ptr)
245 {
246 	struct generic_pm_domain_data *gpd_data;
247 	struct device *dev;
248 
249 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
250 	dev = gpd_data->base.dev;
251 
252 	for (;;) {
253 		struct generic_pm_domain *genpd;
254 		struct pm_domain_data *pdd;
255 
256 		spin_lock_irq(&dev->power.lock);
257 
258 		pdd = dev->power.subsys_data ?
259 				dev->power.subsys_data->domain_data : NULL;
260 		if (pdd && pdd->dev) {
261 			to_gpd_data(pdd)->td.constraint_changed = true;
262 			genpd = dev_to_genpd(dev);
263 		} else {
264 			genpd = ERR_PTR(-ENODATA);
265 		}
266 
267 		spin_unlock_irq(&dev->power.lock);
268 
269 		if (!IS_ERR(genpd)) {
270 			mutex_lock(&genpd->lock);
271 			genpd->max_off_time_changed = true;
272 			mutex_unlock(&genpd->lock);
273 		}
274 
275 		dev = dev->parent;
276 		if (!dev || dev->power.ignore_children)
277 			break;
278 	}
279 
280 	return NOTIFY_DONE;
281 }
282 
283 /**
284  * genpd_poweroff - Remove power from a given PM domain.
285  * @genpd: PM domain to power down.
286  * @is_async: True when called from the scheduled power-off work item.
287  *
288  * If all of the @genpd's devices have been suspended and all of its subdomains
289  * have been powered down, remove power from @genpd.
290  */
291 static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
292 {
293 	struct pm_domain_data *pdd;
294 	struct gpd_link *link;
295 	unsigned int not_suspended = 0;
296 
297 	/*
298 	 * Do not try to power off the domain in the following situations:
299 	 * (1) The domain is already in the "power off" state.
300 	 * (2) System suspend is in progress.
301 	 */
302 	if (genpd->status == GPD_STATE_POWER_OFF
303 	    || genpd->prepared_count > 0)
304 		return 0;
305 
306 	if (atomic_read(&genpd->sd_count) > 0)
307 		return -EBUSY;
308 
309 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
310 		enum pm_qos_flags_status stat;
311 
312 		stat = dev_pm_qos_flags(pdd->dev,
313 					PM_QOS_FLAG_NO_POWER_OFF
314 						| PM_QOS_FLAG_REMOTE_WAKEUP);
315 		if (stat > PM_QOS_FLAGS_NONE)
316 			return -EBUSY;
317 
318 		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
319 			not_suspended++;
320 	}
321 
322 	if (not_suspended > 1 || (not_suspended == 1 && is_async))
323 		return -EBUSY;
324 
325 	if (genpd->gov && genpd->gov->power_down_ok) {
326 		if (!genpd->gov->power_down_ok(&genpd->domain))
327 			return -EAGAIN;
328 	}
329 
330 	if (genpd->power_off) {
331 		int ret;
332 
333 		if (atomic_read(&genpd->sd_count) > 0)
334 			return -EBUSY;
335 
336 		/*
337 		 * If sd_count > 0 at this point, one of the subdomains hasn't
338 		 * managed to call genpd_poweron() for the master yet after
339 		 * incrementing it.  In that case genpd_poweron() will wait
340 		 * for us to drop the lock, so we can call .power_off() and let
341 		 * the genpd_poweron() restore power for us (this shouldn't
342 		 * happen very often).
343 		 */
344 		ret = genpd_power_off(genpd, true);
345 		if (ret)
346 			return ret;
347 	}
348 
349 	genpd->status = GPD_STATE_POWER_OFF;
350 
351 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
352 		genpd_sd_counter_dec(link->master);
353 		genpd_queue_power_off_work(link->master);
354 	}
355 
356 	return 0;
357 }
358 
359 /**
360  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
361  * @work: Work structure used for scheduling the execution of this function.
362  */
363 static void genpd_power_off_work_fn(struct work_struct *work)
364 {
365 	struct generic_pm_domain *genpd;
366 
367 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
368 
369 	mutex_lock(&genpd->lock);
370 	genpd_poweroff(genpd, true);
371 	mutex_unlock(&genpd->lock);
372 }
373 
374 /**
375  * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
376  * @dev: Device to suspend.
377  *
378  * Carry out a runtime suspend of a device under the assumption that its
379  * pm_domain field points to the domain member of an object of type
380  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
381  */
382 static int pm_genpd_runtime_suspend(struct device *dev)
383 {
384 	struct generic_pm_domain *genpd;
385 	bool (*stop_ok)(struct device *__dev);
386 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
387 	bool runtime_pm = pm_runtime_enabled(dev);
388 	ktime_t time_start;
389 	s64 elapsed_ns;
390 	int ret;
391 
392 	dev_dbg(dev, "%s()\n", __func__);
393 
394 	genpd = dev_to_genpd(dev);
395 	if (IS_ERR(genpd))
396 		return -EINVAL;
397 
398 	/*
399 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
400 	 * callbacks for other purposes than runtime PM. In those scenarios
401 	 * runtime PM is disabled. Under these circumstances, we shall skip
402 	 * validating/measuring the PM QoS latency.
403 	 */
404 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
405 	if (runtime_pm && stop_ok && !stop_ok(dev))
406 		return -EBUSY;
407 
408 	/* Measure suspend latency. */
409 	if (runtime_pm)
410 		time_start = ktime_get();
411 
412 	ret = genpd_save_dev(genpd, dev);
413 	if (ret)
414 		return ret;
415 
416 	ret = genpd_stop_dev(genpd, dev);
417 	if (ret) {
418 		genpd_restore_dev(genpd, dev);
419 		return ret;
420 	}
421 
422 	/* Update suspend latency value if the measured time exceeds it. */
423 	if (runtime_pm) {
424 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
425 		if (elapsed_ns > td->suspend_latency_ns) {
426 			td->suspend_latency_ns = elapsed_ns;
427 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
428 				elapsed_ns);
429 			genpd->max_off_time_changed = true;
430 			td->constraint_changed = true;
431 		}
432 	}
433 
434 	/*
435 	 * If power.irq_safe is set, this routine will be run with interrupts
436 	 * off, so it can't use mutexes.
437 	 */
438 	if (dev->power.irq_safe)
439 		return 0;
440 
441 	mutex_lock(&genpd->lock);
442 	genpd_poweroff(genpd, false);
443 	mutex_unlock(&genpd->lock);
444 
445 	return 0;
446 }
447 
448 /**
449  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
450  * @dev: Device to resume.
451  *
452  * Carry out a runtime resume of a device under the assumption that its
453  * pm_domain field points to the domain member of an object of type
454  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
455  */
456 static int pm_genpd_runtime_resume(struct device *dev)
457 {
458 	struct generic_pm_domain *genpd;
459 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
460 	bool runtime_pm = pm_runtime_enabled(dev);
461 	ktime_t time_start;
462 	s64 elapsed_ns;
463 	int ret;
464 	bool timed = true;
465 
466 	dev_dbg(dev, "%s()\n", __func__);
467 
468 	genpd = dev_to_genpd(dev);
469 	if (IS_ERR(genpd))
470 		return -EINVAL;
471 
472 	/* If power.irq_safe, the PM domain is never powered off. */
473 	if (dev->power.irq_safe) {
474 		timed = false;
475 		goto out;
476 	}
477 
478 	mutex_lock(&genpd->lock);
479 	ret = genpd_poweron(genpd, 0);
480 	mutex_unlock(&genpd->lock);
481 
482 	if (ret)
483 		return ret;
484 
485  out:
486 	/* Measure resume latency. */
487 	if (timed && runtime_pm)
488 		time_start = ktime_get();
489 
490 	ret = genpd_start_dev(genpd, dev);
491 	if (ret)
492 		goto err_poweroff;
493 
494 	ret = genpd_restore_dev(genpd, dev);
495 	if (ret)
496 		goto err_stop;
497 
498 	/* Update resume latency value if the measured time exceeds it. */
499 	if (timed && runtime_pm) {
500 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
501 		if (elapsed_ns > td->resume_latency_ns) {
502 			td->resume_latency_ns = elapsed_ns;
503 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
504 				elapsed_ns);
505 			genpd->max_off_time_changed = true;
506 			td->constraint_changed = true;
507 		}
508 	}
509 
510 	return 0;
511 
512 err_stop:
513 	genpd_stop_dev(genpd, dev);
514 err_poweroff:
515 	if (!dev->power.irq_safe) {
516 		mutex_lock(&genpd->lock);
517 		genpd_poweroff(genpd, false);
518 		mutex_unlock(&genpd->lock);
519 	}
520 
521 	return ret;
522 }
523 
524 static bool pd_ignore_unused;
525 static int __init pd_ignore_unused_setup(char *__unused)
526 {
527 	pd_ignore_unused = true;
528 	return 1;
529 }
530 __setup("pd_ignore_unused", pd_ignore_unused_setup);
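
/*
 * Example: booting with "pd_ignore_unused" appended to the kernel command
 * line, e.g.
 *
 *	root=/dev/mmcblk0p2 rootwait pd_ignore_unused
 *
 * makes genpd_poweroff_unused() below leave all otherwise-unused PM domains
 * powered on, which can help while a platform's power-off sequences are still
 * being brought up. (The command line shown is illustrative only.)
 */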
531 
532 /**
533  * genpd_poweroff_unused - Power off all PM domains with no devices in use.
534  */
535 static int __init genpd_poweroff_unused(void)
536 {
537 	struct generic_pm_domain *genpd;
538 
539 	if (pd_ignore_unused) {
540 		pr_warn("genpd: Not disabling unused power domains\n");
541 		return 0;
542 	}
543 
544 	mutex_lock(&gpd_list_lock);
545 
546 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
547 		genpd_queue_power_off_work(genpd);
548 
549 	mutex_unlock(&gpd_list_lock);
550 
551 	return 0;
552 }
553 late_initcall(genpd_poweroff_unused);
554 
555 #ifdef CONFIG_PM_SLEEP
556 
557 /**
558  * pm_genpd_present - Check if the given PM domain has been initialized.
559  * @genpd: PM domain to check.
560  */
561 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
562 {
563 	const struct generic_pm_domain *gpd;
564 
565 	if (IS_ERR_OR_NULL(genpd))
566 		return false;
567 
568 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
569 		if (gpd == genpd)
570 			return true;
571 
572 	return false;
573 }
574 
575 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
576 				    struct device *dev)
577 {
578 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
579 }
580 
581 /**
582  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
583  * @genpd: PM domain to power off, if possible.
584  * @timed: True if latency measurements are allowed.
585  *
586  * Check if the given PM domain can be powered off (during system suspend or
587  * hibernation) and do that if so.  Also, in that case propagate to its masters.
588  *
589  * This function is only called in "noirq" and "syscore" stages of system power
590  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
591  * executed sequentially, so it is guaranteed that it will never run twice in
592  * parallel).
593  */
594 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
595 				   bool timed)
596 {
597 	struct gpd_link *link;
598 
599 	if (genpd->status == GPD_STATE_POWER_OFF)
600 		return;
601 
602 	if (genpd->suspended_count != genpd->device_count
603 	    || atomic_read(&genpd->sd_count) > 0)
604 		return;
605 
606 	/* Choose the deepest state when suspending */
607 	genpd->state_idx = genpd->state_count - 1;
608 	genpd_power_off(genpd, timed);
609 
610 	genpd->status = GPD_STATE_POWER_OFF;
611 
612 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
613 		genpd_sd_counter_dec(link->master);
614 		pm_genpd_sync_poweroff(link->master, timed);
615 	}
616 }
617 
618 /**
619  * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
620  * @genpd: PM domain to power on.
621  * @timed: True if latency measurements are allowed.
622  *
623  * This function is only called in "noirq" and "syscore" stages of system power
624  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
625  * executed sequentially, so it is guaranteed that it will never run twice in
626  * parallel).
627  */
628 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
629 				  bool timed)
630 {
631 	struct gpd_link *link;
632 
633 	if (genpd->status == GPD_STATE_ACTIVE)
634 		return;
635 
636 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
637 		pm_genpd_sync_poweron(link->master, timed);
638 		genpd_sd_counter_inc(link->master);
639 	}
640 
641 	genpd_power_on(genpd, timed);
642 
643 	genpd->status = GPD_STATE_ACTIVE;
644 }
645 
646 /**
647  * resume_needed - Check whether to resume a device before system suspend.
648  * @dev: Device to check.
649  * @genpd: PM domain the device belongs to.
650  *
651  * There are two cases in which a device that can wake up the system from sleep
652  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
653  * to wake up the system and it has to remain active for this purpose while the
654  * system is in the sleep state and (2) if the device is not enabled to wake up
655  * the system from sleep states and it generally doesn't generate wakeup signals
656  * by itself (those signals are generated on its behalf by other parts of the
657  * system).  In the latter case it may be necessary to reconfigure the device's
658  * wakeup settings during system suspend, because it may have been set up to
659  * signal remote wakeup from the system's working state as needed by runtime PM.
660  * Return 'true' in either of the above cases.
661  */
662 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
663 {
664 	bool active_wakeup;
665 
666 	if (!device_can_wakeup(dev))
667 		return false;
668 
669 	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
670 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
671 }
672 
673 /**
674  * pm_genpd_prepare - Start power transition of a device in a PM domain.
675  * @dev: Device to start the transition of.
676  *
677  * Start a power transition of a device (during a system-wide power transition)
678  * under the assumption that its pm_domain field points to the domain member of
679  * an object of type struct generic_pm_domain representing a PM domain
680  * consisting of I/O devices.
681  */
682 static int pm_genpd_prepare(struct device *dev)
683 {
684 	struct generic_pm_domain *genpd;
685 	int ret;
686 
687 	dev_dbg(dev, "%s()\n", __func__);
688 
689 	genpd = dev_to_genpd(dev);
690 	if (IS_ERR(genpd))
691 		return -EINVAL;
692 
693 	/*
694 	 * If a wakeup request is pending for the device, it should be woken up
695 	 * at this point and a system wakeup event should be reported if it's
696 	 * set up to wake up the system from sleep states.
697 	 */
698 	pm_runtime_get_noresume(dev);
699 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
700 		pm_wakeup_event(dev, 0);
701 
702 	if (pm_wakeup_pending()) {
703 		pm_runtime_put(dev);
704 		return -EBUSY;
705 	}
706 
707 	if (resume_needed(dev, genpd))
708 		pm_runtime_resume(dev);
709 
710 	mutex_lock(&genpd->lock);
711 
712 	if (genpd->prepared_count++ == 0) {
713 		genpd->suspended_count = 0;
714 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
715 	}
716 
717 	mutex_unlock(&genpd->lock);
718 
719 	if (genpd->suspend_power_off) {
720 		pm_runtime_put_noidle(dev);
721 		return 0;
722 	}
723 
724 	/*
725 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
726 	 * so genpd_poweron() will return immediately, but if the device
727 	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
728 	 * to make it operational.
729 	 */
730 	pm_runtime_resume(dev);
731 	__pm_runtime_disable(dev, false);
732 
733 	ret = pm_generic_prepare(dev);
734 	if (ret) {
735 		mutex_lock(&genpd->lock);
736 
737 		if (--genpd->prepared_count == 0)
738 			genpd->suspend_power_off = false;
739 
740 		mutex_unlock(&genpd->lock);
741 		pm_runtime_enable(dev);
742 	}
743 
744 	pm_runtime_put(dev);
745 	return ret;
746 }
747 
748 /**
749  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
750  * @dev: Device to suspend.
751  *
752  * Suspend a device under the assumption that its pm_domain field points to the
753  * domain member of an object of type struct generic_pm_domain representing
754  * a PM domain consisting of I/O devices.
755  */
756 static int pm_genpd_suspend(struct device *dev)
757 {
758 	struct generic_pm_domain *genpd;
759 
760 	dev_dbg(dev, "%s()\n", __func__);
761 
762 	genpd = dev_to_genpd(dev);
763 	if (IS_ERR(genpd))
764 		return -EINVAL;
765 
766 	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
767 }
768 
769 /**
770  * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
771  * @dev: Device to suspend.
772  *
773  * Carry out a late suspend of a device under the assumption that its
774  * pm_domain field points to the domain member of an object of type
775  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
776  */
777 static int pm_genpd_suspend_late(struct device *dev)
778 {
779 	struct generic_pm_domain *genpd;
780 
781 	dev_dbg(dev, "%s()\n", __func__);
782 
783 	genpd = dev_to_genpd(dev);
784 	if (IS_ERR(genpd))
785 		return -EINVAL;
786 
787 	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
788 }
789 
790 /**
791  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
792  * @dev: Device to suspend.
793  *
794  * Stop the device and remove power from the domain if all devices in it have
795  * been stopped.
796  */
797 static int pm_genpd_suspend_noirq(struct device *dev)
798 {
799 	struct generic_pm_domain *genpd;
800 
801 	dev_dbg(dev, "%s()\n", __func__);
802 
803 	genpd = dev_to_genpd(dev);
804 	if (IS_ERR(genpd))
805 		return -EINVAL;
806 
807 	if (genpd->suspend_power_off
808 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
809 		return 0;
810 
811 	genpd_stop_dev(genpd, dev);
812 
813 	/*
814 	 * Since all of the "noirq" callbacks are executed sequentially, it is
815 	 * guaranteed that this function will never run twice in parallel for
816 	 * the same PM domain, so it is not necessary to use locking here.
817 	 */
818 	genpd->suspended_count++;
819 	pm_genpd_sync_poweroff(genpd, true);
820 
821 	return 0;
822 }
823 
824 /**
825  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
826  * @dev: Device to resume.
827  *
828  * Restore power to the device's PM domain, if necessary, and start the device.
829  */
830 static int pm_genpd_resume_noirq(struct device *dev)
831 {
832 	struct generic_pm_domain *genpd;
833 
834 	dev_dbg(dev, "%s()\n", __func__);
835 
836 	genpd = dev_to_genpd(dev);
837 	if (IS_ERR(genpd))
838 		return -EINVAL;
839 
840 	if (genpd->suspend_power_off
841 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
842 		return 0;
843 
844 	/*
845 	 * Since all of the "noirq" callbacks are executed sequentially, it is
846 	 * guaranteed that this function will never run twice in parallel for
847 	 * the same PM domain, so it is not necessary to use locking here.
848 	 */
849 	pm_genpd_sync_poweron(genpd, true);
850 	genpd->suspended_count--;
851 
852 	return genpd_start_dev(genpd, dev);
853 }
854 
855 /**
856  * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
857  * @dev: Device to resume.
858  *
859  * Carry out an early resume of a device under the assumption that its
860  * pm_domain field points to the domain member of an object of type
861  * struct generic_pm_domain representing a power domain consisting of I/O
862  * devices.
863  */
864 static int pm_genpd_resume_early(struct device *dev)
865 {
866 	struct generic_pm_domain *genpd;
867 
868 	dev_dbg(dev, "%s()\n", __func__);
869 
870 	genpd = dev_to_genpd(dev);
871 	if (IS_ERR(genpd))
872 		return -EINVAL;
873 
874 	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
875 }
876 
877 /**
878  * pm_genpd_resume - Resume of device in an I/O PM domain.
879  * @dev: Device to resume.
880  *
881  * Resume a device under the assumption that its pm_domain field points to the
882  * domain member of an object of type struct generic_pm_domain representing
883  * a power domain consisting of I/O devices.
884  */
885 static int pm_genpd_resume(struct device *dev)
886 {
887 	struct generic_pm_domain *genpd;
888 
889 	dev_dbg(dev, "%s()\n", __func__);
890 
891 	genpd = dev_to_genpd(dev);
892 	if (IS_ERR(genpd))
893 		return -EINVAL;
894 
895 	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
896 }
897 
898 /**
899  * pm_genpd_freeze - Freezing a device in an I/O PM domain.
900  * @dev: Device to freeze.
901  *
902  * Freeze a device under the assumption that its pm_domain field points to the
903  * domain member of an object of type struct generic_pm_domain representing
904  * a power domain consisting of I/O devices.
905  */
906 static int pm_genpd_freeze(struct device *dev)
907 {
908 	struct generic_pm_domain *genpd;
909 
910 	dev_dbg(dev, "%s()\n", __func__);
911 
912 	genpd = dev_to_genpd(dev);
913 	if (IS_ERR(genpd))
914 		return -EINVAL;
915 
916 	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
917 }
918 
919 /**
920  * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
921  * @dev: Device to freeze.
922  *
923  * Carry out a late freeze of a device under the assumption that its
924  * pm_domain field points to the domain member of an object of type
925  * struct generic_pm_domain representing a power domain consisting of I/O
926  * devices.
927  */
928 static int pm_genpd_freeze_late(struct device *dev)
929 {
930 	struct generic_pm_domain *genpd;
931 
932 	dev_dbg(dev, "%s()\n", __func__);
933 
934 	genpd = dev_to_genpd(dev);
935 	if (IS_ERR(genpd))
936 		return -EINVAL;
937 
938 	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
939 }
940 
941 /**
942  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
943  * @dev: Device to freeze.
944  *
945  * Carry out a late freeze of a device under the assumption that its
946  * pm_domain field points to the domain member of an object of type
947  * struct generic_pm_domain representing a power domain consisting of I/O
948  * devices.
949  */
950 static int pm_genpd_freeze_noirq(struct device *dev)
951 {
952 	struct generic_pm_domain *genpd;
953 
954 	dev_dbg(dev, "%s()\n", __func__);
955 
956 	genpd = dev_to_genpd(dev);
957 	if (IS_ERR(genpd))
958 		return -EINVAL;
959 
960 	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
961 }
962 
963 /**
964  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
965  * @dev: Device to thaw.
966  *
967  * Start the device, unless power has been removed from the domain already
968  * before the system transition.
969  */
970 static int pm_genpd_thaw_noirq(struct device *dev)
971 {
972 	struct generic_pm_domain *genpd;
973 
974 	dev_dbg(dev, "%s()\n", __func__);
975 
976 	genpd = dev_to_genpd(dev);
977 	if (IS_ERR(genpd))
978 		return -EINVAL;
979 
980 	return genpd->suspend_power_off ?
981 		0 : genpd_start_dev(genpd, dev);
982 }
983 
984 /**
985  * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
986  * @dev: Device to thaw.
987  *
988  * Carry out an early thaw of a device under the assumption that its
989  * pm_domain field points to the domain member of an object of type
990  * struct generic_pm_domain representing a power domain consisting of I/O
991  * devices.
992  */
993 static int pm_genpd_thaw_early(struct device *dev)
994 {
995 	struct generic_pm_domain *genpd;
996 
997 	dev_dbg(dev, "%s()\n", __func__);
998 
999 	genpd = dev_to_genpd(dev);
1000 	if (IS_ERR(genpd))
1001 		return -EINVAL;
1002 
1003 	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
1004 }
1005 
1006 /**
1007  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1008  * @dev: Device to thaw.
1009  *
1010  * Thaw a device under the assumption that its pm_domain field points to the
1011  * domain member of an object of type struct generic_pm_domain representing
1012  * a power domain consisting of I/O devices.
1013  */
1014 static int pm_genpd_thaw(struct device *dev)
1015 {
1016 	struct generic_pm_domain *genpd;
1017 
1018 	dev_dbg(dev, "%s()\n", __func__);
1019 
1020 	genpd = dev_to_genpd(dev);
1021 	if (IS_ERR(genpd))
1022 		return -EINVAL;
1023 
1024 	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1025 }
1026 
1027 /**
1028  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1029  * @dev: Device to resume.
1030  *
1031  * Make sure the domain will be in the same power state as before the
1032  * hibernation the system is resuming from and start the device if necessary.
1033  */
1034 static int pm_genpd_restore_noirq(struct device *dev)
1035 {
1036 	struct generic_pm_domain *genpd;
1037 
1038 	dev_dbg(dev, "%s()\n", __func__);
1039 
1040 	genpd = dev_to_genpd(dev);
1041 	if (IS_ERR(genpd))
1042 		return -EINVAL;
1043 
1044 	/*
1045 	 * Since all of the "noirq" callbacks are executed sequentially, it is
1046 	 * guaranteed that this function will never run twice in parallel for
1047 	 * the same PM domain, so it is not necessary to use locking here.
1048 	 *
1049 	 * At this point suspended_count == 0 means we are being run for the
1050 	 * first time for the given domain in the present cycle.
1051 	 */
1052 	if (genpd->suspended_count++ == 0) {
1053 		/*
1054 		 * The boot kernel might have left the domain in an arbitrary state,
1055 		 * so make it appear as powered off to pm_genpd_sync_poweron(),
1056 		 * which then powers it on in case it was really off.
1057 		 */
1058 		genpd->status = GPD_STATE_POWER_OFF;
1059 		if (genpd->suspend_power_off) {
1060 			/*
1061 			 * If the domain was off before the hibernation, make
1062 			 * sure it will be off going forward.
1063 			 */
1064 			genpd_power_off(genpd, true);
1065 
1066 			return 0;
1067 		}
1068 	}
1069 
1070 	if (genpd->suspend_power_off)
1071 		return 0;
1072 
1073 	pm_genpd_sync_poweron(genpd, true);
1074 
1075 	return genpd_start_dev(genpd, dev);
1076 }
1077 
1078 /**
1079  * pm_genpd_complete - Complete power transition of a device in a power domain.
1080  * @dev: Device to complete the transition of.
1081  *
1082  * Complete a power transition of a device (during a system-wide power
1083  * transition) under the assumption that its pm_domain field points to the
1084  * domain member of an object of type struct generic_pm_domain representing
1085  * a power domain consisting of I/O devices.
1086  */
1087 static void pm_genpd_complete(struct device *dev)
1088 {
1089 	struct generic_pm_domain *genpd;
1090 	bool run_complete;
1091 
1092 	dev_dbg(dev, "%s()\n", __func__);
1093 
1094 	genpd = dev_to_genpd(dev);
1095 	if (IS_ERR(genpd))
1096 		return;
1097 
1098 	mutex_lock(&genpd->lock);
1099 
1100 	run_complete = !genpd->suspend_power_off;
1101 	if (--genpd->prepared_count == 0)
1102 		genpd->suspend_power_off = false;
1103 
1104 	mutex_unlock(&genpd->lock);
1105 
1106 	if (run_complete) {
1107 		pm_generic_complete(dev);
1108 		pm_runtime_set_active(dev);
1109 		pm_runtime_enable(dev);
1110 		pm_request_idle(dev);
1111 	}
1112 }
1113 
1114 /**
1115  * genpd_syscore_switch - Switch power during system core suspend or resume.
1116  * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: If true, power the device's PM domain off; otherwise power it on.
1117  *
1118  * This routine may only be called during the system core (syscore) suspend or
1119  * resume phase for devices whose "always on" flags are set.
1120  */
1121 static void genpd_syscore_switch(struct device *dev, bool suspend)
1122 {
1123 	struct generic_pm_domain *genpd;
1124 
1125 	genpd = dev_to_genpd(dev);
1126 	if (!pm_genpd_present(genpd))
1127 		return;
1128 
1129 	if (suspend) {
1130 		genpd->suspended_count++;
1131 		pm_genpd_sync_poweroff(genpd, false);
1132 	} else {
1133 		pm_genpd_sync_poweron(genpd, false);
1134 		genpd->suspended_count--;
1135 	}
1136 }
1137 
1138 void pm_genpd_syscore_poweroff(struct device *dev)
1139 {
1140 	genpd_syscore_switch(dev, true);
1141 }
1142 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1143 
1144 void pm_genpd_syscore_poweron(struct device *dev)
1145 {
1146 	genpd_syscore_switch(dev, false);
1147 }
1148 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
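
/*
 * Usage sketch (hypothetical driver; none of the foo_* symbols exist in this
 * file): a device handled in the syscore phase, such as a timekeeping device
 * that must keep running until the very end of suspend, can bracket its
 * low-level handlers with these helpers:
 *
 *	static void foo_timer_syscore_suspend(void)
 *	{
 *		foo_timer_stop();
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *	}
 *
 *	static void foo_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *		foo_timer_start();
 *	}
 *
 * where foo_timer_dev is the struct device * that belongs to the PM domain.
 */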
1149 
1150 #else /* !CONFIG_PM_SLEEP */
1151 
1152 #define pm_genpd_prepare		NULL
1153 #define pm_genpd_suspend		NULL
1154 #define pm_genpd_suspend_late		NULL
1155 #define pm_genpd_suspend_noirq		NULL
1156 #define pm_genpd_resume_early		NULL
1157 #define pm_genpd_resume_noirq		NULL
1158 #define pm_genpd_resume			NULL
1159 #define pm_genpd_freeze			NULL
1160 #define pm_genpd_freeze_late		NULL
1161 #define pm_genpd_freeze_noirq		NULL
1162 #define pm_genpd_thaw_early		NULL
1163 #define pm_genpd_thaw_noirq		NULL
1164 #define pm_genpd_thaw			NULL
1165 #define pm_genpd_restore_noirq		NULL
1166 #define pm_genpd_complete		NULL
1167 
1168 #endif /* CONFIG_PM_SLEEP */
1169 
1170 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1171 					struct generic_pm_domain *genpd,
1172 					struct gpd_timing_data *td)
1173 {
1174 	struct generic_pm_domain_data *gpd_data;
1175 	int ret;
1176 
1177 	ret = dev_pm_get_subsys_data(dev);
1178 	if (ret)
1179 		return ERR_PTR(ret);
1180 
1181 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1182 	if (!gpd_data) {
1183 		ret = -ENOMEM;
1184 		goto err_put;
1185 	}
1186 
1187 	if (td)
1188 		gpd_data->td = *td;
1189 
1190 	gpd_data->base.dev = dev;
1191 	gpd_data->td.constraint_changed = true;
1192 	gpd_data->td.effective_constraint_ns = -1;
1193 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1194 
1195 	spin_lock_irq(&dev->power.lock);
1196 
1197 	if (dev->power.subsys_data->domain_data) {
1198 		ret = -EINVAL;
1199 		goto err_free;
1200 	}
1201 
1202 	dev->power.subsys_data->domain_data = &gpd_data->base;
1203 
1204 	spin_unlock_irq(&dev->power.lock);
1205 
1206 	dev_pm_domain_set(dev, &genpd->domain);
1207 
1208 	return gpd_data;
1209 
1210  err_free:
1211 	spin_unlock_irq(&dev->power.lock);
1212 	kfree(gpd_data);
1213  err_put:
1214 	dev_pm_put_subsys_data(dev);
1215 	return ERR_PTR(ret);
1216 }
1217 
1218 static void genpd_free_dev_data(struct device *dev,
1219 				struct generic_pm_domain_data *gpd_data)
1220 {
1221 	dev_pm_domain_set(dev, NULL);
1222 
1223 	spin_lock_irq(&dev->power.lock);
1224 
1225 	dev->power.subsys_data->domain_data = NULL;
1226 
1227 	spin_unlock_irq(&dev->power.lock);
1228 
1229 	kfree(gpd_data);
1230 	dev_pm_put_subsys_data(dev);
1231 }
1232 
1233 /**
1234  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1235  * @genpd: PM domain to add the device to.
1236  * @dev: Device to be added.
1237  * @td: Set of PM QoS timing parameters to attach to the device.
1238  */
1239 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1240 			  struct gpd_timing_data *td)
1241 {
1242 	struct generic_pm_domain_data *gpd_data;
1243 	int ret = 0;
1244 
1245 	dev_dbg(dev, "%s()\n", __func__);
1246 
1247 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1248 		return -EINVAL;
1249 
1250 	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1251 	if (IS_ERR(gpd_data))
1252 		return PTR_ERR(gpd_data);
1253 
1254 	mutex_lock(&genpd->lock);
1255 
1256 	if (genpd->prepared_count > 0) {
1257 		ret = -EAGAIN;
1258 		goto out;
1259 	}
1260 
1261 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1262 	if (ret)
1263 		goto out;
1264 
1265 	genpd->device_count++;
1266 	genpd->max_off_time_changed = true;
1267 
1268 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1269 
1270  out:
1271 	mutex_unlock(&genpd->lock);
1272 
1273 	if (ret)
1274 		genpd_free_dev_data(dev, gpd_data);
1275 	else
1276 		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1277 
1278 	return ret;
1279 }
1280 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
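
/*
 * A minimal usage sketch (hypothetical names; foo_pd is assumed to have been
 * initialized with pm_genpd_init() and foo_pdev to be a registered platform
 * device):
 *
 *	ret = __pm_genpd_add_device(&foo_pd, &foo_pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&foo_pdev->dev, "failed to join PM domain: %d\n", ret);
 *
 * Passing a NULL gpd_timing_data makes the device start from default timing
 * data; see genpd_alloc_dev_data() above.
 */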
1281 
1282 /**
1283  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1284  * @genpd: PM domain to remove the device from.
1285  * @dev: Device to be removed.
1286  */
1287 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1288 			   struct device *dev)
1289 {
1290 	struct generic_pm_domain_data *gpd_data;
1291 	struct pm_domain_data *pdd;
1292 	int ret = 0;
1293 
1294 	dev_dbg(dev, "%s()\n", __func__);
1295 
1296 	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
1297 		return -EINVAL;
1298 
1299 	/* The above validation also means we have existing domain_data. */
1300 	pdd = dev->power.subsys_data->domain_data;
1301 	gpd_data = to_gpd_data(pdd);
1302 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1303 
1304 	mutex_lock(&genpd->lock);
1305 
1306 	if (genpd->prepared_count > 0) {
1307 		ret = -EAGAIN;
1308 		goto out;
1309 	}
1310 
1311 	genpd->device_count--;
1312 	genpd->max_off_time_changed = true;
1313 
1314 	if (genpd->detach_dev)
1315 		genpd->detach_dev(genpd, dev);
1316 
1317 	list_del_init(&pdd->list_node);
1318 
1319 	mutex_unlock(&genpd->lock);
1320 
1321 	genpd_free_dev_data(dev, gpd_data);
1322 
1323 	return 0;
1324 
1325  out:
1326 	mutex_unlock(&genpd->lock);
1327 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1328 
1329 	return ret;
1330 }
1331 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1332 
1333 /**
1334  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1335  * @genpd: Master PM domain to add the subdomain to.
1336  * @subdomain: Subdomain to be added.
1337  */
1338 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1339 			   struct generic_pm_domain *subdomain)
1340 {
1341 	struct gpd_link *link, *itr;
1342 	int ret = 0;
1343 
1344 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1345 	    || genpd == subdomain)
1346 		return -EINVAL;
1347 
1348 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1349 	if (!link)
1350 		return -ENOMEM;
1351 
1352 	mutex_lock(&subdomain->lock);
1353 	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1354 
1355 	if (genpd->status == GPD_STATE_POWER_OFF
1356 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1357 		ret = -EINVAL;
1358 		goto out;
1359 	}
1360 
1361 	list_for_each_entry(itr, &genpd->master_links, master_node) {
1362 		if (itr->slave == subdomain && itr->master == genpd) {
1363 			ret = -EINVAL;
1364 			goto out;
1365 		}
1366 	}
1367 
1368 	link->master = genpd;
1369 	list_add_tail(&link->master_node, &genpd->master_links);
1370 	link->slave = subdomain;
1371 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1372 	if (subdomain->status != GPD_STATE_POWER_OFF)
1373 		genpd_sd_counter_inc(genpd);
1374 
1375  out:
1376 	mutex_unlock(&genpd->lock);
1377 	mutex_unlock(&subdomain->lock);
1378 	if (ret)
1379 		kfree(link);
1380 	return ret;
1381 }
1382 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
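
/*
 * A hedged example of building a two-level domain hierarchy (domain names are
 * hypothetical; both are assumed to have been set up with pm_genpd_init()):
 *
 *	pm_genpd_init(&soc_pd, NULL, false);
 *	pm_genpd_init(&gpu_pd, NULL, true);
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *
 * After this, powering up gpu_pd also powers up soc_pd (see genpd_poweron()),
 * and soc_pd cannot be powered off while gpu_pd is still active, which is
 * tracked through soc_pd's sd_count.
 */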
1383 
1384 /**
1385  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1386  * @genpd: Master PM domain to remove the subdomain from.
1387  * @subdomain: Subdomain to be removed.
1388  */
1389 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1390 			      struct generic_pm_domain *subdomain)
1391 {
1392 	struct gpd_link *link;
1393 	int ret = -EINVAL;
1394 
1395 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1396 		return -EINVAL;
1397 
1398 	mutex_lock(&subdomain->lock);
1399 	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1400 
1401 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1402 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1403 			subdomain->name);
1404 		ret = -EBUSY;
1405 		goto out;
1406 	}
1407 
1408 	list_for_each_entry(link, &genpd->master_links, master_node) {
1409 		if (link->slave != subdomain)
1410 			continue;
1411 
1412 		list_del(&link->master_node);
1413 		list_del(&link->slave_node);
1414 		kfree(link);
1415 		if (subdomain->status != GPD_STATE_POWER_OFF)
1416 			genpd_sd_counter_dec(genpd);
1417 
1418 		ret = 0;
1419 		break;
1420 	}
1421 
1422 out:
1423 	mutex_unlock(&genpd->lock);
1424 	mutex_unlock(&subdomain->lock);
1425 
1426 	return ret;
1427 }
1428 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1429 
1430 /* Default device callbacks for generic PM domains. */
1431 
1432 /**
1433  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1434  * @dev: Device to handle.
1435  */
1436 static int pm_genpd_default_save_state(struct device *dev)
1437 {
1438 	int (*cb)(struct device *__dev);
1439 
1440 	if (dev->type && dev->type->pm)
1441 		cb = dev->type->pm->runtime_suspend;
1442 	else if (dev->class && dev->class->pm)
1443 		cb = dev->class->pm->runtime_suspend;
1444 	else if (dev->bus && dev->bus->pm)
1445 		cb = dev->bus->pm->runtime_suspend;
1446 	else
1447 		cb = NULL;
1448 
1449 	if (!cb && dev->driver && dev->driver->pm)
1450 		cb = dev->driver->pm->runtime_suspend;
1451 
1452 	return cb ? cb(dev) : 0;
1453 }
1454 
1455 /**
1456  * pm_genpd_default_restore_state - Default PM domains "restore device state".
1457  * @dev: Device to handle.
1458  */
1459 static int pm_genpd_default_restore_state(struct device *dev)
1460 {
1461 	int (*cb)(struct device *__dev);
1462 
1463 	if (dev->type && dev->type->pm)
1464 		cb = dev->type->pm->runtime_resume;
1465 	else if (dev->class && dev->class->pm)
1466 		cb = dev->class->pm->runtime_resume;
1467 	else if (dev->bus && dev->bus->pm)
1468 		cb = dev->bus->pm->runtime_resume;
1469 	else
1470 		cb = NULL;
1471 
1472 	if (!cb && dev->driver && dev->driver->pm)
1473 		cb = dev->driver->pm->runtime_resume;
1474 
1475 	return cb ? cb(dev) : 0;
1476 }
1477 
1478 /**
1479  * pm_genpd_init - Initialize a generic I/O PM domain object.
1480  * @genpd: PM domain object to initialize.
1481  * @gov: PM domain governor to associate with the domain (may be NULL).
1482  * @is_off: Initial state of the domain: true if it starts powered off.
1483  */
1484 void pm_genpd_init(struct generic_pm_domain *genpd,
1485 		   struct dev_power_governor *gov, bool is_off)
1486 {
1487 	if (IS_ERR_OR_NULL(genpd))
1488 		return;
1489 
1490 	INIT_LIST_HEAD(&genpd->master_links);
1491 	INIT_LIST_HEAD(&genpd->slave_links);
1492 	INIT_LIST_HEAD(&genpd->dev_list);
1493 	mutex_init(&genpd->lock);
1494 	genpd->gov = gov;
1495 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1496 	atomic_set(&genpd->sd_count, 0);
1497 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1498 	genpd->device_count = 0;
1499 	genpd->max_off_time_ns = -1;
1500 	genpd->max_off_time_changed = true;
1501 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1502 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1503 	genpd->domain.ops.prepare = pm_genpd_prepare;
1504 	genpd->domain.ops.suspend = pm_genpd_suspend;
1505 	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1506 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1507 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1508 	genpd->domain.ops.resume_early = pm_genpd_resume_early;
1509 	genpd->domain.ops.resume = pm_genpd_resume;
1510 	genpd->domain.ops.freeze = pm_genpd_freeze;
1511 	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1512 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1513 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1514 	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1515 	genpd->domain.ops.thaw = pm_genpd_thaw;
1516 	genpd->domain.ops.poweroff = pm_genpd_suspend;
1517 	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1518 	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1519 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1520 	genpd->domain.ops.restore_early = pm_genpd_resume_early;
1521 	genpd->domain.ops.restore = pm_genpd_resume;
1522 	genpd->domain.ops.complete = pm_genpd_complete;
1523 	genpd->dev_ops.save_state = pm_genpd_default_save_state;
1524 	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1525 
1526 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1527 		genpd->dev_ops.stop = pm_clk_suspend;
1528 		genpd->dev_ops.start = pm_clk_resume;
1529 	}
1530 
1531 	if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
1532 		pr_warn("Initial state index out of bounds.\n");
1533 		genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
1534 	}
1535 
1536 	if (genpd->state_count > GENPD_MAX_NUM_STATES) {
1537 		pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
1538 		genpd->state_count = GENPD_MAX_NUM_STATES;
1539 	}
1540 
1541 	/* Use only one "off" state if there were no states declared */
1542 	if (genpd->state_count == 0)
1543 		genpd->state_count = 1;
1544 
1545 	mutex_lock(&gpd_list_lock);
1546 	list_add(&genpd->gpd_list_node, &gpd_list);
1547 	mutex_unlock(&gpd_list_lock);
1548 }
1549 EXPORT_SYMBOL_GPL(pm_genpd_init);
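
/*
 * A minimal initialization sketch for a platform-specific domain. All foo_*
 * names below are hypothetical; only the generic_pm_domain fields and
 * pm_genpd_init() itself come from this file:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return foo_pmu_set(domain->name, true);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return foo_pmu_set(domain->name, false);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 *
 * A NULL governor means no power_down_ok()/stop_ok() checks are applied, and
 * is_off == true makes the domain start in the GPD_STATE_POWER_OFF state.
 */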
1550 
1551 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1552 /*
1553  * Device Tree based PM domain providers.
1554  *
1555  * The code below implements generic device tree based PM domain providers that
1556  * bind device tree nodes with generic PM domains registered in the system.
1557  *
1558  * Any driver that registers generic PM domains and needs to support binding of
1559  * devices to these domains is supposed to register a PM domain provider, which
1560  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1561  *
1562  * Two simple mapping functions have been provided for convenience:
1563  *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1564  *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1565  *    index.
1566  */
1567 
1568 /**
1569  * struct of_genpd_provider - PM domain provider registration structure
1570  * @link: Entry in global list of PM domain providers
1571  * @node: Pointer to device tree node of PM domain provider
1572  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1573  *         into a PM domain.
1574  * @data: context pointer to be passed into @xlate callback
1575  */
1576 struct of_genpd_provider {
1577 	struct list_head link;
1578 	struct device_node *node;
1579 	genpd_xlate_t xlate;
1580 	void *data;
1581 };
1582 
1583 /* List of registered PM domain providers. */
1584 static LIST_HEAD(of_genpd_providers);
1585 /* Mutex to protect the list above. */
1586 static DEFINE_MUTEX(of_genpd_mutex);
1587 
1588 /**
1589  * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1590  * @genpdspec: OF phandle args to map into a PM domain
1591  * @data: xlate function private data - pointer to struct generic_pm_domain
1592  *
1593  * This is a generic xlate function that can be used to model PM domains that
1594  * have their own device tree nodes. The private data of xlate function needs
1595  * to be a valid pointer to struct generic_pm_domain.
1596  */
1597 struct generic_pm_domain *__of_genpd_xlate_simple(
1598 					struct of_phandle_args *genpdspec,
1599 					void *data)
1600 {
1601 	if (genpdspec->args_count != 0)
1602 		return ERR_PTR(-EINVAL);
1603 	return data;
1604 }
1605 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
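
/*
 * A hedged example of a simple provider: a controller that exposes exactly one
 * PM domain. The DT snippet and foo_* names are illustrative:
 *
 *	power: power-controller@10001000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x10001000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	__of_genpd_add_provider(np, __of_genpd_xlate_simple, &foo_pd);
 *
 * With zero cells, consumers reference the domain as "power-domains = <&power>;"
 * and the xlate callback above just returns its data pointer.
 */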
1606 
1607 /**
1608  * __of_genpd_xlate_onecell() - Xlate function using a single index.
1609  * @genpdspec: OF phandle args to map into a PM domain
1610  * @data: xlate function private data - pointer to struct genpd_onecell_data
1611  *
1612  * This is a generic xlate function that can be used to model simple PM domain
1613  * controllers that have one device tree node and provide multiple PM domains.
1614  * A single cell is used as an index into an array of PM domains specified in
1615  * the genpd_onecell_data struct when registering the provider.
1616  */
1617 struct generic_pm_domain *__of_genpd_xlate_onecell(
1618 					struct of_phandle_args *genpdspec,
1619 					void *data)
1620 {
1621 	struct genpd_onecell_data *genpd_data = data;
1622 	unsigned int idx = genpdspec->args[0];
1623 
1624 	if (genpdspec->args_count != 1)
1625 		return ERR_PTR(-EINVAL);
1626 
1627 	if (idx >= genpd_data->num_domains) {
1628 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1629 		return ERR_PTR(-EINVAL);
1630 	}
1631 
1632 	if (!genpd_data->domains[idx])
1633 		return ERR_PTR(-ENOENT);
1634 
1635 	return genpd_data->domains[idx];
1636 }
1637 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
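
/*
 * A hedged example of a onecell provider (all foo_* names and the DT snippet
 * are illustrative):
 *
 *	power: power-controller@10002000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x10002000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	static struct generic_pm_domain *foo_domains[] = { &foo_pd_a, &foo_pd_b };
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	__of_genpd_add_provider(np, __of_genpd_xlate_onecell, &foo_pd_data);
 *
 * A consumer then selects a domain by index, e.g. "power-domains = <&power 1>;"
 * maps to foo_domains[1].
 */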
1638 
1639 /**
1640  * __of_genpd_add_provider() - Register a PM domain provider for a node
1641  * @np: Device node pointer associated with the PM domain provider.
1642  * @xlate: Callback for decoding PM domain from phandle arguments.
1643  * @data: Context pointer for @xlate callback.
1644  */
1645 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1646 			void *data)
1647 {
1648 	struct of_genpd_provider *cp;
1649 
1650 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1651 	if (!cp)
1652 		return -ENOMEM;
1653 
1654 	cp->node = of_node_get(np);
1655 	cp->data = data;
1656 	cp->xlate = xlate;
1657 
1658 	mutex_lock(&of_genpd_mutex);
1659 	list_add(&cp->link, &of_genpd_providers);
1660 	mutex_unlock(&of_genpd_mutex);
1661 	pr_debug("Added domain provider from %s\n", np->full_name);
1662 
1663 	return 0;
1664 }
1665 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
1666 
1667 /**
1668  * of_genpd_del_provider() - Remove a previously registered PM domain provider
1669  * @np: Device node pointer associated with the PM domain provider
1670  */
1671 void of_genpd_del_provider(struct device_node *np)
1672 {
1673 	struct of_genpd_provider *cp;
1674 
1675 	mutex_lock(&of_genpd_mutex);
1676 	list_for_each_entry(cp, &of_genpd_providers, link) {
1677 		if (cp->node == np) {
1678 			list_del(&cp->link);
1679 			of_node_put(cp->node);
1680 			kfree(cp);
1681 			break;
1682 		}
1683 	}
1684 	mutex_unlock(&of_genpd_mutex);
1685 }
1686 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1687 
1688 /**
1689  * of_genpd_get_from_provider() - Look-up PM domain
1690  * @genpdspec: OF phandle args to use for look-up
1691  *
1692  * Looks for a PM domain provider under the node specified by @genpdspec and if
1693  * found, uses xlate function of the provider to map phandle args to a PM
1694  * domain.
1695  *
1696  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1697  * on failure.
1698  */
1699 struct generic_pm_domain *of_genpd_get_from_provider(
1700 					struct of_phandle_args *genpdspec)
1701 {
1702 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1703 	struct of_genpd_provider *provider;
1704 
1705 	if (!genpdspec)
1706 		return ERR_PTR(-EINVAL);
1707 
1708 	mutex_lock(&of_genpd_mutex);
1709 
1710 	/* Check if we have such a provider in our list */
1711 	list_for_each_entry(provider, &of_genpd_providers, link) {
1712 		if (provider->node == genpdspec->np)
1713 			genpd = provider->xlate(genpdspec, provider->data);
1714 		if (!IS_ERR(genpd))
1715 			break;
1716 	}
1717 
1718 	mutex_unlock(&of_genpd_mutex);
1719 
1720 	return genpd;
1721 }
1722 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
1723 
1724 /**
1725  * genpd_dev_pm_detach - Detach a device from its PM domain.
1726  * @dev: Device to detach.
1727  * @power_off: Currently not used
1728  *
1729  * Try to locate a corresponding generic PM domain, which the device was
1730  * attached to previously. If such is found, the device is detached from it.
1731  */
1732 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1733 {
1734 	struct generic_pm_domain *pd;
1735 	unsigned int i;
1736 	int ret = 0;
1737 
1738 	pd = pm_genpd_lookup_dev(dev);
1739 	if (!pd)
1740 		return;
1741 
1742 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1743 
1744 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1745 		ret = pm_genpd_remove_device(pd, dev);
1746 		if (ret != -EAGAIN)
1747 			break;
1748 
1749 		mdelay(i);
1750 		cond_resched();
1751 	}
1752 
1753 	if (ret < 0) {
1754 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
1755 			pd->name, ret);
1756 		return;
1757 	}
1758 
1759 	/* Check if PM domain can be powered off after removing this device. */
1760 	genpd_queue_power_off_work(pd);
1761 }
1762 
1763 static void genpd_dev_pm_sync(struct device *dev)
1764 {
1765 	struct generic_pm_domain *pd;
1766 
1767 	pd = dev_to_genpd(dev);
1768 	if (IS_ERR(pd))
1769 		return;
1770 
1771 	genpd_queue_power_off_work(pd);
1772 }
1773 
1774 /**
1775  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1776  * @dev: Device to attach.
1777  *
1778  * Parse the device's OF node to find a PM domain specifier. If one is found,
1779  * attach the device to the PM domain retrieved from it.
1780  *
1781  * Both generic and legacy Samsung-specific DT bindings are supported to keep
1782  * backwards compatibility with existing DTBs.
1783  *
1784  * Returns 0 when the PM domain is successfully attached, or a negative error
1785  * code otherwise. Note that if a power-domain exists for the device, but it
1786  * cannot be found or turned on, -EPROBE_DEFER is returned so that the device
1787  * is not probed and the attach is retried later.
1788  */
1789 int genpd_dev_pm_attach(struct device *dev)
1790 {
1791 	struct of_phandle_args pd_args;
1792 	struct generic_pm_domain *pd;
1793 	unsigned int i;
1794 	int ret;
1795 
1796 	if (!dev->of_node)
1797 		return -ENODEV;
1798 
1799 	if (dev->pm_domain)
1800 		return -EEXIST;
1801 
1802 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1803 					"#power-domain-cells", 0, &pd_args);
1804 	if (ret < 0) {
1805 		if (ret != -ENOENT)
1806 			return ret;
1807 
1808 		/*
1809 		 * Try legacy Samsung-specific bindings
1810 		 * (for backwards compatibility of DT ABI)
1811 		 */
1812 		pd_args.args_count = 0;
1813 		pd_args.np = of_parse_phandle(dev->of_node,
1814 						"samsung,power-domain", 0);
1815 		if (!pd_args.np)
1816 			return -ENOENT;
1817 	}
1818 
1819 	pd = of_genpd_get_from_provider(&pd_args);
1820 	of_node_put(pd_args.np);
1821 	if (IS_ERR(pd)) {
1822 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
1823 			__func__, PTR_ERR(pd));
1824 		return -EPROBE_DEFER;
1825 	}
1826 
1827 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
1828 
1829 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1830 		ret = pm_genpd_add_device(pd, dev);
1831 		if (ret != -EAGAIN)
1832 			break;
1833 
1834 		mdelay(i);
1835 		cond_resched();
1836 	}
1837 
1838 	if (ret < 0) {
1839 		dev_err(dev, "failed to add to PM domain %s: %d\n",
1840 			pd->name, ret);
1841 		goto out;
1842 	}
1843 
1844 	dev->pm_domain->detach = genpd_dev_pm_detach;
1845 	dev->pm_domain->sync = genpd_dev_pm_sync;
1846 
1847 	mutex_lock(&pd->lock);
1848 	ret = genpd_poweron(pd, 0);
1849 	mutex_unlock(&pd->lock);
1850 out:
1851 	return ret ? -EPROBE_DEFER : 0;
1852 }
1853 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
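
/*
 * For reference, a hedged sketch of the consumer side that this function
 * parses (node names and the provider phandle are illustrative):
 *
 *	serial@12345000 {
 *		compatible = "foo,uart";
 *		reg = <0x12345000 0x100>;
 *		power-domains = <&power 0>;
 *	};
 *
 * genpd_dev_pm_attach() is typically invoked by bus code (e.g. through
 * dev_pm_domain_attach()) before the driver probes: the "power-domains"
 * phandle is resolved through the provider's xlate callback, the device is
 * added to the resulting domain, and the domain is powered on via
 * genpd_poweron() before probing continues.
 */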
1854 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
1855 
1856 
1857 /***        debugfs support        ***/
1858 
1859 #ifdef CONFIG_PM_ADVANCED_DEBUG
1860 #include <linux/pm.h>
1861 #include <linux/device.h>
1862 #include <linux/debugfs.h>
1863 #include <linux/seq_file.h>
1864 #include <linux/init.h>
1865 #include <linux/kobject.h>
1866 static struct dentry *pm_genpd_debugfs_dir;
1867 
1868 /*
1869  * TODO: This function is a slightly modified version of rtpm_status_show
1870  * from sysfs.c, so generalize it.
1871  */
1872 static void rtpm_status_str(struct seq_file *s, struct device *dev)
1873 {
1874 	static const char * const status_lookup[] = {
1875 		[RPM_ACTIVE] = "active",
1876 		[RPM_RESUMING] = "resuming",
1877 		[RPM_SUSPENDED] = "suspended",
1878 		[RPM_SUSPENDING] = "suspending"
1879 	};
1880 	const char *p = "";
1881 
1882 	if (dev->power.runtime_error)
1883 		p = "error";
1884 	else if (dev->power.disable_depth)
1885 		p = "unsupported";
1886 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
1887 		p = status_lookup[dev->power.runtime_status];
1888 	else
1889 		WARN_ON(1);
1890 
1891 	seq_puts(s, p);
1892 }
1893 
1894 static int pm_genpd_summary_one(struct seq_file *s,
1895 				struct generic_pm_domain *genpd)
1896 {
1897 	static const char * const status_lookup[] = {
1898 		[GPD_STATE_ACTIVE] = "on",
1899 		[GPD_STATE_POWER_OFF] = "off"
1900 	};
1901 	struct pm_domain_data *pm_data;
1902 	const char *kobj_path;
1903 	struct gpd_link *link;
1904 	char state[16];
1905 	int ret;
1906 
1907 	ret = mutex_lock_interruptible(&genpd->lock);
1908 	if (ret)
1909 		return -ERESTARTSYS;
1910 
1911 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
1912 		goto exit;
1913 	if (genpd->status == GPD_STATE_POWER_OFF)
1914 		snprintf(state, sizeof(state), "%s-%u",
1915 			 status_lookup[genpd->status], genpd->state_idx);
1916 	else
1917 		snprintf(state, sizeof(state), "%s",
1918 			 status_lookup[genpd->status]);
1919 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
1920 
1921 	/*
1922 	 * Modifications on the list require holding locks on both
1923 	 * master and slave, so we are safe.
1924 	 * Also genpd->name is immutable.
1925 	 */
1926 	list_for_each_entry(link, &genpd->master_links, master_node) {
1927 		seq_printf(s, "%s", link->slave->name);
1928 		if (!list_is_last(&link->master_node, &genpd->master_links))
1929 			seq_puts(s, ", ");
1930 	}
1931 
1932 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
1933 		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
1934 		if (kobj_path == NULL)
1935 			continue;
1936 
1937 		seq_printf(s, "\n    %-50s  ", kobj_path);
1938 		rtpm_status_str(s, pm_data->dev);
1939 		kfree(kobj_path);
1940 	}
1941 
1942 	seq_puts(s, "\n");
1943 exit:
1944 	mutex_unlock(&genpd->lock);
1945 
1946 	return 0;
1947 }
1948 
1949 static int pm_genpd_summary_show(struct seq_file *s, void *data)
1950 {
1951 	struct generic_pm_domain *genpd;
1952 	int ret = 0;
1953 
1954 	seq_puts(s, "domain                          status          slaves\n");
1955 	seq_puts(s, "    /device                                             runtime status\n");
1956 	seq_puts(s, "----------------------------------------------------------------------\n");
1957 
1958 	ret = mutex_lock_interruptible(&gpd_list_lock);
1959 	if (ret)
1960 		return -ERESTARTSYS;
1961 
1962 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
1963 		ret = pm_genpd_summary_one(s, genpd);
1964 		if (ret)
1965 			break;
1966 	}
1967 	mutex_unlock(&gpd_list_lock);
1968 
1969 	return ret;
1970 }
1971 
1972 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
1973 {
1974 	return single_open(file, pm_genpd_summary_show, NULL);
1975 }
1976 
1977 static const struct file_operations pm_genpd_summary_fops = {
1978 	.open = pm_genpd_summary_open,
1979 	.read = seq_read,
1980 	.llseek = seq_lseek,
1981 	.release = single_release,
1982 };
1983 
1984 static int __init pm_genpd_debug_init(void)
1985 {
1986 	struct dentry *d;
1987 
1988 	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
1989 
1990 	if (!pm_genpd_debugfs_dir)
1991 		return -ENOMEM;
1992 
1993 	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
1994 			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
1995 	if (!d)
1996 		return -ENOMEM;
1997 
1998 	return 0;
1999 }
2000 late_initcall(pm_genpd_debug_init);
2001 
2002 static void __exit pm_genpd_debug_exit(void)
2003 {
2004 	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2005 }
2006 __exitcall(pm_genpd_debug_exit);
2007 #endif /* CONFIG_PM_ADVANCED_DEBUG */
2008