xref: /openbmc/linux/drivers/base/power/domain.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22 
23 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
24 
25 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
26 ({								\
27 	type (*__routine)(struct device *__d); 			\
28 	type __ret = (type)0;					\
29 								\
30 	__routine = genpd->dev_ops.callback; 			\
31 	if (__routine) {					\
32 		__ret = __routine(dev); 			\
33 	}							\
34 	__ret;							\
35 })
36 
37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
38 ({										\
39 	ktime_t __start = ktime_get();						\
40 	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
41 	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
42 	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
43 	if (!__retval && __elapsed > __td->field) {				\
44 		__td->field = __elapsed;					\
45 		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
46 			__elapsed);						\
47 		genpd->max_off_time_changed = true;				\
48 		__td->constraint_changed = true;				\
49 	}									\
50 	__retval;								\
51 })
52 
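/*
 * Illustrative sketch (not part of the original file): what a call such as
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) effectively expands to.  The
 * local variable names are the macro's own; the surrounding code is only
 * for illustration.
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	// __ret is the value of the whole ({ ... }) statement expression
 *
 * GENPD_DEV_TIMED_CALLBACK() additionally brackets the call with
 * ktime_get() and, when the callback succeeds, records a new worst-case
 * latency in the device's gpd_timing_data member named by @field
 * (e.g. stop_latency_ns) and marks the domain's constraints as changed.
 */
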
53 static LIST_HEAD(gpd_list);
54 static DEFINE_MUTEX(gpd_list_lock);
55 
56 static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
57 {
58 	struct generic_pm_domain *genpd = NULL, *gpd;
59 
60 	if (IS_ERR_OR_NULL(domain_name))
61 		return NULL;
62 
63 	mutex_lock(&gpd_list_lock);
64 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
65 		if (!strcmp(gpd->name, domain_name)) {
66 			genpd = gpd;
67 			break;
68 		}
69 	}
70 	mutex_unlock(&gpd_list_lock);
71 	return genpd;
72 }
73 
74 /*
75  * Get the generic PM domain for a particular struct device.
76  * This validates the struct device pointer, the PM domain pointer,
77  * and checks that the PM domain pointer is a real generic PM domain.
78  * Any failure results in NULL being returned.
79  */
80 struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
81 {
82 	struct generic_pm_domain *genpd = NULL, *gpd;
83 
84 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
85 		return NULL;
86 
87 	mutex_lock(&gpd_list_lock);
88 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
89 		if (&gpd->domain == dev->pm_domain) {
90 			genpd = gpd;
91 			break;
92 		}
93 	}
94 	mutex_unlock(&gpd_list_lock);
95 
96 	return genpd;
97 }
98 
99 /*
100  * This should only be used where we are certain that the pm_domain
101  * attached to the device is a genpd domain.
102  */
103 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
104 {
105 	if (IS_ERR_OR_NULL(dev->pm_domain))
106 		return ERR_PTR(-EINVAL);
107 
108 	return pd_to_genpd(dev->pm_domain);
109 }
110 
111 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
112 {
113 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
114 					stop_latency_ns, "stop");
115 }
116 
117 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
118 			bool timed)
119 {
120 	if (!timed)
121 		return GENPD_DEV_CALLBACK(genpd, int, start, dev);
122 
123 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
124 					start_latency_ns, "start");
125 }
126 
127 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
128 {
129 	bool ret = false;
130 
131 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
132 		ret = !!atomic_dec_and_test(&genpd->sd_count);
133 
134 	return ret;
135 }
136 
137 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
138 {
139 	atomic_inc(&genpd->sd_count);
140 	smp_mb__after_atomic();
141 }
142 
143 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
144 {
145 	s64 usecs64;
146 
147 	if (!genpd->cpuidle_data)
148 		return;
149 
150 	usecs64 = genpd->power_on_latency_ns;
151 	do_div(usecs64, NSEC_PER_USEC);
152 	usecs64 += genpd->cpuidle_data->saved_exit_latency;
153 	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
154 }
155 
156 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
157 {
158 	ktime_t time_start;
159 	s64 elapsed_ns;
160 	int ret;
161 
162 	if (!genpd->power_on)
163 		return 0;
164 
165 	if (!timed)
166 		return genpd->power_on(genpd);
167 
168 	time_start = ktime_get();
169 	ret = genpd->power_on(genpd);
170 	if (ret)
171 		return ret;
172 
173 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
174 	if (elapsed_ns <= genpd->power_on_latency_ns)
175 		return ret;
176 
177 	genpd->power_on_latency_ns = elapsed_ns;
178 	genpd->max_off_time_changed = true;
179 	genpd_recalc_cpu_exit_latency(genpd);
180 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
181 		 genpd->name, "on", elapsed_ns);
182 
183 	return ret;
184 }
185 
186 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
187 {
188 	ktime_t time_start;
189 	s64 elapsed_ns;
190 	int ret;
191 
192 	if (!genpd->power_off)
193 		return 0;
194 
195 	if (!timed)
196 		return genpd->power_off(genpd);
197 
198 	time_start = ktime_get();
199 	ret = genpd->power_off(genpd);
200 	if (ret == -EBUSY)
201 		return ret;
202 
203 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
204 	if (elapsed_ns <= genpd->power_off_latency_ns)
205 		return ret;
206 
207 	genpd->power_off_latency_ns = elapsed_ns;
208 	genpd->max_off_time_changed = true;
209 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
210 		 genpd->name, "off", elapsed_ns);
211 
212 	return ret;
213 }
214 
215 /**
216  * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
217  * @genpd: PM domain to power off.
218  *
219  * Queue up the execution of pm_genpd_poweroff() unless it's already been done
220  * before.
221  */
222 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
223 {
224 	queue_work(pm_wq, &genpd->power_off_work);
225 }
226 
227 /**
228  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
229  * @genpd: PM domain to power up.
230  *
231  * Restore power to @genpd and all of its masters so that it is possible to
232  * resume a device belonging to it.
233  */
234 static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
235 {
236 	struct gpd_link *link;
237 	int ret = 0;
238 
239 	if (genpd->status == GPD_STATE_ACTIVE
240 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
241 		return 0;
242 
243 	if (genpd->cpuidle_data) {
244 		cpuidle_pause_and_lock();
245 		genpd->cpuidle_data->idle_state->disabled = true;
246 		cpuidle_resume_and_unlock();
247 		goto out;
248 	}
249 
250 	/*
251 	 * The list is guaranteed not to change while the loop below is being
252 	 * executed, unless one of the masters' .power_on() callbacks fiddles
253 	 * with it.
254 	 */
255 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
256 		genpd_sd_counter_inc(link->master);
257 
258 		ret = pm_genpd_poweron(link->master);
259 		if (ret) {
260 			genpd_sd_counter_dec(link->master);
261 			goto err;
262 		}
263 	}
264 
265 	ret = genpd_power_on(genpd, true);
266 	if (ret)
267 		goto err;
268 
269  out:
270 	genpd->status = GPD_STATE_ACTIVE;
271 	return 0;
272 
273  err:
274 	list_for_each_entry_continue_reverse(link,
275 					&genpd->slave_links,
276 					slave_node) {
277 		genpd_sd_counter_dec(link->master);
278 		genpd_queue_power_off_work(link->master);
279 	}
280 
281 	return ret;
282 }
283 
284 /**
285  * pm_genpd_poweron - Restore power to a given PM domain and its masters.
286  * @genpd: PM domain to power up.
287  */
288 int pm_genpd_poweron(struct generic_pm_domain *genpd)
289 {
290 	int ret;
291 
292 	mutex_lock(&genpd->lock);
293 	ret = __pm_genpd_poweron(genpd);
294 	mutex_unlock(&genpd->lock);
295 	return ret;
296 }
297 
298 /**
299  * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
300  * @domain_name: Name of the PM domain to power up.
301  */
302 int pm_genpd_name_poweron(const char *domain_name)
303 {
304 	struct generic_pm_domain *genpd;
305 
306 	genpd = pm_genpd_lookup_name(domain_name);
307 	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
308 }
309 
310 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
311 {
312 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
313 					save_state_latency_ns, "state save");
314 }
315 
316 static int genpd_restore_dev(struct generic_pm_domain *genpd,
317 			struct device *dev, bool timed)
318 {
319 	if (!timed)
320 		return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
321 
322 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
323 					restore_state_latency_ns,
324 					"state restore");
325 }
326 
327 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
328 				     unsigned long val, void *ptr)
329 {
330 	struct generic_pm_domain_data *gpd_data;
331 	struct device *dev;
332 
333 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
334 	dev = gpd_data->base.dev;
335 
336 	for (;;) {
337 		struct generic_pm_domain *genpd;
338 		struct pm_domain_data *pdd;
339 
340 		spin_lock_irq(&dev->power.lock);
341 
342 		pdd = dev->power.subsys_data ?
343 				dev->power.subsys_data->domain_data : NULL;
344 		if (pdd && pdd->dev) {
345 			to_gpd_data(pdd)->td.constraint_changed = true;
346 			genpd = dev_to_genpd(dev);
347 		} else {
348 			genpd = ERR_PTR(-ENODATA);
349 		}
350 
351 		spin_unlock_irq(&dev->power.lock);
352 
353 		if (!IS_ERR(genpd)) {
354 			mutex_lock(&genpd->lock);
355 			genpd->max_off_time_changed = true;
356 			mutex_unlock(&genpd->lock);
357 		}
358 
359 		dev = dev->parent;
360 		if (!dev || dev->power.ignore_children)
361 			break;
362 	}
363 
364 	return NOTIFY_DONE;
365 }
366 
367 /**
368  * pm_genpd_poweroff - Remove power from a given PM domain.
369  * @genpd: PM domain to power down.
370  *
371  * If all of the @genpd's devices have been suspended and all of its subdomains
372  * have been powered down, remove power from @genpd.
373  */
374 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
375 {
376 	struct pm_domain_data *pdd;
377 	struct gpd_link *link;
378 	unsigned int not_suspended = 0;
379 
380 	/*
381 	 * Do not try to power off the domain in the following situations:
382 	 * (1) The domain is already in the "power off" state.
383 	 * (2) System suspend is in progress.
384 	 */
385 	if (genpd->status == GPD_STATE_POWER_OFF
386 	    || genpd->prepared_count > 0)
387 		return 0;
388 
389 	if (atomic_read(&genpd->sd_count) > 0)
390 		return -EBUSY;
391 
392 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
393 		enum pm_qos_flags_status stat;
394 
395 		stat = dev_pm_qos_flags(pdd->dev,
396 					PM_QOS_FLAG_NO_POWER_OFF
397 						| PM_QOS_FLAG_REMOTE_WAKEUP);
398 		if (stat > PM_QOS_FLAGS_NONE)
399 			return -EBUSY;
400 
401 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
402 		    || pdd->dev->power.irq_safe))
403 			not_suspended++;
404 	}
405 
406 	if (not_suspended > genpd->in_progress)
407 		return -EBUSY;
408 
409 	if (genpd->gov && genpd->gov->power_down_ok) {
410 		if (!genpd->gov->power_down_ok(&genpd->domain))
411 			return -EAGAIN;
412 	}
413 
414 	if (genpd->cpuidle_data) {
415 		/*
416 		 * If cpuidle_data is set, cpuidle should turn the domain off
417 		 * when the CPU in it is idle.  In that case we don't decrement
418 		 * the subdomain counts of the master domains, so that power is
419 		 * not removed from the current domain prematurely as a result
420 		 * of cutting off the masters' power.
421 		 */
422 		genpd->status = GPD_STATE_POWER_OFF;
423 		cpuidle_pause_and_lock();
424 		genpd->cpuidle_data->idle_state->disabled = false;
425 		cpuidle_resume_and_unlock();
426 		return 0;
427 	}
428 
429 	if (genpd->power_off) {
430 		int ret;
431 
432 		if (atomic_read(&genpd->sd_count) > 0)
433 			return -EBUSY;
434 
435 		/*
436 		 * The subdomain count may still become nonzero at this point: one
437 		 * of the subdomains may have incremented it, but not yet called
438 		 * pm_genpd_poweron() for the master.  In that case
439 		 * pm_genpd_poweron() will wait for us to drop the lock, so we can
440 		 * call .power_off() and let pm_genpd_poweron() restore power for
441 		 * us (this shouldn't happen very often).
442 		 */
443 		ret = genpd_power_off(genpd, true);
444 		if (ret)
445 			return ret;
446 	}
447 
448 	genpd->status = GPD_STATE_POWER_OFF;
449 
450 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
451 		genpd_sd_counter_dec(link->master);
452 		genpd_queue_power_off_work(link->master);
453 	}
454 
455 	return 0;
456 }
457 
458 /**
459  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
460  * @work: Work structure used for scheduling the execution of this function.
461  */
462 static void genpd_power_off_work_fn(struct work_struct *work)
463 {
464 	struct generic_pm_domain *genpd;
465 
466 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
467 
468 	mutex_lock(&genpd->lock);
469 	pm_genpd_poweroff(genpd);
470 	mutex_unlock(&genpd->lock);
471 }
472 
473 /**
474  * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
475  * @dev: Device to suspend.
476  *
477  * Carry out a runtime suspend of a device under the assumption that its
478  * pm_domain field points to the domain member of an object of type
479  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
480  */
481 static int pm_genpd_runtime_suspend(struct device *dev)
482 {
483 	struct generic_pm_domain *genpd;
484 	bool (*stop_ok)(struct device *__dev);
485 	int ret;
486 
487 	dev_dbg(dev, "%s()\n", __func__);
488 
489 	genpd = dev_to_genpd(dev);
490 	if (IS_ERR(genpd))
491 		return -EINVAL;
492 
493 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
494 	if (stop_ok && !stop_ok(dev))
495 		return -EBUSY;
496 
497 	ret = genpd_save_dev(genpd, dev);
498 	if (ret)
499 		return ret;
500 
501 	ret = genpd_stop_dev(genpd, dev);
502 	if (ret) {
503 		genpd_restore_dev(genpd, dev, true);
504 		return ret;
505 	}
506 
507 	/*
508 	 * If power.irq_safe is set, this routine will be run with interrupts
509 	 * off, so it can't use mutexes.
510 	 */
511 	if (dev->power.irq_safe)
512 		return 0;
513 
514 	mutex_lock(&genpd->lock);
515 	genpd->in_progress++;
516 	pm_genpd_poweroff(genpd);
517 	genpd->in_progress--;
518 	mutex_unlock(&genpd->lock);
519 
520 	return 0;
521 }
522 
523 /**
524  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
525  * @dev: Device to resume.
526  *
527  * Carry out a runtime resume of a device under the assumption that its
528  * pm_domain field points to the domain member of an object of type
529  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
530  */
531 static int pm_genpd_runtime_resume(struct device *dev)
532 {
533 	struct generic_pm_domain *genpd;
534 	int ret;
535 	bool timed = true;
536 
537 	dev_dbg(dev, "%s()\n", __func__);
538 
539 	genpd = dev_to_genpd(dev);
540 	if (IS_ERR(genpd))
541 		return -EINVAL;
542 
543 	/* If power.irq_safe, the PM domain is never powered off. */
544 	if (dev->power.irq_safe) {
545 		timed = false;
546 		goto out;
547 	}
548 
549 	mutex_lock(&genpd->lock);
550 	ret = __pm_genpd_poweron(genpd);
551 	mutex_unlock(&genpd->lock);
552 
553 	if (ret)
554 		return ret;
555 
556  out:
557 	genpd_start_dev(genpd, dev, timed);
558 	genpd_restore_dev(genpd, dev, timed);
559 
560 	return 0;
561 }
562 
563 static bool pd_ignore_unused;
564 static int __init pd_ignore_unused_setup(char *__unused)
565 {
566 	pd_ignore_unused = true;
567 	return 1;
568 }
569 __setup("pd_ignore_unused", pd_ignore_unused_setup);
570 
571 /**
572  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
573  */
574 void pm_genpd_poweroff_unused(void)
575 {
576 	struct generic_pm_domain *genpd;
577 
578 	if (pd_ignore_unused) {
579 		pr_warn("genpd: Not disabling unused power domains\n");
580 		return;
581 	}
582 
583 	mutex_lock(&gpd_list_lock);
584 
585 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
586 		genpd_queue_power_off_work(genpd);
587 
588 	mutex_unlock(&gpd_list_lock);
589 }
590 
591 static int __init genpd_poweroff_unused(void)
592 {
593 	pm_genpd_poweroff_unused();
594 	return 0;
595 }
596 late_initcall(genpd_poweroff_unused);
597 
598 #ifdef CONFIG_PM_SLEEP
599 
600 /**
601  * pm_genpd_present - Check if the given PM domain has been initialized.
602  * @genpd: PM domain to check.
603  */
604 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
605 {
606 	const struct generic_pm_domain *gpd;
607 
608 	if (IS_ERR_OR_NULL(genpd))
609 		return false;
610 
611 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
612 		if (gpd == genpd)
613 			return true;
614 
615 	return false;
616 }
617 
618 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
619 				    struct device *dev)
620 {
621 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
622 }
623 
624 /**
625  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
626  * @genpd: PM domain to power off, if possible.
627  * @timed: True if latency measurements are allowed.
628  *
629  * Check if the given PM domain can be powered off (during system suspend or
630  * hibernation) and do that if so.  Also, in that case propagate to its masters.
631  *
632  * This function is only called in "noirq" and "syscore" stages of system power
633  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
634  * executed sequentially, so it is guaranteed that it will never run twice in
635  * parallel).
636  */
637 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
638 				   bool timed)
639 {
640 	struct gpd_link *link;
641 
642 	if (genpd->status == GPD_STATE_POWER_OFF)
643 		return;
644 
645 	if (genpd->suspended_count != genpd->device_count
646 	    || atomic_read(&genpd->sd_count) > 0)
647 		return;
648 
649 	genpd_power_off(genpd, timed);
650 
651 	genpd->status = GPD_STATE_POWER_OFF;
652 
653 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
654 		genpd_sd_counter_dec(link->master);
655 		pm_genpd_sync_poweroff(link->master, timed);
656 	}
657 }
658 
659 /**
660  * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
661  * @genpd: PM domain to power on.
662  * @timed: True if latency measurements are allowed.
663  *
664  * This function is only called in "noirq" and "syscore" stages of system power
665  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
666  * executed sequentially, so it is guaranteed that it will never run twice in
667  * parallel).
668  */
669 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
670 				  bool timed)
671 {
672 	struct gpd_link *link;
673 
674 	if (genpd->status == GPD_STATE_ACTIVE)
675 		return;
676 
677 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
678 		pm_genpd_sync_poweron(link->master, timed);
679 		genpd_sd_counter_inc(link->master);
680 	}
681 
682 	genpd_power_on(genpd, timed);
683 
684 	genpd->status = GPD_STATE_ACTIVE;
685 }
686 
687 /**
688  * resume_needed - Check whether to resume a device before system suspend.
689  * @dev: Device to check.
690  * @genpd: PM domain the device belongs to.
691  *
692  * There are two cases in which a device that can wake up the system from sleep
693  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
694  * to wake up the system and it has to remain active for this purpose while the
695  * system is in the sleep state and (2) if the device is not enabled to wake up
696  * the system from sleep states and it generally doesn't generate wakeup signals
697  * by itself (those signals are generated on its behalf by other parts of the
698  * system).  In the latter case it may be necessary to reconfigure the device's
699  * wakeup settings during system suspend, because it may have been set up to
700  * signal remote wakeup from the system's working state as needed by runtime PM.
701  * Return 'true' in either of the above cases.
702  */
703 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
704 {
705 	bool active_wakeup;
706 
707 	if (!device_can_wakeup(dev))
708 		return false;
709 
710 	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
711 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
712 }
713 
714 /**
715  * pm_genpd_prepare - Start power transition of a device in a PM domain.
716  * @dev: Device to start the transition of.
717  *
718  * Start a power transition of a device (during a system-wide power transition)
719  * under the assumption that its pm_domain field points to the domain member of
720  * an object of type struct generic_pm_domain representing a PM domain
721  * consisting of I/O devices.
722  */
723 static int pm_genpd_prepare(struct device *dev)
724 {
725 	struct generic_pm_domain *genpd;
726 	int ret;
727 
728 	dev_dbg(dev, "%s()\n", __func__);
729 
730 	genpd = dev_to_genpd(dev);
731 	if (IS_ERR(genpd))
732 		return -EINVAL;
733 
734 	/*
735 	 * If a wakeup request is pending for the device, it should be woken up
736 	 * at this point and a system wakeup event should be reported if it's
737 	 * set up to wake up the system from sleep states.
738 	 */
739 	pm_runtime_get_noresume(dev);
740 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
741 		pm_wakeup_event(dev, 0);
742 
743 	if (pm_wakeup_pending()) {
744 		pm_runtime_put(dev);
745 		return -EBUSY;
746 	}
747 
748 	if (resume_needed(dev, genpd))
749 		pm_runtime_resume(dev);
750 
751 	mutex_lock(&genpd->lock);
752 
753 	if (genpd->prepared_count++ == 0) {
754 		genpd->suspended_count = 0;
755 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
756 	}
757 
758 	mutex_unlock(&genpd->lock);
759 
760 	if (genpd->suspend_power_off) {
761 		pm_runtime_put_noidle(dev);
762 		return 0;
763 	}
764 
765 	/*
766 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
767 	 * so pm_genpd_poweron() will return immediately, but if the device
768 	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
769 	 * to make it operational.
770 	 */
771 	pm_runtime_resume(dev);
772 	__pm_runtime_disable(dev, false);
773 
774 	ret = pm_generic_prepare(dev);
775 	if (ret) {
776 		mutex_lock(&genpd->lock);
777 
778 		if (--genpd->prepared_count == 0)
779 			genpd->suspend_power_off = false;
780 
781 		mutex_unlock(&genpd->lock);
782 		pm_runtime_enable(dev);
783 	}
784 
785 	pm_runtime_put(dev);
786 	return ret;
787 }
788 
789 /**
790  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
791  * @dev: Device to suspend.
792  *
793  * Suspend a device under the assumption that its pm_domain field points to the
794  * domain member of an object of type struct generic_pm_domain representing
795  * a PM domain consisting of I/O devices.
796  */
797 static int pm_genpd_suspend(struct device *dev)
798 {
799 	struct generic_pm_domain *genpd;
800 
801 	dev_dbg(dev, "%s()\n", __func__);
802 
803 	genpd = dev_to_genpd(dev);
804 	if (IS_ERR(genpd))
805 		return -EINVAL;
806 
807 	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
808 }
809 
810 /**
811  * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
812  * @dev: Device to suspend.
813  *
814  * Carry out a late suspend of a device under the assumption that its
815  * pm_domain field points to the domain member of an object of type
816  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
817  */
818 static int pm_genpd_suspend_late(struct device *dev)
819 {
820 	struct generic_pm_domain *genpd;
821 
822 	dev_dbg(dev, "%s()\n", __func__);
823 
824 	genpd = dev_to_genpd(dev);
825 	if (IS_ERR(genpd))
826 		return -EINVAL;
827 
828 	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
829 }
830 
831 /**
832  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
833  * @dev: Device to suspend.
834  *
835  * Stop the device and remove power from the domain if all devices in it have
836  * been stopped.
837  */
838 static int pm_genpd_suspend_noirq(struct device *dev)
839 {
840 	struct generic_pm_domain *genpd;
841 
842 	dev_dbg(dev, "%s()\n", __func__);
843 
844 	genpd = dev_to_genpd(dev);
845 	if (IS_ERR(genpd))
846 		return -EINVAL;
847 
848 	if (genpd->suspend_power_off
849 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
850 		return 0;
851 
852 	genpd_stop_dev(genpd, dev);
853 
854 	/*
855 	 * Since all of the "noirq" callbacks are executed sequentially, it is
856 	 * guaranteed that this function will never run twice in parallel for
857 	 * the same PM domain, so it is not necessary to use locking here.
858 	 */
859 	genpd->suspended_count++;
860 	pm_genpd_sync_poweroff(genpd, true);
861 
862 	return 0;
863 }
864 
865 /**
866  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
867  * @dev: Device to resume.
868  *
869  * Restore power to the device's PM domain, if necessary, and start the device.
870  */
871 static int pm_genpd_resume_noirq(struct device *dev)
872 {
873 	struct generic_pm_domain *genpd;
874 
875 	dev_dbg(dev, "%s()\n", __func__);
876 
877 	genpd = dev_to_genpd(dev);
878 	if (IS_ERR(genpd))
879 		return -EINVAL;
880 
881 	if (genpd->suspend_power_off
882 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
883 		return 0;
884 
885 	/*
886 	 * Since all of the "noirq" callbacks are executed sequentially, it is
887 	 * guaranteed that this function will never run twice in parallel for
888 	 * the same PM domain, so it is not necessary to use locking here.
889 	 */
890 	pm_genpd_sync_poweron(genpd, true);
891 	genpd->suspended_count--;
892 
893 	return genpd_start_dev(genpd, dev, true);
894 }
895 
896 /**
897  * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
898  * @dev: Device to resume.
899  *
900  * Carry out an early resume of a device under the assumption that its
901  * pm_domain field points to the domain member of an object of type
902  * struct generic_pm_domain representing a power domain consisting of I/O
903  * devices.
904  */
905 static int pm_genpd_resume_early(struct device *dev)
906 {
907 	struct generic_pm_domain *genpd;
908 
909 	dev_dbg(dev, "%s()\n", __func__);
910 
911 	genpd = dev_to_genpd(dev);
912 	if (IS_ERR(genpd))
913 		return -EINVAL;
914 
915 	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
916 }
917 
918 /**
919  * pm_genpd_resume - Resume of device in an I/O PM domain.
920  * @dev: Device to resume.
921  *
922  * Resume a device under the assumption that its pm_domain field points to the
923  * domain member of an object of type struct generic_pm_domain representing
924  * a power domain consisting of I/O devices.
925  */
926 static int pm_genpd_resume(struct device *dev)
927 {
928 	struct generic_pm_domain *genpd;
929 
930 	dev_dbg(dev, "%s()\n", __func__);
931 
932 	genpd = dev_to_genpd(dev);
933 	if (IS_ERR(genpd))
934 		return -EINVAL;
935 
936 	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
937 }
938 
939 /**
940  * pm_genpd_freeze - Freezing a device in an I/O PM domain.
941  * @dev: Device to freeze.
942  *
943  * Freeze a device under the assumption that its pm_domain field points to the
944  * domain member of an object of type struct generic_pm_domain representing
945  * a power domain consisting of I/O devices.
946  */
947 static int pm_genpd_freeze(struct device *dev)
948 {
949 	struct generic_pm_domain *genpd;
950 
951 	dev_dbg(dev, "%s()\n", __func__);
952 
953 	genpd = dev_to_genpd(dev);
954 	if (IS_ERR(genpd))
955 		return -EINVAL;
956 
957 	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
958 }
959 
960 /**
961  * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
962  * @dev: Device to freeze.
963  *
964  * Carry out a late freeze of a device under the assumption that its
965  * pm_domain field points to the domain member of an object of type
966  * struct generic_pm_domain representing a power domain consisting of I/O
967  * devices.
968  */
969 static int pm_genpd_freeze_late(struct device *dev)
970 {
971 	struct generic_pm_domain *genpd;
972 
973 	dev_dbg(dev, "%s()\n", __func__);
974 
975 	genpd = dev_to_genpd(dev);
976 	if (IS_ERR(genpd))
977 		return -EINVAL;
978 
979 	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
980 }
981 
982 /**
983  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
984  * @dev: Device to freeze.
985  *
986  * Complete the freezing of a device under the assumption that its
987  * pm_domain field points to the domain member of an object of type
988  * struct generic_pm_domain representing a power domain consisting of I/O
989  * devices.
990  */
991 static int pm_genpd_freeze_noirq(struct device *dev)
992 {
993 	struct generic_pm_domain *genpd;
994 
995 	dev_dbg(dev, "%s()\n", __func__);
996 
997 	genpd = dev_to_genpd(dev);
998 	if (IS_ERR(genpd))
999 		return -EINVAL;
1000 
1001 	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1002 }
1003 
1004 /**
1005  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1006  * @dev: Device to thaw.
1007  *
1008  * Start the device, unless power has been removed from the domain already
1009  * before the system transition.
1010  */
1011 static int pm_genpd_thaw_noirq(struct device *dev)
1012 {
1013 	struct generic_pm_domain *genpd;
1014 
1015 	dev_dbg(dev, "%s()\n", __func__);
1016 
1017 	genpd = dev_to_genpd(dev);
1018 	if (IS_ERR(genpd))
1019 		return -EINVAL;
1020 
1021 	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
1022 }
1023 
1024 /**
1025  * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1026  * @dev: Device to thaw.
1027  *
1028  * Carry out an early thaw of a device under the assumption that its
1029  * pm_domain field points to the domain member of an object of type
1030  * struct generic_pm_domain representing a power domain consisting of I/O
1031  * devices.
1032  */
1033 static int pm_genpd_thaw_early(struct device *dev)
1034 {
1035 	struct generic_pm_domain *genpd;
1036 
1037 	dev_dbg(dev, "%s()\n", __func__);
1038 
1039 	genpd = dev_to_genpd(dev);
1040 	if (IS_ERR(genpd))
1041 		return -EINVAL;
1042 
1043 	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
1044 }
1045 
1046 /**
1047  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1048  * @dev: Device to thaw.
1049  *
1050  * Thaw a device under the assumption that its pm_domain field points to the
1051  * domain member of an object of type struct generic_pm_domain representing
1052  * a power domain consisting of I/O devices.
1053  */
1054 static int pm_genpd_thaw(struct device *dev)
1055 {
1056 	struct generic_pm_domain *genpd;
1057 
1058 	dev_dbg(dev, "%s()\n", __func__);
1059 
1060 	genpd = dev_to_genpd(dev);
1061 	if (IS_ERR(genpd))
1062 		return -EINVAL;
1063 
1064 	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1065 }
1066 
1067 /**
1068  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1069  * @dev: Device to resume.
1070  *
1071  * Make sure the domain will be in the same power state as before the
1072  * hibernation the system is resuming from and start the device if necessary.
1073  */
1074 static int pm_genpd_restore_noirq(struct device *dev)
1075 {
1076 	struct generic_pm_domain *genpd;
1077 
1078 	dev_dbg(dev, "%s()\n", __func__);
1079 
1080 	genpd = dev_to_genpd(dev);
1081 	if (IS_ERR(genpd))
1082 		return -EINVAL;
1083 
1084 	/*
1085 	 * Since all of the "noirq" callbacks are executed sequentially, it is
1086 	 * guaranteed that this function will never run twice in parallel for
1087 	 * the same PM domain, so it is not necessary to use locking here.
1088 	 *
1089 	 * At this point suspended_count == 0 means we are being run for the
1090 	 * first time for the given domain in the present cycle.
1091 	 */
1092 	if (genpd->suspended_count++ == 0) {
1093 		/*
1094 		 * The boot kernel might put the domain into an arbitrary state,
1095 		 * so make it appear as powered off to pm_genpd_sync_poweron(),
1096 		 * so that it tries to power it on in case it was really off.
1097 		 */
1098 		genpd->status = GPD_STATE_POWER_OFF;
1099 		if (genpd->suspend_power_off) {
1100 			/*
1101 			 * If the domain was off before the hibernation, make
1102 			 * sure it will be off going forward.
1103 			 */
1104 			genpd_power_off(genpd, true);
1105 
1106 			return 0;
1107 		}
1108 	}
1109 
1110 	if (genpd->suspend_power_off)
1111 		return 0;
1112 
1113 	pm_genpd_sync_poweron(genpd, true);
1114 
1115 	return genpd_start_dev(genpd, dev, true);
1116 }
1117 
1118 /**
1119  * pm_genpd_complete - Complete power transition of a device in a power domain.
1120  * @dev: Device to complete the transition of.
1121  *
1122  * Complete a power transition of a device (during a system-wide power
1123  * transition) under the assumption that its pm_domain field points to the
1124  * domain member of an object of type struct generic_pm_domain representing
1125  * a power domain consisting of I/O devices.
1126  */
1127 static void pm_genpd_complete(struct device *dev)
1128 {
1129 	struct generic_pm_domain *genpd;
1130 	bool run_complete;
1131 
1132 	dev_dbg(dev, "%s()\n", __func__);
1133 
1134 	genpd = dev_to_genpd(dev);
1135 	if (IS_ERR(genpd))
1136 		return;
1137 
1138 	mutex_lock(&genpd->lock);
1139 
1140 	run_complete = !genpd->suspend_power_off;
1141 	if (--genpd->prepared_count == 0)
1142 		genpd->suspend_power_off = false;
1143 
1144 	mutex_unlock(&genpd->lock);
1145 
1146 	if (run_complete) {
1147 		pm_generic_complete(dev);
1148 		pm_runtime_set_active(dev);
1149 		pm_runtime_enable(dev);
1150 		pm_request_idle(dev);
1151 	}
1152 }
1153 
1154 /**
1155  * genpd_syscore_switch - Switch power during system core suspend or resume.
1156  * @dev: Device that normally is marked as "always on" to switch power for.
1157  *
1158  * This routine may only be called during the system core (syscore) suspend or
1159  * resume phase for devices whose "always on" flags are set.
1160  */
1161 static void genpd_syscore_switch(struct device *dev, bool suspend)
1162 {
1163 	struct generic_pm_domain *genpd;
1164 
1165 	genpd = dev_to_genpd(dev);
1166 	if (!pm_genpd_present(genpd))
1167 		return;
1168 
1169 	if (suspend) {
1170 		genpd->suspended_count++;
1171 		pm_genpd_sync_poweroff(genpd, false);
1172 	} else {
1173 		pm_genpd_sync_poweron(genpd, false);
1174 		genpd->suspended_count--;
1175 	}
1176 }
1177 
1178 void pm_genpd_syscore_poweroff(struct device *dev)
1179 {
1180 	genpd_syscore_switch(dev, true);
1181 }
1182 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1183 
1184 void pm_genpd_syscore_poweron(struct device *dev)
1185 {
1186 	genpd_syscore_switch(dev, false);
1187 }
1188 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1189 
1190 #else /* !CONFIG_PM_SLEEP */
1191 
1192 #define pm_genpd_prepare		NULL
1193 #define pm_genpd_suspend		NULL
1194 #define pm_genpd_suspend_late		NULL
1195 #define pm_genpd_suspend_noirq		NULL
1196 #define pm_genpd_resume_early		NULL
1197 #define pm_genpd_resume_noirq		NULL
1198 #define pm_genpd_resume			NULL
1199 #define pm_genpd_freeze			NULL
1200 #define pm_genpd_freeze_late		NULL
1201 #define pm_genpd_freeze_noirq		NULL
1202 #define pm_genpd_thaw_early		NULL
1203 #define pm_genpd_thaw_noirq		NULL
1204 #define pm_genpd_thaw			NULL
1205 #define pm_genpd_restore_noirq		NULL
1206 #define pm_genpd_complete		NULL
1207 
1208 #endif /* CONFIG_PM_SLEEP */
1209 
1210 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1211 					struct generic_pm_domain *genpd,
1212 					struct gpd_timing_data *td)
1213 {
1214 	struct generic_pm_domain_data *gpd_data;
1215 	int ret;
1216 
1217 	ret = dev_pm_get_subsys_data(dev);
1218 	if (ret)
1219 		return ERR_PTR(ret);
1220 
1221 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1222 	if (!gpd_data) {
1223 		ret = -ENOMEM;
1224 		goto err_put;
1225 	}
1226 
1227 	if (td)
1228 		gpd_data->td = *td;
1229 
1230 	gpd_data->base.dev = dev;
1231 	gpd_data->td.constraint_changed = true;
1232 	gpd_data->td.effective_constraint_ns = -1;
1233 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1234 
1235 	spin_lock_irq(&dev->power.lock);
1236 
1237 	if (dev->power.subsys_data->domain_data) {
1238 		ret = -EINVAL;
1239 		goto err_free;
1240 	}
1241 
1242 	dev->power.subsys_data->domain_data = &gpd_data->base;
1243 	dev->pm_domain = &genpd->domain;
1244 
1245 	spin_unlock_irq(&dev->power.lock);
1246 
1247 	return gpd_data;
1248 
1249  err_free:
1250 	spin_unlock_irq(&dev->power.lock);
1251 	kfree(gpd_data);
1252  err_put:
1253 	dev_pm_put_subsys_data(dev);
1254 	return ERR_PTR(ret);
1255 }
1256 
1257 static void genpd_free_dev_data(struct device *dev,
1258 				struct generic_pm_domain_data *gpd_data)
1259 {
1260 	spin_lock_irq(&dev->power.lock);
1261 
1262 	dev->pm_domain = NULL;
1263 	dev->power.subsys_data->domain_data = NULL;
1264 
1265 	spin_unlock_irq(&dev->power.lock);
1266 
1267 	kfree(gpd_data);
1268 	dev_pm_put_subsys_data(dev);
1269 }
1270 
1271 /**
1272  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1273  * @genpd: PM domain to add the device to.
1274  * @dev: Device to be added.
1275  * @td: Set of PM QoS timing parameters to attach to the device.
1276  */
1277 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1278 			  struct gpd_timing_data *td)
1279 {
1280 	struct generic_pm_domain_data *gpd_data;
1281 	int ret = 0;
1282 
1283 	dev_dbg(dev, "%s()\n", __func__);
1284 
1285 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1286 		return -EINVAL;
1287 
1288 	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1289 	if (IS_ERR(gpd_data))
1290 		return PTR_ERR(gpd_data);
1291 
1292 	mutex_lock(&genpd->lock);
1293 
1294 	if (genpd->prepared_count > 0) {
1295 		ret = -EAGAIN;
1296 		goto out;
1297 	}
1298 
1299 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1300 	if (ret)
1301 		goto out;
1302 
1303 	genpd->device_count++;
1304 	genpd->max_off_time_changed = true;
1305 
1306 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1307 
1308  out:
1309 	mutex_unlock(&genpd->lock);
1310 
1311 	if (ret)
1312 		genpd_free_dev_data(dev, gpd_data);
1313 	else
1314 		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1315 
1316 	return ret;
1317 }
1318 
1319 /**
1320  * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1321  * @domain_name: Name of the PM domain to add the device to.
1322  * @dev: Device to be added.
1323  * @td: Set of PM QoS timing parameters to attach to the device.
1324  */
1325 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1326 			       struct gpd_timing_data *td)
1327 {
1328 	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1329 }
1330 
1331 /**
1332  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1333  * @genpd: PM domain to remove the device from.
1334  * @dev: Device to be removed.
1335  */
1336 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1337 			   struct device *dev)
1338 {
1339 	struct generic_pm_domain_data *gpd_data;
1340 	struct pm_domain_data *pdd;
1341 	int ret = 0;
1342 
1343 	dev_dbg(dev, "%s()\n", __func__);
1344 
1345 	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
1346 		return -EINVAL;
1347 
1348 	/* The above validation also means we have existing domain_data. */
1349 	pdd = dev->power.subsys_data->domain_data;
1350 	gpd_data = to_gpd_data(pdd);
1351 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1352 
1353 	mutex_lock(&genpd->lock);
1354 
1355 	if (genpd->prepared_count > 0) {
1356 		ret = -EAGAIN;
1357 		goto out;
1358 	}
1359 
1360 	genpd->device_count--;
1361 	genpd->max_off_time_changed = true;
1362 
1363 	if (genpd->detach_dev)
1364 		genpd->detach_dev(genpd, dev);
1365 
1366 	list_del_init(&pdd->list_node);
1367 
1368 	mutex_unlock(&genpd->lock);
1369 
1370 	genpd_free_dev_data(dev, gpd_data);
1371 
1372 	return 0;
1373 
1374  out:
1375 	mutex_unlock(&genpd->lock);
1376 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1377 
1378 	return ret;
1379 }
1380 
1381 /**
1382  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1383  * @genpd: Master PM domain to add the subdomain to.
1384  * @subdomain: Subdomain to be added.
1385  */
1386 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1387 			   struct generic_pm_domain *subdomain)
1388 {
1389 	struct gpd_link *link;
1390 	int ret = 0;
1391 
1392 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1393 	    || genpd == subdomain)
1394 		return -EINVAL;
1395 
1396 	mutex_lock(&genpd->lock);
1397 	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1398 
1399 	if (genpd->status == GPD_STATE_POWER_OFF
1400 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1401 		ret = -EINVAL;
1402 		goto out;
1403 	}
1404 
1405 	list_for_each_entry(link, &genpd->master_links, master_node) {
1406 		if (link->slave == subdomain && link->master == genpd) {
1407 			ret = -EINVAL;
1408 			goto out;
1409 		}
1410 	}
1411 
1412 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1413 	if (!link) {
1414 		ret = -ENOMEM;
1415 		goto out;
1416 	}
1417 	link->master = genpd;
1418 	list_add_tail(&link->master_node, &genpd->master_links);
1419 	link->slave = subdomain;
1420 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1421 	if (subdomain->status != GPD_STATE_POWER_OFF)
1422 		genpd_sd_counter_inc(genpd);
1423 
1424  out:
1425 	mutex_unlock(&subdomain->lock);
1426 	mutex_unlock(&genpd->lock);
1427 
1428 	return ret;
1429 }
1430 
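/*
 * Illustrative sketch (not part of the original file): linking two
 * hypothetical domains so that "foo_bus_pd" cannot be powered off while
 * its child "foo_pd" is still on.  Both domains are assumed to have been
 * set up with pm_genpd_init() already.
 *
 *	ret = pm_genpd_add_subdomain(&foo_bus_pd, &foo_pd);
 *
 * With the link in place, __pm_genpd_poweron() on foo_pd first powers on
 * foo_bus_pd (its master), and pm_genpd_poweroff() refuses to cut power
 * to foo_bus_pd while its subdomain count is nonzero.
 */
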
1431 /**
1432  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1433  * @master_name: Name of the master PM domain to add the subdomain to.
1434  * @subdomain_name: Name of the subdomain to be added.
1435  */
1436 int pm_genpd_add_subdomain_names(const char *master_name,
1437 				 const char *subdomain_name)
1438 {
1439 	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1440 
1441 	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1442 		return -EINVAL;
1443 
1444 	mutex_lock(&gpd_list_lock);
1445 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1446 		if (!master && !strcmp(gpd->name, master_name))
1447 			master = gpd;
1448 
1449 		if (!subdomain && !strcmp(gpd->name, subdomain_name))
1450 			subdomain = gpd;
1451 
1452 		if (master && subdomain)
1453 			break;
1454 	}
1455 	mutex_unlock(&gpd_list_lock);
1456 
1457 	return pm_genpd_add_subdomain(master, subdomain);
1458 }
1459 
1460 /**
1461  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1462  * @genpd: Master PM domain to remove the subdomain from.
1463  * @subdomain: Subdomain to be removed.
1464  */
1465 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1466 			      struct generic_pm_domain *subdomain)
1467 {
1468 	struct gpd_link *link;
1469 	int ret = -EINVAL;
1470 
1471 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1472 		return -EINVAL;
1473 
1474 	mutex_lock(&genpd->lock);
1475 
1476 	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
1477 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1478 			subdomain->name);
1479 		ret = -EBUSY;
1480 		goto out;
1481 	}
1482 
1483 	list_for_each_entry(link, &genpd->master_links, master_node) {
1484 		if (link->slave != subdomain)
1485 			continue;
1486 
1487 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1488 
1489 		list_del(&link->master_node);
1490 		list_del(&link->slave_node);
1491 		kfree(link);
1492 		if (subdomain->status != GPD_STATE_POWER_OFF)
1493 			genpd_sd_counter_dec(genpd);
1494 
1495 		mutex_unlock(&subdomain->lock);
1496 
1497 		ret = 0;
1498 		break;
1499 	}
1500 
1501 out:
1502 	mutex_unlock(&genpd->lock);
1503 
1504 	return ret;
1505 }
1506 
1507 /**
1508  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1509  * @genpd: PM domain to be connected with cpuidle.
1510  * @state: cpuidle state this domain can disable/enable.
1511  *
1512  * Make a PM domain behave as though it contained a CPU core, that is, instead
1513  * of calling its power down routine it will enable the given cpuidle state so
1514  * that the cpuidle subsystem can power it down (if possible and desirable).
1515  */
1516 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1517 {
1518 	struct cpuidle_driver *cpuidle_drv;
1519 	struct gpd_cpuidle_data *cpuidle_data;
1520 	struct cpuidle_state *idle_state;
1521 	int ret = 0;
1522 
1523 	if (IS_ERR_OR_NULL(genpd) || state < 0)
1524 		return -EINVAL;
1525 
1526 	mutex_lock(&genpd->lock);
1527 
1528 	if (genpd->cpuidle_data) {
1529 		ret = -EEXIST;
1530 		goto out;
1531 	}
1532 	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1533 	if (!cpuidle_data) {
1534 		ret = -ENOMEM;
1535 		goto out;
1536 	}
1537 	cpuidle_drv = cpuidle_driver_ref();
1538 	if (!cpuidle_drv) {
1539 		ret = -ENODEV;
1540 		goto err_drv;
1541 	}
1542 	if (cpuidle_drv->state_count <= state) {
1543 		ret = -EINVAL;
1544 		goto err;
1545 	}
1546 	idle_state = &cpuidle_drv->states[state];
1547 	if (!idle_state->disabled) {
1548 		ret = -EAGAIN;
1549 		goto err;
1550 	}
1551 	cpuidle_data->idle_state = idle_state;
1552 	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1553 	genpd->cpuidle_data = cpuidle_data;
1554 	genpd_recalc_cpu_exit_latency(genpd);
1555 
1556  out:
1557 	mutex_unlock(&genpd->lock);
1558 	return ret;
1559 
1560  err:
1561 	cpuidle_driver_unref();
1562 
1563  err_drv:
1564 	kfree(cpuidle_data);
1565 	goto out;
1566 }
1567 
1568 /**
1569  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1570  * @name: Name of the domain to connect to cpuidle.
1571  * @state: cpuidle state this domain can manipulate.
1572  */
1573 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1574 {
1575 	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1576 }
1577 
1578 /**
1579  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1580  * @genpd: PM domain to remove the cpuidle connection from.
1581  *
1582  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1583  * given PM domain.
1584  */
1585 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1586 {
1587 	struct gpd_cpuidle_data *cpuidle_data;
1588 	struct cpuidle_state *idle_state;
1589 	int ret = 0;
1590 
1591 	if (IS_ERR_OR_NULL(genpd))
1592 		return -EINVAL;
1593 
1594 	mutex_lock(&genpd->lock);
1595 
1596 	cpuidle_data = genpd->cpuidle_data;
1597 	if (!cpuidle_data) {
1598 		ret = -ENODEV;
1599 		goto out;
1600 	}
1601 	idle_state = cpuidle_data->idle_state;
1602 	if (!idle_state->disabled) {
1603 		ret = -EAGAIN;
1604 		goto out;
1605 	}
1606 	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1607 	cpuidle_driver_unref();
1608 	genpd->cpuidle_data = NULL;
1609 	kfree(cpuidle_data);
1610 
1611  out:
1612 	mutex_unlock(&genpd->lock);
1613 	return ret;
1614 }
1615 
1616 /**
1617  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1618  * @name: Name of the domain to disconnect cpuidle from.
1619  */
1620 int pm_genpd_name_detach_cpuidle(const char *name)
1621 {
1622 	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1623 }
1624 
1625 /* Default device callbacks for generic PM domains. */
1626 
1627 /**
1628  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1629  * @dev: Device to handle.
1630  */
1631 static int pm_genpd_default_save_state(struct device *dev)
1632 {
1633 	int (*cb)(struct device *__dev);
1634 
1635 	if (dev->type && dev->type->pm)
1636 		cb = dev->type->pm->runtime_suspend;
1637 	else if (dev->class && dev->class->pm)
1638 		cb = dev->class->pm->runtime_suspend;
1639 	else if (dev->bus && dev->bus->pm)
1640 		cb = dev->bus->pm->runtime_suspend;
1641 	else
1642 		cb = NULL;
1643 
1644 	if (!cb && dev->driver && dev->driver->pm)
1645 		cb = dev->driver->pm->runtime_suspend;
1646 
1647 	return cb ? cb(dev) : 0;
1648 }
1649 
1650 /**
1651  * pm_genpd_default_restore_state - Default PM domains "restore device state".
1652  * @dev: Device to handle.
1653  */
1654 static int pm_genpd_default_restore_state(struct device *dev)
1655 {
1656 	int (*cb)(struct device *__dev);
1657 
1658 	if (dev->type && dev->type->pm)
1659 		cb = dev->type->pm->runtime_resume;
1660 	else if (dev->class && dev->class->pm)
1661 		cb = dev->class->pm->runtime_resume;
1662 	else if (dev->bus && dev->bus->pm)
1663 		cb = dev->bus->pm->runtime_resume;
1664 	else
1665 		cb = NULL;
1666 
1667 	if (!cb && dev->driver && dev->driver->pm)
1668 		cb = dev->driver->pm->runtime_resume;
1669 
1670 	return cb ? cb(dev) : 0;
1671 }
1672 
1673 /**
1674  * pm_genpd_init - Initialize a generic I/O PM domain object.
1675  * @genpd: PM domain object to initialize.
1676  * @gov: PM domain governor to associate with the domain (may be NULL).
1677  * @is_off: Initial value of the domain's power_is_off field.
1678  */
1679 void pm_genpd_init(struct generic_pm_domain *genpd,
1680 		   struct dev_power_governor *gov, bool is_off)
1681 {
1682 	if (IS_ERR_OR_NULL(genpd))
1683 		return;
1684 
1685 	INIT_LIST_HEAD(&genpd->master_links);
1686 	INIT_LIST_HEAD(&genpd->slave_links);
1687 	INIT_LIST_HEAD(&genpd->dev_list);
1688 	mutex_init(&genpd->lock);
1689 	genpd->gov = gov;
1690 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1691 	genpd->in_progress = 0;
1692 	atomic_set(&genpd->sd_count, 0);
1693 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1694 	genpd->device_count = 0;
1695 	genpd->max_off_time_ns = -1;
1696 	genpd->max_off_time_changed = true;
1697 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1698 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1699 	genpd->domain.ops.prepare = pm_genpd_prepare;
1700 	genpd->domain.ops.suspend = pm_genpd_suspend;
1701 	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1702 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1703 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1704 	genpd->domain.ops.resume_early = pm_genpd_resume_early;
1705 	genpd->domain.ops.resume = pm_genpd_resume;
1706 	genpd->domain.ops.freeze = pm_genpd_freeze;
1707 	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1708 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1709 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1710 	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1711 	genpd->domain.ops.thaw = pm_genpd_thaw;
1712 	genpd->domain.ops.poweroff = pm_genpd_suspend;
1713 	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1714 	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1715 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1716 	genpd->domain.ops.restore_early = pm_genpd_resume_early;
1717 	genpd->domain.ops.restore = pm_genpd_resume;
1718 	genpd->domain.ops.complete = pm_genpd_complete;
1719 	genpd->dev_ops.save_state = pm_genpd_default_save_state;
1720 	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1721 
1722 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1723 		genpd->dev_ops.stop = pm_clk_suspend;
1724 		genpd->dev_ops.start = pm_clk_resume;
1725 	}
1726 
1727 	mutex_lock(&gpd_list_lock);
1728 	list_add(&genpd->gpd_list_node, &gpd_list);
1729 	mutex_unlock(&gpd_list_lock);
1730 }
1731 EXPORT_SYMBOL_GPL(pm_genpd_init);
1732 
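/*
 * Illustrative sketch (not part of the original file): how a platform
 * driver might register a domain with pm_genpd_init() and add a device to
 * it.  The domain, callbacks and platform device (foo_pd,
 * foo_pd_power_on/off, pdev) and the power switch they stand for are
 * hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		// flip the hypothetical power switch on
 *		return 0;
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		// flip the hypothetical power switch off
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo_pd",
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);	// registered initially off
 *	ret = __pm_genpd_add_device(&foo_pd, &pdev->dev, NULL);
 *
 * Devices added this way are subsequently runtime suspended and resumed
 * through pm_genpd_runtime_suspend() and pm_genpd_runtime_resume() above.
 */
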
1733 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1734 /*
1735  * Device Tree based PM domain providers.
1736  *
1737  * The code below implements generic device tree based PM domain providers that
1738  * bind device tree nodes with generic PM domains registered in the system.
1739  *
1740  * Any driver that registers generic PM domains and needs to support binding of
1741  * devices to these domains is supposed to register a PM domain provider, which
1742  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1743  *
1744  * Two simple mapping functions have been provided for convenience:
1745  *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1746  *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1747  *    index.
1748  */
1749 
1750 /**
1751  * struct of_genpd_provider - PM domain provider registration structure
1752  * @link: Entry in global list of PM domain providers
1753  * @node: Pointer to device tree node of PM domain provider
1754  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1755  *         into a PM domain.
1756  * @data: context pointer to be passed into @xlate callback
1757  */
1758 struct of_genpd_provider {
1759 	struct list_head link;
1760 	struct device_node *node;
1761 	genpd_xlate_t xlate;
1762 	void *data;
1763 };
1764 
1765 /* List of registered PM domain providers. */
1766 static LIST_HEAD(of_genpd_providers);
1767 /* Mutex to protect the list above. */
1768 static DEFINE_MUTEX(of_genpd_mutex);
1769 
1770 /**
1771  * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1772  * @genpdspec: OF phandle args to map into a PM domain
1773  * @data: xlate function private data - pointer to struct generic_pm_domain
1774  *
1775  * This is a generic xlate function that can be used to model PM domains that
1776  * have their own device tree nodes. The private data of xlate function needs
1777  * to be a valid pointer to struct generic_pm_domain.
1778  */
1779 struct generic_pm_domain *__of_genpd_xlate_simple(
1780 					struct of_phandle_args *genpdspec,
1781 					void *data)
1782 {
1783 	if (genpdspec->args_count != 0)
1784 		return ERR_PTR(-EINVAL);
1785 	return data;
1786 }
1787 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
1788 
1789 /**
1790  * __of_genpd_xlate_onecell() - Xlate function using a single index.
1791  * @genpdspec: OF phandle args to map into a PM domain
1792  * @data: xlate function private data - pointer to struct genpd_onecell_data
1793  *
1794  * This is a generic xlate function that can be used to model simple PM domain
1795  * controllers that have one device tree node and provide multiple PM domains.
1796  * A single cell is used as an index into an array of PM domains specified in
1797  * the genpd_onecell_data struct when registering the provider.
1798  */
1799 struct generic_pm_domain *__of_genpd_xlate_onecell(
1800 					struct of_phandle_args *genpdspec,
1801 					void *data)
1802 {
1803 	struct genpd_onecell_data *genpd_data = data;
1804 	unsigned int idx = genpdspec->args[0];
1805 
1806 	if (genpdspec->args_count != 1)
1807 		return ERR_PTR(-EINVAL);
1808 
1809 	if (idx >= genpd_data->num_domains) {
1810 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1811 		return ERR_PTR(-EINVAL);
1812 	}
1813 
1814 	if (!genpd_data->domains[idx])
1815 		return ERR_PTR(-ENOENT);
1816 
1817 	return genpd_data->domains[idx];
1818 }
1819 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
1820 
1821 /**
1822  * __of_genpd_add_provider() - Register a PM domain provider for a node
1823  * @np: Device node pointer associated with the PM domain provider.
1824  * @xlate: Callback for decoding PM domain from phandle arguments.
1825  * @data: Context pointer for @xlate callback.
1826  */
1827 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1828 			void *data)
1829 {
1830 	struct of_genpd_provider *cp;
1831 
1832 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1833 	if (!cp)
1834 		return -ENOMEM;
1835 
1836 	cp->node = of_node_get(np);
1837 	cp->data = data;
1838 	cp->xlate = xlate;
1839 
1840 	mutex_lock(&of_genpd_mutex);
1841 	list_add(&cp->link, &of_genpd_providers);
1842 	mutex_unlock(&of_genpd_mutex);
1843 	pr_debug("Added domain provider from %s\n", np->full_name);
1844 
1845 	return 0;
1846 }
1847 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
1848 
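/*
 * Illustrative sketch (not part of the original file): registering a
 * provider that exposes two domains from a single DT node, mapped by index
 * through __of_genpd_xlate_onecell().  foo_pd0/foo_pd1 and the platform
 * device pdev are hypothetical, and both domains are assumed to have been
 * pm_genpd_init()-ed.
 *
 *	static struct generic_pm_domain *foo_domains[] = { &foo_pd0, &foo_pd1 };
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains	= foo_domains,
 *		.num_domains	= ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(pdev->dev.of_node,
 *				      __of_genpd_xlate_onecell,
 *				      &foo_onecell_data);
 *
 * A consumer node then selects one of the domains with a one-cell
 * specifier, e.g. "power-domains = <&foo_provider 1>;", assuming the
 * provider node sets "#power-domain-cells = <1>;".
 */
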
1849 /**
1850  * of_genpd_del_provider() - Remove a previously registered PM domain provider
1851  * @np: Device node pointer associated with the PM domain provider
1852  */
1853 void of_genpd_del_provider(struct device_node *np)
1854 {
1855 	struct of_genpd_provider *cp;
1856 
1857 	mutex_lock(&of_genpd_mutex);
1858 	list_for_each_entry(cp, &of_genpd_providers, link) {
1859 		if (cp->node == np) {
1860 			list_del(&cp->link);
1861 			of_node_put(cp->node);
1862 			kfree(cp);
1863 			break;
1864 		}
1865 	}
1866 	mutex_unlock(&of_genpd_mutex);
1867 }
1868 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1869 
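/*
 * Illustrative teardown sketch, not part of this file: a provider that was
 * registered in probe() is expected to unregister itself in the matching
 * remove() path; my_pd_remove() is a hypothetical driver callback.
 *
 *	static int my_pd_remove(struct platform_device *pdev)
 *	{
 *		of_genpd_del_provider(pdev->dev.of_node);
 *		return 0;
 *	}
 */
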
1870 /**
1871  * of_genpd_get_from_provider() - Look-up PM domain
1872  * @genpdspec: OF phandle args to use for look-up
1873  *
1874  * Looks for a PM domain provider under the node specified by @genpdspec and,
1875  * if one is found, uses the provider's xlate callback to map the phandle args
1876  * to a PM domain.
1877  *
1878  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1879  * on failure.
1880  */
1881 struct generic_pm_domain *of_genpd_get_from_provider(
1882 					struct of_phandle_args *genpdspec)
1883 {
1884 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1885 	struct of_genpd_provider *provider;
1886 
1887 	mutex_lock(&of_genpd_mutex);
1888 
1889 	/* Check if we have such a provider in our list */
1890 	list_for_each_entry(provider, &of_genpd_providers, link) {
1891 		if (provider->node == genpdspec->np)
1892 			genpd = provider->xlate(genpdspec, provider->data);
1893 		if (!IS_ERR(genpd))
1894 			break;
1895 	}
1896 
1897 	mutex_unlock(&of_genpd_mutex);
1898 
1899 	return genpd;
1900 }
1901 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
1902 
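/*
 * Illustrative lookup sketch, not part of this file (consumer devices would
 * normally go through genpd_dev_pm_attach() below instead): the phandle
 * args are parsed from a consumer node np and handed to
 * of_genpd_get_from_provider().
 *
 *	struct of_phandle_args pd_args;
 *	struct generic_pm_domain *pd;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (ret < 0)
 *		return ret;
 *
 *	pd = of_genpd_get_from_provider(&pd_args);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */
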
1903 /**
1904  * genpd_dev_pm_detach - Detach a device from its PM domain.
1905  * @dev: Device to detach.
1906  * @power_off: Currently not used
1907  *
1908  * Try to locate the generic PM domain that the device was previously
1909  * attached to. If one is found, the device is detached from it.
1910  */
1911 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1912 {
1913 	struct generic_pm_domain *pd;
1914 	unsigned int i;
1915 	int ret = 0;
1916 
1917 	pd = pm_genpd_lookup_dev(dev);
1918 	if (!pd)
1919 		return;
1920 
1921 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1922 
1923 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1924 		ret = pm_genpd_remove_device(pd, dev);
1925 		if (ret != -EAGAIN)
1926 			break;
1927 
1928 		mdelay(i);
1929 		cond_resched();
1930 	}
1931 
1932 	if (ret < 0) {
1933 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
1934 			pd->name, ret);
1935 		return;
1936 	}
1937 
1938 	/* Check if PM domain can be powered off after removing this device. */
1939 	genpd_queue_power_off_work(pd);
1940 }
1941 
1942 static void genpd_dev_pm_sync(struct device *dev)
1943 {
1944 	struct generic_pm_domain *pd;
1945 
1946 	pd = dev_to_genpd(dev);
1947 	if (IS_ERR(pd))
1948 		return;
1949 
1950 	genpd_queue_power_off_work(pd);
1951 }
1952 
1953 /**
1954  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1955  * @dev: Device to attach.
1956  *
1957  * Parse the device's OF node to find a PM domain specifier. If one is found,
1958  * attach the device to the retrieved pm_domain ops.
1959  *
1960  * Both generic and legacy Samsung-specific DT bindings are supported to keep
1961  * backwards compatibility with existing DTBs.
1962  *
1963  * Returns 0 when a PM domain is successfully attached, or a negative error
1964  * code otherwise. Note that if a power-domain exists for the device but it
1965  * cannot be found or turned on, -EPROBE_DEFER is returned so that the device
1966  * is not probed and the attach is retried later.
1967  */
1968 int genpd_dev_pm_attach(struct device *dev)
1969 {
1970 	struct of_phandle_args pd_args;
1971 	struct generic_pm_domain *pd;
1972 	unsigned int i;
1973 	int ret;
1974 
1975 	if (!dev->of_node)
1976 		return -ENODEV;
1977 
1978 	if (dev->pm_domain)
1979 		return -EEXIST;
1980 
1981 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1982 					"#power-domain-cells", 0, &pd_args);
1983 	if (ret < 0) {
1984 		if (ret != -ENOENT)
1985 			return ret;
1986 
1987 		/*
1988 		 * Try legacy Samsung-specific bindings
1989 		 * (for backwards compatibility of DT ABI)
1990 		 */
1991 		pd_args.args_count = 0;
1992 		pd_args.np = of_parse_phandle(dev->of_node,
1993 						"samsung,power-domain", 0);
1994 		if (!pd_args.np)
1995 			return -ENOENT;
1996 	}
1997 
1998 	pd = of_genpd_get_from_provider(&pd_args);
1999 	if (IS_ERR(pd)) {
2000 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2001 			__func__, PTR_ERR(pd));
2002 		of_node_put(dev->of_node);
2003 		return -EPROBE_DEFER;
2004 	}
2005 
2006 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2007 
2008 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2009 		ret = pm_genpd_add_device(pd, dev);
2010 		if (ret != -EAGAIN)
2011 			break;
2012 
2013 		mdelay(i);
2014 		cond_resched();
2015 	}
2016 
2017 	if (ret < 0) {
2018 		dev_err(dev, "failed to add to PM domain %s: %d\n",
2019 			pd->name, ret);
2020 		of_node_put(dev->of_node);
2021 		goto out;
2022 	}
2023 
2024 	dev->pm_domain->detach = genpd_dev_pm_detach;
2025 	dev->pm_domain->sync = genpd_dev_pm_sync;
2026 	ret = pm_genpd_poweron(pd);
2027 
2028 out:
2029 	return ret ? -EPROBE_DEFER : 0;
2030 }
2031 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
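
/*
 * Illustrative caller sketch, simplified from what bus code such as the
 * platform bus does via dev_pm_domain_attach(): the attach runs before the
 * driver's probe() and -EPROBE_DEFER is propagated so that probing is
 * retried once the domain's provider becomes available. drv and
 * my_do_probe() are hypothetical.
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 *
 *	return my_do_probe(dev, drv);
 */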
2032 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2033 
2034 
2035 /***        debugfs support        ***/
2036 
2037 #ifdef CONFIG_PM_ADVANCED_DEBUG
2038 #include <linux/pm.h>
2039 #include <linux/device.h>
2040 #include <linux/debugfs.h>
2041 #include <linux/seq_file.h>
2042 #include <linux/init.h>
2043 #include <linux/kobject.h>
2044 static struct dentry *pm_genpd_debugfs_dir;
2045 
2046 /*
2047  * TODO: This function is a slightly modified version of rtpm_status_show
2048  * from sysfs.c, so generalize it.
2049  */
2050 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2051 {
2052 	static const char * const status_lookup[] = {
2053 		[RPM_ACTIVE] = "active",
2054 		[RPM_RESUMING] = "resuming",
2055 		[RPM_SUSPENDED] = "suspended",
2056 		[RPM_SUSPENDING] = "suspending"
2057 	};
2058 	const char *p = "";
2059 
2060 	if (dev->power.runtime_error)
2061 		p = "error";
2062 	else if (dev->power.disable_depth)
2063 		p = "unsupported";
2064 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2065 		p = status_lookup[dev->power.runtime_status];
2066 	else
2067 		WARN_ON(1);
2068 
2069 	seq_puts(s, p);
2070 }
2071 
2072 static int pm_genpd_summary_one(struct seq_file *s,
2073 				struct generic_pm_domain *genpd)
2074 {
2075 	static const char * const status_lookup[] = {
2076 		[GPD_STATE_ACTIVE] = "on",
2077 		[GPD_STATE_POWER_OFF] = "off"
2078 	};
2079 	struct pm_domain_data *pm_data;
2080 	const char *kobj_path;
2081 	struct gpd_link *link;
2082 	int ret;
2083 
2084 	ret = mutex_lock_interruptible(&genpd->lock);
2085 	if (ret)
2086 		return -ERESTARTSYS;
2087 
2088 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2089 		goto exit;
2090 	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);
2091 
2092 	/*
2093 	 * Modifications to the list require holding locks on both
2094 	 * the master and the slave, so we are safe.
2095 	 * Also genpd->name is immutable.
2096 	 */
2097 	list_for_each_entry(link, &genpd->master_links, master_node) {
2098 		seq_printf(s, "%s", link->slave->name);
2099 		if (!list_is_last(&link->master_node, &genpd->master_links))
2100 			seq_puts(s, ", ");
2101 	}
2102 
2103 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2104 		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2105 		if (kobj_path == NULL)
2106 			continue;
2107 
2108 		seq_printf(s, "\n    %-50s  ", kobj_path);
2109 		rtpm_status_str(s, pm_data->dev);
2110 		kfree(kobj_path);
2111 	}
2112 
2113 	seq_puts(s, "\n");
2114 exit:
2115 	mutex_unlock(&genpd->lock);
2116 
2117 	return 0;
2118 }
2119 
2120 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2121 {
2122 	struct generic_pm_domain *genpd;
2123 	int ret = 0;
2124 
2125 	seq_puts(s, "domain                          status          slaves\n");
2126 	seq_puts(s, "    /device                                             runtime status\n");
2127 	seq_puts(s, "----------------------------------------------------------------------\n");
2128 
2129 	ret = mutex_lock_interruptible(&gpd_list_lock);
2130 	if (ret)
2131 		return -ERESTARTSYS;
2132 
2133 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2134 		ret = pm_genpd_summary_one(s, genpd);
2135 		if (ret)
2136 			break;
2137 	}
2138 	mutex_unlock(&gpd_list_lock);
2139 
2140 	return ret;
2141 }
2142 
2143 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2144 {
2145 	return single_open(file, pm_genpd_summary_show, NULL);
2146 }
2147 
2148 static const struct file_operations pm_genpd_summary_fops = {
2149 	.open = pm_genpd_summary_open,
2150 	.read = seq_read,
2151 	.llseek = seq_lseek,
2152 	.release = single_release,
2153 };
2154 
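/*
 * Illustrative output of /sys/kernel/debug/pm_genpd/pm_genpd_summary, with
 * made-up domain and device names; the columns follow the header printed by
 * pm_genpd_summary_show() above:
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	pd-a3sp                         on              pd-a2sl
 *	    /devices/platform/e6600000.i2c                      suspended
 */
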
2155 static int __init pm_genpd_debug_init(void)
2156 {
2157 	struct dentry *d;
2158 
2159 	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2160 
2161 	if (!pm_genpd_debugfs_dir)
2162 		return -ENOMEM;
2163 
2164 	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2165 			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2166 	if (!d)
2167 		return -ENOMEM;
2168 
2169 	return 0;
2170 }
2171 late_initcall(pm_genpd_debug_init);
2172 
2173 static void __exit pm_genpd_debug_exit(void)
2174 {
2175 	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2176 }
2177 __exitcall(pm_genpd_debug_exit);
2178 #endif /* CONFIG_PM_ADVANCED_DEBUG */
2179