xref: /openbmc/linux/drivers/base/power/domain.c (revision f7d84fa7)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22 
23 #include "power.h"
24 
25 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
26 
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
28 ({								\
29 	type (*__routine)(struct device *__d); 			\
30 	type __ret = (type)0;					\
31 								\
32 	__routine = genpd->dev_ops.callback; 			\
33 	if (__routine) {					\
34 		__ret = __routine(dev); 			\
35 	}							\
36 	__ret;							\
37 })
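/*
 * Illustrative expansion (comment only, not part of the driver logic):
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev), for instance, evaluates to
 * genpd->dev_ops.stop(dev) when that callback is set and to (int)0 when
 * it is NULL.
 */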
38 
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
41 
42 struct genpd_lock_ops {
43 	void (*lock)(struct generic_pm_domain *genpd);
44 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
45 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
46 	void (*unlock)(struct generic_pm_domain *genpd);
47 };
48 
49 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
50 {
51 	mutex_lock(&genpd->mlock);
52 }
53 
54 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
55 					int depth)
56 {
57 	mutex_lock_nested(&genpd->mlock, depth);
58 }
59 
60 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
61 {
62 	return mutex_lock_interruptible(&genpd->mlock);
63 }
64 
65 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
66 {
67 	mutex_unlock(&genpd->mlock);
68 }
69 
70 static const struct genpd_lock_ops genpd_mtx_ops = {
71 	.lock = genpd_lock_mtx,
72 	.lock_nested = genpd_lock_nested_mtx,
73 	.lock_interruptible = genpd_lock_interruptible_mtx,
74 	.unlock = genpd_unlock_mtx,
75 };
76 
77 static void genpd_lock_spin(struct generic_pm_domain *genpd)
78 	__acquires(&genpd->slock)
79 {
80 	unsigned long flags;
81 
82 	spin_lock_irqsave(&genpd->slock, flags);
83 	genpd->lock_flags = flags;
84 }
85 
86 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
87 					int depth)
88 	__acquires(&genpd->slock)
89 {
90 	unsigned long flags;
91 
92 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
93 	genpd->lock_flags = flags;
94 }
95 
96 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
97 	__acquires(&genpd->slock)
98 {
99 	unsigned long flags;
100 
101 	spin_lock_irqsave(&genpd->slock, flags);
102 	genpd->lock_flags = flags;
103 	return 0;
104 }
105 
106 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
107 	__releases(&genpd->slock)
108 {
109 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
110 }
111 
112 static const struct genpd_lock_ops genpd_spin_ops = {
113 	.lock = genpd_lock_spin,
114 	.lock_nested = genpd_lock_nested_spin,
115 	.lock_interruptible = genpd_lock_interruptible_spin,
116 	.unlock = genpd_unlock_spin,
117 };
118 
119 #define genpd_lock(p)			p->lock_ops->lock(p)
120 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
121 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
122 #define genpd_unlock(p)			p->lock_ops->unlock(p)
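/*
 * The lock_ops pointer is set up by genpd_lock_init() below: IRQ safe
 * domains (GENPD_FLAG_IRQ_SAFE) use genpd_spin_ops, all others use
 * genpd_mtx_ops.
 */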
123 
124 #define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
125 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
126 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
127 
128 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
129 		struct generic_pm_domain *genpd)
130 {
131 	bool ret;
132 
133 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
134 
135 	/*
136 	 * Warn once if an IRQ safe device is attached to a no sleep domain, to
137 	 * indicate a suboptimal configuration for PM. For an always on domain
138 	 * this isn't the case, thus don't warn.
139 	 */
140 	if (ret && !genpd_is_always_on(genpd))
141 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
142 				genpd->name);
143 
144 	return ret;
145 }
146 
147 /*
148  * Get the generic PM domain for a particular struct device.
149  * This validates the struct device pointer, the PM domain pointer,
150  * and checks that the PM domain pointer is a real generic PM domain.
151  * Any failure results in NULL being returned.
152  */
153 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
154 {
155 	struct generic_pm_domain *genpd = NULL, *gpd;
156 
157 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
158 		return NULL;
159 
160 	mutex_lock(&gpd_list_lock);
161 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
162 		if (&gpd->domain == dev->pm_domain) {
163 			genpd = gpd;
164 			break;
165 		}
166 	}
167 	mutex_unlock(&gpd_list_lock);
168 
169 	return genpd;
170 }
171 
172 /*
173  * This should only be used where we are certain that the pm_domain
174  * attached to the device is a genpd domain.
175  */
176 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
177 {
178 	if (IS_ERR_OR_NULL(dev->pm_domain))
179 		return ERR_PTR(-EINVAL);
180 
181 	return pd_to_genpd(dev->pm_domain);
182 }
183 
184 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
185 {
186 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
187 }
188 
189 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
190 {
191 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
192 }
193 
194 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
195 {
196 	bool ret = false;
197 
198 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
199 		ret = !!atomic_dec_and_test(&genpd->sd_count);
200 
201 	return ret;
202 }
203 
204 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
205 {
206 	atomic_inc(&genpd->sd_count);
207 	smp_mb__after_atomic();
208 }
209 
210 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
211 {
212 	unsigned int state_idx = genpd->state_idx;
213 	ktime_t time_start;
214 	s64 elapsed_ns;
215 	int ret;
216 
217 	if (!genpd->power_on)
218 		return 0;
219 
220 	if (!timed)
221 		return genpd->power_on(genpd);
222 
223 	time_start = ktime_get();
224 	ret = genpd->power_on(genpd);
225 	if (ret)
226 		return ret;
227 
228 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
229 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
230 		return ret;
231 
232 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
233 	genpd->max_off_time_changed = true;
234 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
235 		 genpd->name, "on", elapsed_ns);
236 
237 	return ret;
238 }
239 
240 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
241 {
242 	unsigned int state_idx = genpd->state_idx;
243 	ktime_t time_start;
244 	s64 elapsed_ns;
245 	int ret;
246 
247 	if (!genpd->power_off)
248 		return 0;
249 
250 	if (!timed)
251 		return genpd->power_off(genpd);
252 
253 	time_start = ktime_get();
254 	ret = genpd->power_off(genpd);
255 	if (ret == -EBUSY)
256 		return ret;
257 
258 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
259 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
260 		return ret;
261 
262 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
263 	genpd->max_off_time_changed = true;
264 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
265 		 genpd->name, "off", elapsed_ns);
266 
267 	return ret;
268 }
269 
270 /**
271  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
272  * @genpd: PM domain to power off.
273  *
274  * Queue up the execution of genpd_power_off() unless it's already been done
275  * before.
276  */
277 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
278 {
279 	queue_work(pm_wq, &genpd->power_off_work);
280 }
281 
282 /**
283  * genpd_power_off - Remove power from a given PM domain.
284  * @genpd: PM domain to power down.
285  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
286  * RPM status of the related device is in an intermediate state, not yet turned
287  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
288  * be RPM_SUSPENDED, while it tries to power off the PM domain.
289  *
290  * If all of the @genpd's devices have been suspended and all of its subdomains
291  * have been powered down, remove power from @genpd.
292  */
293 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
294 			   unsigned int depth)
295 {
296 	struct pm_domain_data *pdd;
297 	struct gpd_link *link;
298 	unsigned int not_suspended = 0;
299 
300 	/*
301 	 * Do not try to power off the domain in the following situations:
302 	 * (1) The domain is already in the "power off" state.
303 	 * (2) System suspend is in progress.
304 	 */
305 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
306 		return 0;
307 
308 	/*
309 	 * Abort power off for the PM domain in the following situations:
310 	 * (1) The domain is configured as always on.
311 	 * (2) When the domain has a subdomain being powered on.
312 	 */
313 	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
314 		return -EBUSY;
315 
316 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
317 		enum pm_qos_flags_status stat;
318 
319 		stat = dev_pm_qos_flags(pdd->dev,
320 					PM_QOS_FLAG_NO_POWER_OFF
321 						| PM_QOS_FLAG_REMOTE_WAKEUP);
322 		if (stat > PM_QOS_FLAGS_NONE)
323 			return -EBUSY;
324 
325 		/*
326 		 * Do not allow PM domain to be powered off, when an IRQ safe
327 		 * device is part of a non-IRQ safe domain.
328 		 */
329 		if (!pm_runtime_suspended(pdd->dev) ||
330 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
331 			not_suspended++;
332 	}
333 
334 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
335 		return -EBUSY;
336 
337 	if (genpd->gov && genpd->gov->power_down_ok) {
338 		if (!genpd->gov->power_down_ok(&genpd->domain))
339 			return -EAGAIN;
340 	}
341 
342 	if (genpd->power_off) {
343 		int ret;
344 
345 		if (atomic_read(&genpd->sd_count) > 0)
346 			return -EBUSY;
347 
348 		/*
349 		 * If sd_count > 0 at this point, one of the subdomains hasn't
350 		 * managed to call genpd_power_on() for the master yet after
351 		 * incrementing it.  In that case genpd_power_on() will wait
352 		 * for us to drop the lock, so we can call .power_off() and let
353 		 * the genpd_power_on() restore power for us (this shouldn't
354 		 * happen very often).
355 		 */
356 		ret = _genpd_power_off(genpd, true);
357 		if (ret)
358 			return ret;
359 	}
360 
361 	genpd->status = GPD_STATE_POWER_OFF;
362 
363 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
364 		genpd_sd_counter_dec(link->master);
365 		genpd_lock_nested(link->master, depth + 1);
366 		genpd_power_off(link->master, false, depth + 1);
367 		genpd_unlock(link->master);
368 	}
369 
370 	return 0;
371 }
372 
373 /**
374  * genpd_power_on - Restore power to a given PM domain and its masters.
375  * @genpd: PM domain to power up.
376  * @depth: nesting count for lockdep.
377  *
378  * Restore power to @genpd and all of its masters so that it is possible to
379  * resume a device belonging to it.
380  */
381 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
382 {
383 	struct gpd_link *link;
384 	int ret = 0;
385 
386 	if (genpd_status_on(genpd))
387 		return 0;
388 
389 	/*
390 	 * The list is guaranteed not to change while the loop below is being
391 	 * executed, unless one of the masters' .power_on() callbacks fiddles
392 	 * with it.
393 	 */
394 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
395 		struct generic_pm_domain *master = link->master;
396 
397 		genpd_sd_counter_inc(master);
398 
399 		genpd_lock_nested(master, depth + 1);
400 		ret = genpd_power_on(master, depth + 1);
401 		genpd_unlock(master);
402 
403 		if (ret) {
404 			genpd_sd_counter_dec(master);
405 			goto err;
406 		}
407 	}
408 
409 	ret = _genpd_power_on(genpd, true);
410 	if (ret)
411 		goto err;
412 
413 	genpd->status = GPD_STATE_ACTIVE;
414 	return 0;
415 
416  err:
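	/* Roll back: power off the masters that were powered on above. */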
417 	list_for_each_entry_continue_reverse(link,
418 					&genpd->slave_links,
419 					slave_node) {
420 		genpd_sd_counter_dec(link->master);
421 		genpd_lock_nested(link->master, depth + 1);
422 		genpd_power_off(link->master, false, depth + 1);
423 		genpd_unlock(link->master);
424 	}
425 
426 	return ret;
427 }
428 
429 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
430 				     unsigned long val, void *ptr)
431 {
432 	struct generic_pm_domain_data *gpd_data;
433 	struct device *dev;
434 
435 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
436 	dev = gpd_data->base.dev;
437 
438 	for (;;) {
439 		struct generic_pm_domain *genpd;
440 		struct pm_domain_data *pdd;
441 
442 		spin_lock_irq(&dev->power.lock);
443 
444 		pdd = dev->power.subsys_data ?
445 				dev->power.subsys_data->domain_data : NULL;
446 		if (pdd && pdd->dev) {
447 			to_gpd_data(pdd)->td.constraint_changed = true;
448 			genpd = dev_to_genpd(dev);
449 		} else {
450 			genpd = ERR_PTR(-ENODATA);
451 		}
452 
453 		spin_unlock_irq(&dev->power.lock);
454 
455 		if (!IS_ERR(genpd)) {
456 			genpd_lock(genpd);
457 			genpd->max_off_time_changed = true;
458 			genpd_unlock(genpd);
459 		}
460 
461 		dev = dev->parent;
462 		if (!dev || dev->power.ignore_children)
463 			break;
464 	}
465 
466 	return NOTIFY_DONE;
467 }
468 
469 /**
470  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
471  * @work: Work structure used for scheduling the execution of this function.
472  */
473 static void genpd_power_off_work_fn(struct work_struct *work)
474 {
475 	struct generic_pm_domain *genpd;
476 
477 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
478 
479 	genpd_lock(genpd);
480 	genpd_power_off(genpd, false, 0);
481 	genpd_unlock(genpd);
482 }
483 
484 /**
485  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
486  * @dev: Device to handle.
487  */
488 static int __genpd_runtime_suspend(struct device *dev)
489 {
490 	int (*cb)(struct device *__dev);
491 
492 	if (dev->type && dev->type->pm)
493 		cb = dev->type->pm->runtime_suspend;
494 	else if (dev->class && dev->class->pm)
495 		cb = dev->class->pm->runtime_suspend;
496 	else if (dev->bus && dev->bus->pm)
497 		cb = dev->bus->pm->runtime_suspend;
498 	else
499 		cb = NULL;
500 
501 	if (!cb && dev->driver && dev->driver->pm)
502 		cb = dev->driver->pm->runtime_suspend;
503 
504 	return cb ? cb(dev) : 0;
505 }
506 
507 /**
508  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
509  * @dev: Device to handle.
510  */
511 static int __genpd_runtime_resume(struct device *dev)
512 {
513 	int (*cb)(struct device *__dev);
514 
515 	if (dev->type && dev->type->pm)
516 		cb = dev->type->pm->runtime_resume;
517 	else if (dev->class && dev->class->pm)
518 		cb = dev->class->pm->runtime_resume;
519 	else if (dev->bus && dev->bus->pm)
520 		cb = dev->bus->pm->runtime_resume;
521 	else
522 		cb = NULL;
523 
524 	if (!cb && dev->driver && dev->driver->pm)
525 		cb = dev->driver->pm->runtime_resume;
526 
527 	return cb ? cb(dev) : 0;
528 }
529 
530 /**
531  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
532  * @dev: Device to suspend.
533  *
534  * Carry out a runtime suspend of a device under the assumption that its
535  * pm_domain field points to the domain member of an object of type
536  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
537  */
538 static int genpd_runtime_suspend(struct device *dev)
539 {
540 	struct generic_pm_domain *genpd;
541 	bool (*suspend_ok)(struct device *__dev);
542 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
543 	bool runtime_pm = pm_runtime_enabled(dev);
544 	ktime_t time_start;
545 	s64 elapsed_ns;
546 	int ret;
547 
548 	dev_dbg(dev, "%s()\n", __func__);
549 
550 	genpd = dev_to_genpd(dev);
551 	if (IS_ERR(genpd))
552 		return -EINVAL;
553 
554 	/*
555 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
556 	 * callbacks for other purposes than runtime PM. In those scenarios
557 	 * runtime PM is disabled. Under these circumstances, we shall skip
558 	 * validating/measuring the PM QoS latency.
559 	 */
560 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
561 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
562 		return -EBUSY;
563 
564 	/* Measure suspend latency. */
565 	time_start = 0;
566 	if (runtime_pm)
567 		time_start = ktime_get();
568 
569 	ret = __genpd_runtime_suspend(dev);
570 	if (ret)
571 		return ret;
572 
573 	ret = genpd_stop_dev(genpd, dev);
574 	if (ret) {
575 		__genpd_runtime_resume(dev);
576 		return ret;
577 	}
578 
579 	/* Update suspend latency value if the measured time exceeds it. */
580 	if (runtime_pm) {
581 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
582 		if (elapsed_ns > td->suspend_latency_ns) {
583 			td->suspend_latency_ns = elapsed_ns;
584 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
585 				elapsed_ns);
586 			genpd->max_off_time_changed = true;
587 			td->constraint_changed = true;
588 		}
589 	}
590 
591 	/*
592 	 * If power.irq_safe is set, this routine may be run with
593 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
594 	 */
595 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
596 		return 0;
597 
598 	genpd_lock(genpd);
599 	genpd_power_off(genpd, true, 0);
600 	genpd_unlock(genpd);
601 
602 	return 0;
603 }
604 
605 /**
606  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
607  * @dev: Device to resume.
608  *
609  * Carry out a runtime resume of a device under the assumption that its
610  * pm_domain field points to the domain member of an object of type
611  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
612  */
613 static int genpd_runtime_resume(struct device *dev)
614 {
615 	struct generic_pm_domain *genpd;
616 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
617 	bool runtime_pm = pm_runtime_enabled(dev);
618 	ktime_t time_start;
619 	s64 elapsed_ns;
620 	int ret;
621 	bool timed = true;
622 
623 	dev_dbg(dev, "%s()\n", __func__);
624 
625 	genpd = dev_to_genpd(dev);
626 	if (IS_ERR(genpd))
627 		return -EINVAL;
628 
629 	/*
630 	 * As we don't power off a non IRQ safe domain, which holds
631 	 * an IRQ safe device, we don't need to restore power to it.
632 	 */
633 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
634 		timed = false;
635 		goto out;
636 	}
637 
638 	genpd_lock(genpd);
639 	ret = genpd_power_on(genpd, 0);
640 	genpd_unlock(genpd);
641 
642 	if (ret)
643 		return ret;
644 
645  out:
646 	/* Measure resume latency. */
647 	time_start = 0;
648 	if (timed && runtime_pm)
649 		time_start = ktime_get();
650 
651 	ret = genpd_start_dev(genpd, dev);
652 	if (ret)
653 		goto err_poweroff;
654 
655 	ret = __genpd_runtime_resume(dev);
656 	if (ret)
657 		goto err_stop;
658 
659 	/* Update resume latency value if the measured time exceeds it. */
660 	if (timed && runtime_pm) {
661 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
662 		if (elapsed_ns > td->resume_latency_ns) {
663 			td->resume_latency_ns = elapsed_ns;
664 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
665 				elapsed_ns);
666 			genpd->max_off_time_changed = true;
667 			td->constraint_changed = true;
668 		}
669 	}
670 
671 	return 0;
672 
673 err_stop:
674 	genpd_stop_dev(genpd, dev);
675 err_poweroff:
676 	if (!pm_runtime_is_irq_safe(dev) ||
677 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
678 		genpd_lock(genpd);
679 		genpd_power_off(genpd, true, 0);
680 		genpd_unlock(genpd);
681 	}
682 
683 	return ret;
684 }
685 
686 static bool pd_ignore_unused;
687 static int __init pd_ignore_unused_setup(char *__unused)
688 {
689 	pd_ignore_unused = true;
690 	return 1;
691 }
692 __setup("pd_ignore_unused", pd_ignore_unused_setup);
693 
694 /**
695  * genpd_power_off_unused - Power off all PM domains with no devices in use.
696  */
697 static int __init genpd_power_off_unused(void)
698 {
699 	struct generic_pm_domain *genpd;
700 
701 	if (pd_ignore_unused) {
702 		pr_warn("genpd: Not disabling unused power domains\n");
703 		return 0;
704 	}
705 
706 	mutex_lock(&gpd_list_lock);
707 
708 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
709 		genpd_queue_power_off_work(genpd);
710 
711 	mutex_unlock(&gpd_list_lock);
712 
713 	return 0;
714 }
715 late_initcall(genpd_power_off_unused);
716 
717 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
718 
719 /**
720  * pm_genpd_present - Check if the given PM domain has been initialized.
721  * @genpd: PM domain to check.
722  */
723 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
724 {
725 	const struct generic_pm_domain *gpd;
726 
727 	if (IS_ERR_OR_NULL(genpd))
728 		return false;
729 
730 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
731 		if (gpd == genpd)
732 			return true;
733 
734 	return false;
735 }
736 
737 #endif
738 
739 #ifdef CONFIG_PM_SLEEP
740 
741 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
742 				    struct device *dev)
743 {
744 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
745 }
746 
747 /**
748  * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
749  * @genpd: PM domain to power off, if possible.
750  * @use_lock: whether to hold the domain locks while walking the master domains.
751  * @depth: nesting count for lockdep.
752  *
753  * Check if the given PM domain can be powered off (during system suspend or
754  * hibernation) and do that if so.  Also, in that case propagate to its masters.
755  *
756  * This function is only called in "noirq" and "syscore" stages of system power
757  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
758  * these cases the lock must be held.
759  */
760 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
761 				 unsigned int depth)
762 {
763 	struct gpd_link *link;
764 
765 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
766 		return;
767 
768 	if (genpd->suspended_count != genpd->device_count
769 	    || atomic_read(&genpd->sd_count) > 0)
770 		return;
771 
772 	/* Choose the deepest state when suspending */
773 	genpd->state_idx = genpd->state_count - 1;
774 	if (_genpd_power_off(genpd, false))
775 		return;
776 
777 	genpd->status = GPD_STATE_POWER_OFF;
778 
779 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
780 		genpd_sd_counter_dec(link->master);
781 
782 		if (use_lock)
783 			genpd_lock_nested(link->master, depth + 1);
784 
785 		genpd_sync_power_off(link->master, use_lock, depth + 1);
786 
787 		if (use_lock)
788 			genpd_unlock(link->master);
789 	}
790 }
791 
792 /**
793  * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
794  * @genpd: PM domain to power on.
795  * @use_lock: whether to hold the domain locks while walking the master domains.
796  * @depth: nesting count for lockdep.
797  *
798  * This function is only called in "noirq" and "syscore" stages of system power
799  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
800  * these cases the lock must be held.
801  */
802 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
803 				unsigned int depth)
804 {
805 	struct gpd_link *link;
806 
807 	if (genpd_status_on(genpd))
808 		return;
809 
810 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
811 		genpd_sd_counter_inc(link->master);
812 
813 		if (use_lock)
814 			genpd_lock_nested(link->master, depth + 1);
815 
816 		genpd_sync_power_on(link->master, use_lock, depth + 1);
817 
818 		if (use_lock)
819 			genpd_unlock(link->master);
820 	}
821 
822 	_genpd_power_on(genpd, false);
823 
824 	genpd->status = GPD_STATE_ACTIVE;
825 }
826 
827 /**
828  * resume_needed - Check whether to resume a device before system suspend.
829  * @dev: Device to check.
830  * @genpd: PM domain the device belongs to.
831  *
832  * There are two cases in which a device that can wake up the system from sleep
833  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
834  * to wake up the system and it has to remain active for this purpose while the
835  * system is in the sleep state and (2) if the device is not enabled to wake up
836  * the system from sleep states and it generally doesn't generate wakeup signals
837  * by itself (those signals are generated on its behalf by other parts of the
838  * system).  In the latter case it may be necessary to reconfigure the device's
839  * wakeup settings during system suspend, because it may have been set up to
840  * signal remote wakeup from the system's working state as needed by runtime PM.
841  * Return 'true' in either of the above cases.
842  */
843 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
844 {
845 	bool active_wakeup;
846 
847 	if (!device_can_wakeup(dev))
848 		return false;
849 
850 	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
851 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
852 }
853 
854 /**
855  * pm_genpd_prepare - Start power transition of a device in a PM domain.
856  * @dev: Device to start the transition of.
857  *
858  * Start a power transition of a device (during a system-wide power transition)
859  * under the assumption that its pm_domain field points to the domain member of
860  * an object of type struct generic_pm_domain representing a PM domain
861  * consisting of I/O devices.
862  */
863 static int pm_genpd_prepare(struct device *dev)
864 {
865 	struct generic_pm_domain *genpd;
866 	int ret;
867 
868 	dev_dbg(dev, "%s()\n", __func__);
869 
870 	genpd = dev_to_genpd(dev);
871 	if (IS_ERR(genpd))
872 		return -EINVAL;
873 
874 	/*
875 	 * If a wakeup request is pending for the device, it should be woken up
876 	 * at this point and a system wakeup event should be reported if it's
877 	 * set up to wake up the system from sleep states.
878 	 */
879 	if (resume_needed(dev, genpd))
880 		pm_runtime_resume(dev);
881 
882 	genpd_lock(genpd);
883 
884 	if (genpd->prepared_count++ == 0)
885 		genpd->suspended_count = 0;
886 
887 	genpd_unlock(genpd);
888 
889 	ret = pm_generic_prepare(dev);
890 	if (ret) {
891 		genpd_lock(genpd);
892 
893 		genpd->prepared_count--;
894 
895 		genpd_unlock(genpd);
896 	}
897 
898 	return ret;
899 }
900 
901 /**
902  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
903  * @dev: Device to suspend.
904  *
905  * Stop the device and remove power from the domain if all devices in it have
906  * been stopped.
907  */
908 static int pm_genpd_suspend_noirq(struct device *dev)
909 {
910 	struct generic_pm_domain *genpd;
911 	int ret;
912 
913 	dev_dbg(dev, "%s()\n", __func__);
914 
915 	genpd = dev_to_genpd(dev);
916 	if (IS_ERR(genpd))
917 		return -EINVAL;
918 
919 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
920 		return 0;
921 
922 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
923 		ret = pm_runtime_force_suspend(dev);
924 		if (ret)
925 			return ret;
926 	}
927 
928 	genpd_lock(genpd);
929 	genpd->suspended_count++;
930 	genpd_sync_power_off(genpd, true, 0);
931 	genpd_unlock(genpd);
932 
933 	return 0;
934 }
935 
936 /**
937  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
938  * @dev: Device to resume.
939  *
940  * Restore power to the device's PM domain, if necessary, and start the device.
941  */
942 static int pm_genpd_resume_noirq(struct device *dev)
943 {
944 	struct generic_pm_domain *genpd;
945 	int ret = 0;
946 
947 	dev_dbg(dev, "%s()\n", __func__);
948 
949 	genpd = dev_to_genpd(dev);
950 	if (IS_ERR(genpd))
951 		return -EINVAL;
952 
953 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
954 		return 0;
955 
956 	genpd_lock(genpd);
957 	genpd_sync_power_on(genpd, true, 0);
958 	genpd->suspended_count--;
959 	genpd_unlock(genpd);
960 
961 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
962 		ret = pm_runtime_force_resume(dev);
963 
964 	return ret;
965 }
966 
967 /**
968  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
969  * @dev: Device to freeze.
970  *
971  * Carry out a late freeze of a device under the assumption that its
972  * pm_domain field points to the domain member of an object of type
973  * struct generic_pm_domain representing a power domain consisting of I/O
974  * devices.
975  */
976 static int pm_genpd_freeze_noirq(struct device *dev)
977 {
978 	struct generic_pm_domain *genpd;
979 	int ret = 0;
980 
981 	dev_dbg(dev, "%s()\n", __func__);
982 
983 	genpd = dev_to_genpd(dev);
984 	if (IS_ERR(genpd))
985 		return -EINVAL;
986 
987 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
988 		ret = pm_runtime_force_suspend(dev);
989 
990 	return ret;
991 }
992 
993 /**
994  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
995  * @dev: Device to thaw.
996  *
997  * Start the device, unless power has been removed from the domain already
998  * before the system transition.
999  */
1000 static int pm_genpd_thaw_noirq(struct device *dev)
1001 {
1002 	struct generic_pm_domain *genpd;
1003 	int ret = 0;
1004 
1005 	dev_dbg(dev, "%s()\n", __func__);
1006 
1007 	genpd = dev_to_genpd(dev);
1008 	if (IS_ERR(genpd))
1009 		return -EINVAL;
1010 
1011 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
1012 		ret = pm_runtime_force_resume(dev);
1013 
1014 	return ret;
1015 }
1016 
1017 /**
1018  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1019  * @dev: Device to resume.
1020  *
1021  * Make sure the domain will be in the same power state as before the
1022  * hibernation the system is resuming from and start the device if necessary.
1023  */
1024 static int pm_genpd_restore_noirq(struct device *dev)
1025 {
1026 	struct generic_pm_domain *genpd;
1027 	int ret = 0;
1028 
1029 	dev_dbg(dev, "%s()\n", __func__);
1030 
1031 	genpd = dev_to_genpd(dev);
1032 	if (IS_ERR(genpd))
1033 		return -EINVAL;
1034 
1035 	/*
1036 	 * At this point suspended_count == 0 means we are being run for the
1037 	 * first time for the given domain in the present cycle.
1038 	 */
1039 	genpd_lock(genpd);
1040 	if (genpd->suspended_count++ == 0)
1041 		/*
1042 		 * The boot kernel might put the domain into arbitrary state,
1043 		 * so make it appear as powered off to genpd_sync_power_on(),
1044 		 * so that it tries to power it on in case it was really off.
1045 		 */
1046 		genpd->status = GPD_STATE_POWER_OFF;
1047 
1048 	genpd_sync_power_on(genpd, true, 0);
1049 	genpd_unlock(genpd);
1050 
1051 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
1052 		ret = pm_runtime_force_resume(dev);
1053 
1054 	return ret;
1055 }
1056 
1057 /**
1058  * pm_genpd_complete - Complete power transition of a device in a power domain.
1059  * @dev: Device to complete the transition of.
1060  *
1061  * Complete a power transition of a device (during a system-wide power
1062  * transition) under the assumption that its pm_domain field points to the
1063  * domain member of an object of type struct generic_pm_domain representing
1064  * a power domain consisting of I/O devices.
1065  */
1066 static void pm_genpd_complete(struct device *dev)
1067 {
1068 	struct generic_pm_domain *genpd;
1069 
1070 	dev_dbg(dev, "%s()\n", __func__);
1071 
1072 	genpd = dev_to_genpd(dev);
1073 	if (IS_ERR(genpd))
1074 		return;
1075 
1076 	pm_generic_complete(dev);
1077 
1078 	genpd_lock(genpd);
1079 
1080 	genpd->prepared_count--;
1081 	if (!genpd->prepared_count)
1082 		genpd_queue_power_off_work(genpd);
1083 
1084 	genpd_unlock(genpd);
1085 }
1086 
1087 /**
1088  * genpd_syscore_switch - Switch power during system core suspend or resume.
1089  * @dev: Device that normally is marked as "always on" to switch power for.
1090  *
1091  * This routine may only be called during the system core (syscore) suspend or
1092  * resume phase for devices whose "always on" flags are set.
1093  */
1094 static void genpd_syscore_switch(struct device *dev, bool suspend)
1095 {
1096 	struct generic_pm_domain *genpd;
1097 
1098 	genpd = dev_to_genpd(dev);
1099 	if (!pm_genpd_present(genpd))
1100 		return;
1101 
1102 	if (suspend) {
1103 		genpd->suspended_count++;
1104 		genpd_sync_power_off(genpd, false, 0);
1105 	} else {
1106 		genpd_sync_power_on(genpd, false, 0);
1107 		genpd->suspended_count--;
1108 	}
1109 }
1110 
1111 void pm_genpd_syscore_poweroff(struct device *dev)
1112 {
1113 	genpd_syscore_switch(dev, true);
1114 }
1115 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1116 
1117 void pm_genpd_syscore_poweron(struct device *dev)
1118 {
1119 	genpd_syscore_switch(dev, false);
1120 }
1121 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
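/*
 * Usage note (illustrative): pm_genpd_syscore_poweroff() and
 * pm_genpd_syscore_poweron() are meant to be called from syscore suspend
 * and resume paths, e.g. by a timer driver whose device must stay
 * functional across the "noirq" phases; the actual callers live outside
 * this file.
 */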
1122 
1123 #else /* !CONFIG_PM_SLEEP */
1124 
1125 #define pm_genpd_prepare		NULL
1126 #define pm_genpd_suspend_noirq		NULL
1127 #define pm_genpd_resume_noirq		NULL
1128 #define pm_genpd_freeze_noirq		NULL
1129 #define pm_genpd_thaw_noirq		NULL
1130 #define pm_genpd_restore_noirq		NULL
1131 #define pm_genpd_complete		NULL
1132 
1133 #endif /* CONFIG_PM_SLEEP */
1134 
1135 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1136 					struct generic_pm_domain *genpd,
1137 					struct gpd_timing_data *td)
1138 {
1139 	struct generic_pm_domain_data *gpd_data;
1140 	int ret;
1141 
1142 	ret = dev_pm_get_subsys_data(dev);
1143 	if (ret)
1144 		return ERR_PTR(ret);
1145 
1146 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1147 	if (!gpd_data) {
1148 		ret = -ENOMEM;
1149 		goto err_put;
1150 	}
1151 
1152 	if (td)
1153 		gpd_data->td = *td;
1154 
1155 	gpd_data->base.dev = dev;
1156 	gpd_data->td.constraint_changed = true;
1157 	gpd_data->td.effective_constraint_ns = -1;
1158 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1159 
1160 	spin_lock_irq(&dev->power.lock);
1161 
1162 	if (dev->power.subsys_data->domain_data) {
1163 		ret = -EINVAL;
1164 		goto err_free;
1165 	}
1166 
1167 	dev->power.subsys_data->domain_data = &gpd_data->base;
1168 
1169 	spin_unlock_irq(&dev->power.lock);
1170 
1171 	dev_pm_domain_set(dev, &genpd->domain);
1172 
1173 	return gpd_data;
1174 
1175  err_free:
1176 	spin_unlock_irq(&dev->power.lock);
1177 	kfree(gpd_data);
1178  err_put:
1179 	dev_pm_put_subsys_data(dev);
1180 	return ERR_PTR(ret);
1181 }
1182 
1183 static void genpd_free_dev_data(struct device *dev,
1184 				struct generic_pm_domain_data *gpd_data)
1185 {
1186 	dev_pm_domain_set(dev, NULL);
1187 
1188 	spin_lock_irq(&dev->power.lock);
1189 
1190 	dev->power.subsys_data->domain_data = NULL;
1191 
1192 	spin_unlock_irq(&dev->power.lock);
1193 
1194 	kfree(gpd_data);
1195 	dev_pm_put_subsys_data(dev);
1196 }
1197 
1198 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1199 			    struct gpd_timing_data *td)
1200 {
1201 	struct generic_pm_domain_data *gpd_data;
1202 	int ret = 0;
1203 
1204 	dev_dbg(dev, "%s()\n", __func__);
1205 
1206 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1207 		return -EINVAL;
1208 
1209 	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1210 	if (IS_ERR(gpd_data))
1211 		return PTR_ERR(gpd_data);
1212 
1213 	genpd_lock(genpd);
1214 
1215 	if (genpd->prepared_count > 0) {
1216 		ret = -EAGAIN;
1217 		goto out;
1218 	}
1219 
1220 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1221 	if (ret)
1222 		goto out;
1223 
1224 	genpd->device_count++;
1225 	genpd->max_off_time_changed = true;
1226 
1227 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1228 
1229  out:
1230 	genpd_unlock(genpd);
1231 
1232 	if (ret)
1233 		genpd_free_dev_data(dev, gpd_data);
1234 	else
1235 		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1236 
1237 	return ret;
1238 }
1239 
1240 /**
1241  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1242  * @genpd: PM domain to add the device to.
1243  * @dev: Device to be added.
1244  * @td: Set of PM QoS timing parameters to attach to the device.
1245  */
1246 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1247 			  struct gpd_timing_data *td)
1248 {
1249 	int ret;
1250 
1251 	mutex_lock(&gpd_list_lock);
1252 	ret = genpd_add_device(genpd, dev, td);
1253 	mutex_unlock(&gpd_list_lock);
1254 
1255 	return ret;
1256 }
1257 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
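/*
 * Example (illustrative sketch, not taken from a real caller): a bus or
 * platform driver can attach a device without supplying timing data by
 * passing a NULL @td:
 *
 *	ret = __pm_genpd_add_device(&my_pd, dev, NULL);
 *
 * in which case genpd_alloc_dev_data() uses default timing data for the
 * device.
 */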
1258 
1259 static int genpd_remove_device(struct generic_pm_domain *genpd,
1260 			       struct device *dev)
1261 {
1262 	struct generic_pm_domain_data *gpd_data;
1263 	struct pm_domain_data *pdd;
1264 	int ret = 0;
1265 
1266 	dev_dbg(dev, "%s()\n", __func__);
1267 
1268 	pdd = dev->power.subsys_data->domain_data;
1269 	gpd_data = to_gpd_data(pdd);
1270 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1271 
1272 	genpd_lock(genpd);
1273 
1274 	if (genpd->prepared_count > 0) {
1275 		ret = -EAGAIN;
1276 		goto out;
1277 	}
1278 
1279 	genpd->device_count--;
1280 	genpd->max_off_time_changed = true;
1281 
1282 	if (genpd->detach_dev)
1283 		genpd->detach_dev(genpd, dev);
1284 
1285 	list_del_init(&pdd->list_node);
1286 
1287 	genpd_unlock(genpd);
1288 
1289 	genpd_free_dev_data(dev, gpd_data);
1290 
1291 	return 0;
1292 
1293  out:
1294 	genpd_unlock(genpd);
1295 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1296 
1297 	return ret;
1298 }
1299 
1300 /**
1301  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1302  * @genpd: PM domain to remove the device from.
1303  * @dev: Device to be removed.
1304  */
1305 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1306 			   struct device *dev)
1307 {
1308 	if (!genpd || genpd != genpd_lookup_dev(dev))
1309 		return -EINVAL;
1310 
1311 	return genpd_remove_device(genpd, dev);
1312 }
1313 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1314 
1315 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1316 			       struct generic_pm_domain *subdomain)
1317 {
1318 	struct gpd_link *link, *itr;
1319 	int ret = 0;
1320 
1321 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1322 	    || genpd == subdomain)
1323 		return -EINVAL;
1324 
1325 	/*
1326 	 * If the domain can be powered on/off in an IRQ safe
1327 	 * context, ensure that the subdomain can also be
1328 	 * powered on/off in that context.
1329 	 */
1330 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1331 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1332 				genpd->name, subdomain->name);
1333 		return -EINVAL;
1334 	}
1335 
1336 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1337 	if (!link)
1338 		return -ENOMEM;
1339 
1340 	genpd_lock(subdomain);
1341 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1342 
1343 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1344 		ret = -EINVAL;
1345 		goto out;
1346 	}
1347 
1348 	list_for_each_entry(itr, &genpd->master_links, master_node) {
1349 		if (itr->slave == subdomain && itr->master == genpd) {
1350 			ret = -EINVAL;
1351 			goto out;
1352 		}
1353 	}
1354 
1355 	link->master = genpd;
1356 	list_add_tail(&link->master_node, &genpd->master_links);
1357 	link->slave = subdomain;
1358 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1359 	if (genpd_status_on(subdomain))
1360 		genpd_sd_counter_inc(genpd);
1361 
1362  out:
1363 	genpd_unlock(genpd);
1364 	genpd_unlock(subdomain);
1365 	if (ret)
1366 		kfree(link);
1367 	return ret;
1368 }
1369 
1370 /**
1371  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1372  * @genpd: Master PM domain to add the subdomain to.
1373  * @subdomain: Subdomain to be added.
1374  */
1375 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1376 			   struct generic_pm_domain *subdomain)
1377 {
1378 	int ret;
1379 
1380 	mutex_lock(&gpd_list_lock);
1381 	ret = genpd_add_subdomain(genpd, subdomain);
1382 	mutex_unlock(&gpd_list_lock);
1383 
1384 	return ret;
1385 }
1386 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
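/*
 * Example (illustrative sketch; parent_pd and child_pd are hypothetical):
 * after both domains have been initialized with pm_genpd_init(), a
 * platform can nest them with:
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *
 * so that powering on child_pd keeps parent_pd powered via the
 * master/slave links set up in genpd_add_subdomain().
 */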
1387 
1388 /**
1389  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1390  * @genpd: Master PM domain to remove the subdomain from.
1391  * @subdomain: Subdomain to be removed.
1392  */
1393 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1394 			      struct generic_pm_domain *subdomain)
1395 {
1396 	struct gpd_link *link;
1397 	int ret = -EINVAL;
1398 
1399 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1400 		return -EINVAL;
1401 
1402 	genpd_lock(subdomain);
1403 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1404 
1405 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1406 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1407 			subdomain->name);
1408 		ret = -EBUSY;
1409 		goto out;
1410 	}
1411 
1412 	list_for_each_entry(link, &genpd->master_links, master_node) {
1413 		if (link->slave != subdomain)
1414 			continue;
1415 
1416 		list_del(&link->master_node);
1417 		list_del(&link->slave_node);
1418 		kfree(link);
1419 		if (genpd_status_on(subdomain))
1420 			genpd_sd_counter_dec(genpd);
1421 
1422 		ret = 0;
1423 		break;
1424 	}
1425 
1426 out:
1427 	genpd_unlock(genpd);
1428 	genpd_unlock(subdomain);
1429 
1430 	return ret;
1431 }
1432 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1433 
1434 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1435 {
1436 	struct genpd_power_state *state;
1437 
1438 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1439 	if (!state)
1440 		return -ENOMEM;
1441 
1442 	genpd->states = state;
1443 	genpd->state_count = 1;
1444 	genpd->free = state;
1445 
1446 	return 0;
1447 }
1448 
1449 static void genpd_lock_init(struct generic_pm_domain *genpd)
1450 {
1451 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1452 		spin_lock_init(&genpd->slock);
1453 		genpd->lock_ops = &genpd_spin_ops;
1454 	} else {
1455 		mutex_init(&genpd->mlock);
1456 		genpd->lock_ops = &genpd_mtx_ops;
1457 	}
1458 }
1459 
1460 /**
1461  * pm_genpd_init - Initialize a generic I/O PM domain object.
1462  * @genpd: PM domain object to initialize.
1463  * @gov: PM domain governor to associate with the domain (may be NULL).
1464  * @is_off: Initial value of the domain's power_is_off field.
1465  *
1466  * Returns 0 on successful initialization, else a negative error code.
1467  */
1468 int pm_genpd_init(struct generic_pm_domain *genpd,
1469 		  struct dev_power_governor *gov, bool is_off)
1470 {
1471 	int ret;
1472 
1473 	if (IS_ERR_OR_NULL(genpd))
1474 		return -EINVAL;
1475 
1476 	INIT_LIST_HEAD(&genpd->master_links);
1477 	INIT_LIST_HEAD(&genpd->slave_links);
1478 	INIT_LIST_HEAD(&genpd->dev_list);
1479 	genpd_lock_init(genpd);
1480 	genpd->gov = gov;
1481 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1482 	atomic_set(&genpd->sd_count, 0);
1483 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1484 	genpd->device_count = 0;
1485 	genpd->max_off_time_ns = -1;
1486 	genpd->max_off_time_changed = true;
1487 	genpd->provider = NULL;
1488 	genpd->has_provider = false;
1489 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1490 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1491 	genpd->domain.ops.prepare = pm_genpd_prepare;
1492 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1493 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1494 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1495 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1496 	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1497 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1498 	genpd->domain.ops.complete = pm_genpd_complete;
1499 
1500 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1501 		genpd->dev_ops.stop = pm_clk_suspend;
1502 		genpd->dev_ops.start = pm_clk_resume;
1503 	}
1504 
1505 	/* Always-on domains must be powered on at initialization. */
1506 	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1507 		return -EINVAL;
1508 
1509 	/* Use only one "off" state if there were no states declared */
1510 	if (genpd->state_count == 0) {
1511 		ret = genpd_set_default_power_state(genpd);
1512 		if (ret)
1513 			return ret;
1514 	}
1515 
1516 	mutex_lock(&gpd_list_lock);
1517 	list_add(&genpd->gpd_list_node, &gpd_list);
1518 	mutex_unlock(&gpd_list_lock);
1519 
1520 	return 0;
1521 }
1522 EXPORT_SYMBOL_GPL(pm_genpd_init);
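/*
 * Example (illustrative sketch; my_pd, my_pd_power_on and my_pd_power_off
 * are hypothetical): a platform driver typically fills in a domain and
 * registers it as follows:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *	...
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *
 * Passing a NULL governor leaves the domain without one, and "true" for
 * @is_off starts it in the powered-off state.
 */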
1523 
1524 static int genpd_remove(struct generic_pm_domain *genpd)
1525 {
1526 	struct gpd_link *l, *link;
1527 
1528 	if (IS_ERR_OR_NULL(genpd))
1529 		return -EINVAL;
1530 
1531 	genpd_lock(genpd);
1532 
1533 	if (genpd->has_provider) {
1534 		genpd_unlock(genpd);
1535 		pr_err("Provider present, unable to remove %s\n", genpd->name);
1536 		return -EBUSY;
1537 	}
1538 
1539 	if (!list_empty(&genpd->master_links) || genpd->device_count) {
1540 		genpd_unlock(genpd);
1541 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1542 		return -EBUSY;
1543 	}
1544 
1545 	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1546 		list_del(&link->master_node);
1547 		list_del(&link->slave_node);
1548 		kfree(link);
1549 	}
1550 
1551 	list_del(&genpd->gpd_list_node);
1552 	genpd_unlock(genpd);
1553 	cancel_work_sync(&genpd->power_off_work);
1554 	kfree(genpd->free);
1555 	pr_debug("%s: removed %s\n", __func__, genpd->name);
1556 
1557 	return 0;
1558 }
1559 
1560 /**
1561  * pm_genpd_remove - Remove a generic I/O PM domain
1562  * @genpd: Pointer to PM domain that is to be removed.
1563  *
1564  * To remove the PM domain, this function:
1565  *  - Removes the PM domain as a subdomain to any parent domains,
1566  *    if it was added.
1567  *  - Removes the PM domain from the list of registered PM domains.
1568  *
1569  * The PM domain will only be removed, if the associated provider has
1570  * been removed, it is not a parent to any other PM domain and has no
1571  * devices associated with it.
1572  */
1573 int pm_genpd_remove(struct generic_pm_domain *genpd)
1574 {
1575 	int ret;
1576 
1577 	mutex_lock(&gpd_list_lock);
1578 	ret = genpd_remove(genpd);
1579 	mutex_unlock(&gpd_list_lock);
1580 
1581 	return ret;
1582 }
1583 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1584 
1585 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1586 
1587 typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
1588 						   void *data);
1589 
1590 /*
1591  * Device Tree based PM domain providers.
1592  *
1593  * The code below implements generic device tree based PM domain providers that
1594  * bind device tree nodes with generic PM domains registered in the system.
1595  *
1596  * Any driver that registers generic PM domains and needs to support binding of
1597  * devices to these domains is supposed to register a PM domain provider, which
1598  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1599  *
1600  * Two simple mapping functions have been provided for convenience:
1601  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1602  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1603  *    index.
1604  */
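/*
 * Example (illustrative sketch): a provider with a single domain described
 * by its own device tree node would register itself with
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *
 * after which consumer nodes can reference the domain through a
 * "power-domains" phandle that genpd_dev_pm_attach() resolves via the
 * provider's xlate callback.
 */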
1605 
1606 /**
1607  * struct of_genpd_provider - PM domain provider registration structure
1608  * @link: Entry in global list of PM domain providers
1609  * @node: Pointer to device tree node of PM domain provider
1610  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1611  *         into a PM domain.
1612  * @data: context pointer to be passed into @xlate callback
1613  */
1614 struct of_genpd_provider {
1615 	struct list_head link;
1616 	struct device_node *node;
1617 	genpd_xlate_t xlate;
1618 	void *data;
1619 };
1620 
1621 /* List of registered PM domain providers. */
1622 static LIST_HEAD(of_genpd_providers);
1623 /* Mutex to protect the list above. */
1624 static DEFINE_MUTEX(of_genpd_mutex);
1625 
1626 /**
1627  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1628  * @genpdspec: OF phandle args to map into a PM domain
1629  * @data: xlate function private data - pointer to struct generic_pm_domain
1630  *
1631  * This is a generic xlate function that can be used to model PM domains that
1632  * have their own device tree nodes. The private data of xlate function needs
1633  * to be a valid pointer to struct generic_pm_domain.
1634  */
1635 static struct generic_pm_domain *genpd_xlate_simple(
1636 					struct of_phandle_args *genpdspec,
1637 					void *data)
1638 {
1639 	return data;
1640 }
1641 
1642 /**
1643  * genpd_xlate_onecell() - Xlate function using a single index.
1644  * @genpdspec: OF phandle args to map into a PM domain
1645  * @data: xlate function private data - pointer to struct genpd_onecell_data
1646  *
1647  * This is a generic xlate function that can be used to model simple PM domain
1648  * controllers that have one device tree node and provide multiple PM domains.
1649  * A single cell is used as an index into an array of PM domains specified in
1650  * the genpd_onecell_data struct when registering the provider.
1651  */
1652 static struct generic_pm_domain *genpd_xlate_onecell(
1653 					struct of_phandle_args *genpdspec,
1654 					void *data)
1655 {
1656 	struct genpd_onecell_data *genpd_data = data;
1657 	unsigned int idx = genpdspec->args[0];
1658 
1659 	if (genpdspec->args_count != 1)
1660 		return ERR_PTR(-EINVAL);
1661 
1662 	if (idx >= genpd_data->num_domains) {
1663 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1664 		return ERR_PTR(-EINVAL);
1665 	}
1666 
1667 	if (!genpd_data->domains[idx])
1668 		return ERR_PTR(-ENOENT);
1669 
1670 	return genpd_data->domains[idx];
1671 }
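/*
 * Example (illustrative sketch; the my_* names are hypothetical): a
 * provider exposing several domains fills a struct genpd_onecell_data and
 * registers it with of_genpd_add_provider_onecell():
 *
 *	static struct generic_pm_domain *my_domains[MY_NR_DOMAINS];
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 * Consumers then select a domain with a one-cell specifier that is used as
 * an index into ->domains[], as implemented by genpd_xlate_onecell() above.
 */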
1672 
1673 /**
1674  * genpd_add_provider() - Register a PM domain provider for a node
1675  * @np: Device node pointer associated with the PM domain provider.
1676  * @xlate: Callback for decoding PM domain from phandle arguments.
1677  * @data: Context pointer for @xlate callback.
1678  */
1679 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1680 			      void *data)
1681 {
1682 	struct of_genpd_provider *cp;
1683 
1684 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1685 	if (!cp)
1686 		return -ENOMEM;
1687 
1688 	cp->node = of_node_get(np);
1689 	cp->data = data;
1690 	cp->xlate = xlate;
1691 
1692 	mutex_lock(&of_genpd_mutex);
1693 	list_add(&cp->link, &of_genpd_providers);
1694 	mutex_unlock(&of_genpd_mutex);
1695 	pr_debug("Added domain provider from %s\n", np->full_name);
1696 
1697 	return 0;
1698 }
1699 
1700 /**
1701  * of_genpd_add_provider_simple() - Register a simple PM domain provider
1702  * @np: Device node pointer associated with the PM domain provider.
1703  * @genpd: Pointer to PM domain associated with the PM domain provider.
1704  */
1705 int of_genpd_add_provider_simple(struct device_node *np,
1706 				 struct generic_pm_domain *genpd)
1707 {
1708 	int ret = -EINVAL;
1709 
1710 	if (!np || !genpd)
1711 		return -EINVAL;
1712 
1713 	mutex_lock(&gpd_list_lock);
1714 
1715 	if (pm_genpd_present(genpd)) {
1716 		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1717 		if (!ret) {
1718 			genpd->provider = &np->fwnode;
1719 			genpd->has_provider = true;
1720 		}
1721 	}
1722 
1723 	mutex_unlock(&gpd_list_lock);
1724 
1725 	return ret;
1726 }
1727 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
1728 
1729 /**
1730  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1731  * @np: Device node pointer associated with the PM domain provider.
1732  * @data: Pointer to the data associated with the PM domain provider.
1733  */
1734 int of_genpd_add_provider_onecell(struct device_node *np,
1735 				  struct genpd_onecell_data *data)
1736 {
1737 	unsigned int i;
1738 	int ret = -EINVAL;
1739 
1740 	if (!np || !data)
1741 		return -EINVAL;
1742 
1743 	mutex_lock(&gpd_list_lock);
1744 
1745 	for (i = 0; i < data->num_domains; i++) {
1746 		if (!data->domains[i])
1747 			continue;
1748 		if (!pm_genpd_present(data->domains[i]))
1749 			goto error;
1750 
1751 		data->domains[i]->provider = &np->fwnode;
1752 		data->domains[i]->has_provider = true;
1753 	}
1754 
1755 	ret = genpd_add_provider(np, genpd_xlate_onecell, data);
1756 	if (ret < 0)
1757 		goto error;
1758 
1759 	mutex_unlock(&gpd_list_lock);
1760 
1761 	return 0;
1762 
1763 error:
1764 	while (i--) {
1765 		if (!data->domains[i])
1766 			continue;
1767 		data->domains[i]->provider = NULL;
1768 		data->domains[i]->has_provider = false;
1769 	}
1770 
1771 	mutex_unlock(&gpd_list_lock);
1772 
1773 	return ret;
1774 }
1775 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
1776 
1777 /**
1778  * of_genpd_del_provider() - Remove a previously registered PM domain provider
1779  * @np: Device node pointer associated with the PM domain provider
1780  */
1781 void of_genpd_del_provider(struct device_node *np)
1782 {
1783 	struct of_genpd_provider *cp;
1784 	struct generic_pm_domain *gpd;
1785 
1786 	mutex_lock(&gpd_list_lock);
1787 	mutex_lock(&of_genpd_mutex);
1788 	list_for_each_entry(cp, &of_genpd_providers, link) {
1789 		if (cp->node == np) {
1790 			/*
1791 			 * For each PM domain associated with the
1792 			 * provider, set the 'has_provider' to false
1793 			 * so that the PM domain can be safely removed.
1794 			 */
1795 			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1796 				if (gpd->provider == &np->fwnode)
1797 					gpd->has_provider = false;
1798 
1799 			list_del(&cp->link);
1800 			of_node_put(cp->node);
1801 			kfree(cp);
1802 			break;
1803 		}
1804 	}
1805 	mutex_unlock(&of_genpd_mutex);
1806 	mutex_unlock(&gpd_list_lock);
1807 }
1808 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1809 
1810 /**
1811  * genpd_get_from_provider() - Look-up PM domain
1812  * @genpdspec: OF phandle args to use for look-up
1813  *
1814  * Looks for a PM domain provider under the node specified by @genpdspec and if
1815  * found, uses xlate function of the provider to map phandle args to a PM
1816  * domain.
1817  *
1818  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1819  * on failure.
1820  */
1821 static struct generic_pm_domain *genpd_get_from_provider(
1822 					struct of_phandle_args *genpdspec)
1823 {
1824 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1825 	struct of_genpd_provider *provider;
1826 
1827 	if (!genpdspec)
1828 		return ERR_PTR(-EINVAL);
1829 
1830 	mutex_lock(&of_genpd_mutex);
1831 
1832 	/* Check if we have such a provider in our array */
1833 	list_for_each_entry(provider, &of_genpd_providers, link) {
1834 		if (provider->node == genpdspec->np)
1835 			genpd = provider->xlate(genpdspec, provider->data);
1836 		if (!IS_ERR(genpd))
1837 			break;
1838 	}
1839 
1840 	mutex_unlock(&of_genpd_mutex);
1841 
1842 	return genpd;
1843 }
1844 
1845 /**
1846  * of_genpd_add_device() - Add a device to an I/O PM domain
1847  * @genpdspec: OF phandle args to use for look-up PM domain
1848  * @dev: Device to be added.
1849  *
1850  * Looks-up an I/O PM domain based upon phandle args provided and adds
1851  * the device to the PM domain. Returns a negative error code on failure.
1852  */
1853 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1854 {
1855 	struct generic_pm_domain *genpd;
1856 	int ret;
1857 
1858 	mutex_lock(&gpd_list_lock);
1859 
1860 	genpd = genpd_get_from_provider(genpdspec);
1861 	if (IS_ERR(genpd)) {
1862 		ret = PTR_ERR(genpd);
1863 		goto out;
1864 	}
1865 
1866 	ret = genpd_add_device(genpd, dev, NULL);
1867 
1868 out:
1869 	mutex_unlock(&gpd_list_lock);
1870 
1871 	return ret;
1872 }
1873 EXPORT_SYMBOL_GPL(of_genpd_add_device);
1874 
1875 /**
1876  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1877  * @parent_spec: OF phandle args to use for parent PM domain look-up
1878  * @subdomain_spec: OF phandle args to use for subdomain look-up
1879  *
1880  * Looks-up a parent PM domain and subdomain based upon phandle args
1881  * provided and adds the subdomain to the parent PM domain. Returns a
1882  * negative error code on failure.
1883  */
1884 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1885 			   struct of_phandle_args *subdomain_spec)
1886 {
1887 	struct generic_pm_domain *parent, *subdomain;
1888 	int ret;
1889 
1890 	mutex_lock(&gpd_list_lock);
1891 
1892 	parent = genpd_get_from_provider(parent_spec);
1893 	if (IS_ERR(parent)) {
1894 		ret = PTR_ERR(parent);
1895 		goto out;
1896 	}
1897 
1898 	subdomain = genpd_get_from_provider(subdomain_spec);
1899 	if (IS_ERR(subdomain)) {
1900 		ret = PTR_ERR(subdomain);
1901 		goto out;
1902 	}
1903 
1904 	ret = genpd_add_subdomain(parent, subdomain);
1905 
1906 out:
1907 	mutex_unlock(&gpd_list_lock);
1908 
1909 	return ret;
1910 }
1911 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
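
/*
 * Illustrative sketch (not part of this file): linking two domains exposed
 * by zero-cell providers could look roughly like this; "parent_np" and
 * "child_np" are assumed device_node pointers used only for the example.
 *
 *	struct of_phandle_args parent_spec = { .np = parent_np, .args_count = 0 };
 *	struct of_phandle_args child_spec  = { .np = child_np,  .args_count = 0 };
 *
 *	if (of_genpd_add_subdomain(&parent_spec, &child_spec))
 *		pr_warn("failed to add subdomain\n");
 */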
1912 
1913 /**
1914  * of_genpd_remove_last - Remove the last PM domain registered for a provider
1915  * @np: Pointer to the device node associated with the provider
1916  *
1917  * Find the last PM domain that was added by a particular provider and
1918  * remove this PM domain from the list of PM domains. The provider is
1919  * identified by the device node that is passed. The PM domain will only
1920  * be removed if the provider associated with the domain has been
1921  * removed.
1922  *
1923  * Returns a valid pointer to struct generic_pm_domain on success or
1924  * ERR_PTR() on failure.
1925  */
1926 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1927 {
1928 	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
1929 	int ret;
1930 
1931 	if (IS_ERR_OR_NULL(np))
1932 		return ERR_PTR(-EINVAL);
1933 
1934 	mutex_lock(&gpd_list_lock);
1935 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1936 		if (gpd->provider == &np->fwnode) {
1937 			ret = genpd_remove(gpd);
1938 			genpd = ret ? ERR_PTR(ret) : gpd;
1939 			break;
1940 		}
1941 	}
1942 	mutex_unlock(&gpd_list_lock);
1943 
1944 	return genpd;
1945 }
1946 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
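
/*
 * Illustrative sketch (not part of this file): a provider's teardown path
 * can unregister itself and then drain the domains it registered, for
 * example (this assumes the provider allocated its domains dynamically,
 * so kfree() is an appropriate way to release them):
 *
 *	struct generic_pm_domain *genpd;
 *
 *	of_genpd_del_provider(np);
 *
 *	while (!IS_ERR(genpd = of_genpd_remove_last(np)))
 *		kfree(genpd);
 */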
1947 
1948 /**
1949  * genpd_dev_pm_detach - Detach a device from its PM domain.
1950  * @dev: Device to detach.
1951  * @power_off: Currently not used
1952  *
1953  * Try to locate the generic PM domain that the device was previously
1954  * attached to. If one is found, the device is detached from it.
1955  */
1956 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1957 {
1958 	struct generic_pm_domain *pd;
1959 	unsigned int i;
1960 	int ret = 0;
1961 
1962 	pd = dev_to_genpd(dev);
1963 	if (IS_ERR(pd))
1964 		return;
1965 
1966 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1967 
1968 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1969 		ret = genpd_remove_device(pd, dev);
1970 		if (ret != -EAGAIN)
1971 			break;
1972 
1973 		mdelay(i);
1974 		cond_resched();
1975 	}
1976 
1977 	if (ret < 0) {
1978 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
1979 			pd->name, ret);
1980 		return;
1981 	}
1982 
1983 	/* Check if PM domain can be powered off after removing this device. */
1984 	genpd_queue_power_off_work(pd);
1985 }
1986 
1987 static void genpd_dev_pm_sync(struct device *dev)
1988 {
1989 	struct generic_pm_domain *pd;
1990 
1991 	pd = dev_to_genpd(dev);
1992 	if (IS_ERR(pd))
1993 		return;
1994 
1995 	genpd_queue_power_off_work(pd);
1996 }
1997 
1998 /**
1999  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2000  * @dev: Device to attach.
2001  *
2002  * Parses the device's OF node to find a PM domain specifier. If one is found,
2003  * attaches the device to the retrieved pm_domain ops.
2004  *
2005  * Both generic and legacy Samsung-specific DT bindings are supported to keep
2006  * backwards compatibility with existing DTBs.
2007  *
2008  * Returns 0 on a successfully attached PM domain, or a negative error code.
2009  * Note that if a power-domain exists for the device but cannot be found or
2010  * turned on, -EPROBE_DEFER is returned so that the device is not probed and
2011  * the attach is retried later.
2012  */
2013 int genpd_dev_pm_attach(struct device *dev)
2014 {
2015 	struct of_phandle_args pd_args;
2016 	struct generic_pm_domain *pd;
2017 	unsigned int i;
2018 	int ret;
2019 
2020 	if (!dev->of_node)
2021 		return -ENODEV;
2022 
2023 	if (dev->pm_domain)
2024 		return -EEXIST;
2025 
2026 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2027 					"#power-domain-cells", 0, &pd_args);
2028 	if (ret < 0) {
2029 		if (ret != -ENOENT)
2030 			return ret;
2031 
2032 		/*
2033 		 * Try legacy Samsung-specific bindings
2034 		 * (for backwards compatibility of DT ABI)
2035 		 */
2036 		pd_args.args_count = 0;
2037 		pd_args.np = of_parse_phandle(dev->of_node,
2038 						"samsung,power-domain", 0);
2039 		if (!pd_args.np)
2040 			return -ENOENT;
2041 	}
2042 
2043 	mutex_lock(&gpd_list_lock);
2044 	pd = genpd_get_from_provider(&pd_args);
2045 	of_node_put(pd_args.np);
2046 	if (IS_ERR(pd)) {
2047 		mutex_unlock(&gpd_list_lock);
2048 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2049 			__func__, PTR_ERR(pd));
2050 		return -EPROBE_DEFER;
2051 	}
2052 
2053 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2054 
2055 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2056 		ret = genpd_add_device(pd, dev, NULL);
2057 		if (ret != -EAGAIN)
2058 			break;
2059 
2060 		mdelay(i);
2061 		cond_resched();
2062 	}
2063 	mutex_unlock(&gpd_list_lock);
2064 
2065 	if (ret < 0) {
2066 		if (ret != -EPROBE_DEFER)
2067 			dev_err(dev, "failed to add to PM domain %s: %d\n",
2068 				pd->name, ret);
2069 		goto out;
2070 	}
2071 
2072 	dev->pm_domain->detach = genpd_dev_pm_detach;
2073 	dev->pm_domain->sync = genpd_dev_pm_sync;
2074 
2075 	genpd_lock(pd);
2076 	ret = genpd_power_on(pd, 0);
2077 	genpd_unlock(pd);
2078 out:
2079 	return ret ? -EPROBE_DEFER : 0;
2080 }
2081 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
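
/*
 * Illustrative DT fragment: the generic form parsed above uses a
 * "power-domains" phandle plus the provider's "#power-domain-cells". The
 * node names, the compatible string and the cell value below are made up
 * for this example only.
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,soc-power-controller";	(hypothetical)
 *		#power-domain-cells = <1>;
 *	};
 *
 *	serial@12350000 {
 *		...
 *		power-domains = <&power 0>;
 *	};
 *
 * The legacy fallback instead looks for a bare "samsung,power-domain"
 * phandle with no argument cells.
 */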
2082 
2083 static const struct of_device_id idle_state_match[] = {
2084 	{ .compatible = "domain-idle-state", },
2085 	{ }
2086 };
2087 
2088 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2089 				    struct device_node *state_node)
2090 {
2091 	int err;
2092 	u32 residency;
2093 	u32 entry_latency, exit_latency;
2094 
2095 	err = of_property_read_u32(state_node, "entry-latency-us",
2096 						&entry_latency);
2097 	if (err) {
2098 		pr_debug(" * %s missing entry-latency-us property\n",
2099 						state_node->full_name);
2100 		return -EINVAL;
2101 	}
2102 
2103 	err = of_property_read_u32(state_node, "exit-latency-us",
2104 						&exit_latency);
2105 	if (err) {
2106 		pr_debug(" * %s missing exit-latency-us property\n",
2107 						state_node->full_name);
2108 		return -EINVAL;
2109 	}
2110 
2111 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2112 	if (!err)
2113 		genpd_state->residency_ns = 1000 * residency;
2114 
2115 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2116 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2117 	genpd_state->fwnode = &state_node->fwnode;
2118 
2119 	return 0;
2120 }
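
/*
 * Illustrative DT fragment: an idle state node accepted by
 * genpd_parse_state() carries its latencies in microseconds and an
 * optional minimum residency. The property names match the code above;
 * the node label, name and values are assumptions for this example.
 *
 *	PD_RET: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <80>;
 *	};
 */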
2121 
2122 /**
2123  * of_genpd_parse_idle_states() - Return an array of idle states for the genpd.
2124  *
2125  * @dn: The genpd device node
2126  * @states: The pointer to which the state array will be saved.
2127  * @n: The count of elements in the array returned from this function.
2128  *
2129  * Returns the device idle states parsed from the OF node. The memory for the
2130  * states is allocated by this function, and it is the caller's responsibility
2131  * to free it after use.
2132  */
2133 int of_genpd_parse_idle_states(struct device_node *dn,
2134 			struct genpd_power_state **states, int *n)
2135 {
2136 	struct genpd_power_state *st;
2137 	struct device_node *np;
2138 	int i = 0;
2139 	int err, ret;
2140 	int count;
2141 	struct of_phandle_iterator it;
2142 	const struct of_device_id *match_id;
2143 
2144 	count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2145 	if (count <= 0)
2146 		return -EINVAL;
2147 
2148 	st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2149 	if (!st)
2150 		return -ENOMEM;
2151 
2152 	/* Loop over the phandles until all the requested entries are found */
2153 	of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2154 		np = it.node;
2155 		match_id = of_match_node(idle_state_match, np);
2156 		if (!match_id)
2157 			continue;
2158 		ret = genpd_parse_state(&st[i++], np);
2159 		if (ret) {
2160 			pr_err("Parsing idle state node %s failed with err %d\n",
2161 			       np->full_name, ret);
2163 			of_node_put(np);
2164 			kfree(st);
2165 			return ret;
2166 		}
2167 	}
2168 
2169 	*n = i;
2170 	if (!i)
2171 		kfree(st);
2172 	else
2173 		*states = st;
2174 
2175 	return 0;
2176 }
2177 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
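
/*
 * Illustrative sketch (not part of this file): a genpd provider could
 * populate its idle states from DT before registering the domain;
 * "my_genpd" and "np" are assumptions made for this example only.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, error;
 *
 *	error = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!error) {
 *		my_genpd.states = states;
 *		my_genpd.state_count = nr_states;
 *	}
 *
 *	pm_genpd_init(&my_genpd, NULL, true);
 */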
2178 
2179 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2180 
2181 
2182 /***        debugfs support        ***/
2183 
2184 #ifdef CONFIG_DEBUG_FS
2185 #include <linux/pm.h>
2186 #include <linux/device.h>
2187 #include <linux/debugfs.h>
2188 #include <linux/seq_file.h>
2189 #include <linux/init.h>
2190 #include <linux/kobject.h>
2191 static struct dentry *pm_genpd_debugfs_dir;
2192 
2193 /*
2194  * TODO: This function is a slightly modified version of rtpm_status_show
2195  * from sysfs.c, so generalize it.
2196  */
2197 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2198 {
2199 	static const char * const status_lookup[] = {
2200 		[RPM_ACTIVE] = "active",
2201 		[RPM_RESUMING] = "resuming",
2202 		[RPM_SUSPENDED] = "suspended",
2203 		[RPM_SUSPENDING] = "suspending"
2204 	};
2205 	const char *p = "";
2206 
2207 	if (dev->power.runtime_error)
2208 		p = "error";
2209 	else if (dev->power.disable_depth)
2210 		p = "unsupported";
2211 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2212 		p = status_lookup[dev->power.runtime_status];
2213 	else
2214 		WARN_ON(1);
2215 
2216 	seq_puts(s, p);
2217 }
2218 
2219 static int pm_genpd_summary_one(struct seq_file *s,
2220 				struct generic_pm_domain *genpd)
2221 {
2222 	static const char * const status_lookup[] = {
2223 		[GPD_STATE_ACTIVE] = "on",
2224 		[GPD_STATE_POWER_OFF] = "off"
2225 	};
2226 	struct pm_domain_data *pm_data;
2227 	const char *kobj_path;
2228 	struct gpd_link *link;
2229 	char state[16];
2230 	int ret;
2231 
2232 	ret = genpd_lock_interruptible(genpd);
2233 	if (ret)
2234 		return -ERESTARTSYS;
2235 
2236 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2237 		goto exit;
2238 	if (!genpd_status_on(genpd))
2239 		snprintf(state, sizeof(state), "%s-%u",
2240 			 status_lookup[genpd->status], genpd->state_idx);
2241 	else
2242 		snprintf(state, sizeof(state), "%s",
2243 			 status_lookup[genpd->status]);
2244 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2245 
2246 	/*
2247 	 * Modifications on the list require holding locks on both
2248 	 * master and slave, so we are safe.
2249 	 * Also genpd->name is immutable.
2250 	 */
2251 	list_for_each_entry(link, &genpd->master_links, master_node) {
2252 		seq_printf(s, "%s", link->slave->name);
2253 		if (!list_is_last(&link->master_node, &genpd->master_links))
2254 			seq_puts(s, ", ");
2255 	}
2256 
2257 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2258 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2259 				genpd_is_irq_safe(genpd) ?
2260 				GFP_ATOMIC : GFP_KERNEL);
2261 		if (kobj_path == NULL)
2262 			continue;
2263 
2264 		seq_printf(s, "\n    %-50s  ", kobj_path);
2265 		rtpm_status_str(s, pm_data->dev);
2266 		kfree(kobj_path);
2267 	}
2268 
2269 	seq_puts(s, "\n");
2270 exit:
2271 	genpd_unlock(genpd);
2272 
2273 	return 0;
2274 }
2275 
2276 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2277 {
2278 	struct generic_pm_domain *genpd;
2279 	int ret = 0;
2280 
2281 	seq_puts(s, "domain                          status          slaves\n");
2282 	seq_puts(s, "    /device                                             runtime status\n");
2283 	seq_puts(s, "----------------------------------------------------------------------\n");
2284 
2285 	ret = mutex_lock_interruptible(&gpd_list_lock);
2286 	if (ret)
2287 		return -ERESTARTSYS;
2288 
2289 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2290 		ret = pm_genpd_summary_one(s, genpd);
2291 		if (ret)
2292 			break;
2293 	}
2294 	mutex_unlock(&gpd_list_lock);
2295 
2296 	return ret;
2297 }
2298 
2299 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2300 {
2301 	return single_open(file, pm_genpd_summary_show, NULL);
2302 }
2303 
2304 static const struct file_operations pm_genpd_summary_fops = {
2305 	.open = pm_genpd_summary_open,
2306 	.read = seq_read,
2307 	.llseek = seq_lseek,
2308 	.release = single_release,
2309 };
2310 
2311 static int __init pm_genpd_debug_init(void)
2312 {
2313 	struct dentry *d;
2314 
2315 	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2316 
2317 	if (!pm_genpd_debugfs_dir)
2318 		return -ENOMEM;
2319 
2320 	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2321 			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2322 	if (!d)
2323 		return -ENOMEM;
2324 
2325 	return 0;
2326 }
2327 late_initcall(pm_genpd_debug_init);
2328 
2329 static void __exit pm_genpd_debug_exit(void)
2330 {
2331 	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2332 }
2333 __exitcall(pm_genpd_debug_exit);
2334 #endif /* CONFIG_DEBUG_FS */
2335