/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
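
/*
 * Illustrative note (editorial, not part of the original file): for
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) the statement expression
 * above expands to roughly:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;	// value of the whole expression
 *
 * i.e. the callback is invoked only if the domain provides it, and a
 * missing callback reads as success (0).
 */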

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/* Warn once if an IRQ safe device is in a non-IRQ-safe domain */
	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}
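
/*
 * Editorial example (not part of the original file): a minimal sketch of a
 * provider's ->power_on() callback as timed above. The register name and the
 * polling helper are hypothetical; real providers poke their own power
 * controller here.
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		writel(MY_PD_ON, my_base + MY_PD_CTRL);	// hypothetical MMIO
 *		return my_pd_poll_status(MY_PD_ON);	// 0 or -errno
 *	}
 *
 * A non-zero return is propagated to genpd_power_on() and the latency
 * bookkeeping above is skipped.
 */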

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE)
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
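
/*
 * Editorial note (not in the original file): booting with "pd_ignore_unused"
 * on the kernel command line sets the flag above, which makes
 * genpd_power_off_unused() below skip powering off otherwise-unused domains,
 * e.g.:
 *
 *	console=ttyS0 pd_ignore_unused
 */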

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: whether the master domains' locks must be taken while walking
 *            them (needed in the "noirq" phase, where callbacks may run
 *            asynchronously).
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	_genpd_power_off(genpd, false);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: whether the master domains' locks must be taken while walking
 *            them (needed in the "noirq" phase, where callbacks may run
 *            asynchronously).
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	return ret;
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_suspend(dev);
		if (ret)
			return ret;
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_suspend(dev);

	return ret;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
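
/*
 * Editorial example (not in the original file): a clocksource or similar
 * syscore-level driver would typically bracket its suspend/resume hooks with
 * these helpers. The callbacks and "my_timer_dev" below are hypothetical.
 *
 *	static int my_timer_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);	// allow domain off
 *		return 0;
 *	}
 *
 *	static void my_timer_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);	// power domain back on
 *	}
 */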

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, td);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
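
/*
 * Editorial example (not in the original file): a SoC driver that created a
 * domain could add one of its devices to it like this ("my_genpd" and "pdev"
 * are hypothetical; a NULL @td means no initial timing data):
 *
 *	ret = __pm_genpd_add_device(&my_genpd, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add to PM domain: %d\n", ret);
 */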

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	if (!genpd || genpd != genpd_lookup_dev(dev))
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
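
/*
 * Editorial example (not in the original file): building a two-level
 * hierarchy, where powering off "gpu_pd" lets its parent "soc_pd" be
 * considered for power off as well (both domains are hypothetical and
 * assumed to have been registered with pm_genpd_init()):
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *	if (ret)
 *		pr_err("failed to link gpu_pd under soc_pd: %d\n", ret);
 */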

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain: true if it starts powered off.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.complete = pm_genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
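
/*
 * Editorial example (not in the original file): minimal registration of a
 * domain by a hypothetical platform driver, using the provider callbacks
 * sketched earlier. Setting GENPD_FLAG_PM_CLK in .flags would additionally
 * wire the per-device stop/start callbacks to pm_clk_suspend() and
 * pm_clk_resume(), as done above.
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	// Register the domain, starting in the powered-off state.
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */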

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (pm_genpd_present(genpd))
		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);

	if (!ret) {
		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
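
/*
 * Editorial example (not in the original file): device tree fragment for a
 * simple (single-domain) provider and one consumer; node and label names are
 * made up. The provider node must have "#power-domain-cells = <0>" so that
 * genpd_xlate_simple() sees zero specifier cells.
 *
 *	my_pd: power-controller {
 *		#power-domain-cells = <0>;
 *	};
 *
 *	uart@12345000 {
 *		power-domains = <&my_pd>;
 *	};
 *
 * The driver behind "power-controller" would then call
 * of_genpd_add_provider_simple(np, &my_genpd) from its probe path.
 */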

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	for (i = 0; i < data->num_domains; i++) {
		if (!data->domains[i])
			continue;
		if (!pm_genpd_present(data->domains[i]))
			goto error;

		data->domains[i]->provider = &np->fwnode;
		data->domains[i]->has_provider = true;
	}

	ret = genpd_add_provider(np, genpd_xlate_onecell, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		if (!data->domains[i])
			continue;
		data->domains[i]->provider = NULL;
		data->domains[i]->has_provider = false;
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
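
/*
 * Editorial example (not in the original file): registering several domains
 * behind one node. All names are hypothetical; a consumer selects a domain by
 * index because the provider node has "#power-domain-cells = <1>".
 *
 *	static struct generic_pm_domain *my_domains[] = {
 *		&my_pd_cpu, &my_pd_gpu, &my_pd_dsp,
 *	};
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 *
 * With a matching device tree consumer such as:
 *
 *	gpu@40000000 {
 *		power-domains = <&my_pd_provider 1>;	// index 1 -> my_pd_gpu
 *	};
 */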

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
				if (gpd->provider == &np->fwnode)
					gpd->has_provider = false;

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for looking up the PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the @np device node that is passed. The PM domain will
 * only be removed, if the provider associated with the domain has been
 * removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
1914 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1915 {
1916 	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
1917 	int ret;
1918 
1919 	if (IS_ERR_OR_NULL(np))
1920 		return ERR_PTR(-EINVAL);
1921 
1922 	mutex_lock(&gpd_list_lock);
1923 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1924 		if (gpd->provider == &np->fwnode) {
1925 			ret = genpd_remove(gpd);
1926 			genpd = ret ? ERR_PTR(ret) : gpd;
1927 			break;
1928 		}
1929 	}
1930 	mutex_unlock(&gpd_list_lock);
1931 
1932 	return genpd;
1933 }
1934 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
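/*
 * A typical (sketched) teardown path for a provider driver: unregister the
 * provider first so new look-ups fail, then drain the domains it added.
 * Freeing with kfree() assumes the domains were individually kmalloc'ed,
 * which is driver-specific; the function name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_provider_remove(struct device_node *np)
{
	struct generic_pm_domain *genpd;

	of_genpd_del_provider(np);

	for (;;) {
		genpd = of_genpd_remove_last(np);
		if (IS_ERR(genpd))
			break;
		kfree(genpd);
	}
}
#endif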
1935 
1936 /**
1937  * genpd_dev_pm_detach - Detach a device from its PM domain.
1938  * @dev: Device to detach.
1939  * @power_off: Currently not used
1940  *
1941  * Try to locate the generic PM domain to which the device was previously
1942  * attached. If one is found, the device is detached from it.
1943  */
1944 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1945 {
1946 	struct generic_pm_domain *pd;
1947 	unsigned int i;
1948 	int ret = 0;
1949 
1950 	pd = dev_to_genpd(dev);
1951 	if (IS_ERR(pd))
1952 		return;
1953 
1954 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1955 
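	/*
	 * Retry on -EAGAIN with exponential back-off: the per-iteration
	 * delays (1, 2, 4, ... ms) sum to roughly GENPD_RETRY_MAX_MS
	 * before giving up.
	 */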
1956 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1957 		ret = genpd_remove_device(pd, dev);
1958 		if (ret != -EAGAIN)
1959 			break;
1960 
1961 		mdelay(i);
1962 		cond_resched();
1963 	}
1964 
1965 	if (ret < 0) {
1966 		dev_err(dev, "failed to remove from PM domain %s: %d",
1967 			pd->name, ret);
1968 		return;
1969 	}
1970 
1971 	/* Check if PM domain can be powered off after removing this device. */
1972 	genpd_queue_power_off_work(pd);
1973 }
1974 
1975 static void genpd_dev_pm_sync(struct device *dev)
1976 {
1977 	struct generic_pm_domain *pd;
1978 
1979 	pd = dev_to_genpd(dev);
1980 	if (IS_ERR(pd))
1981 		return;
1982 
1983 	genpd_queue_power_off_work(pd);
1984 }
1985 
1986 /**
1987  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1988  * @dev: Device to attach.
1989  *
1990  * Parses the device's OF node to find a PM domain specifier. If one is
1991  * found, attaches the device to the retrieved pm_domain ops.
1992  *
1993  * Both generic and legacy Samsung-specific DT bindings are supported to keep
1994  * backwards compatibility with existing DTBs.
1995  *
1996  * Returns 0 when the device is successfully attached to its PM domain, or a
1997  * negative error code otherwise. Note that if a power-domain exists for the
1998  * device but cannot be found or turned on, -EPROBE_DEFER is returned so that
1999  * the device is not probed and the attach is retried later.
2000  */
2001 int genpd_dev_pm_attach(struct device *dev)
2002 {
2003 	struct of_phandle_args pd_args;
2004 	struct generic_pm_domain *pd;
2005 	unsigned int i;
2006 	int ret;
2007 
2008 	if (!dev->of_node)
2009 		return -ENODEV;
2010 
2011 	if (dev->pm_domain)
2012 		return -EEXIST;
2013 
2014 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2015 					"#power-domain-cells", 0, &pd_args);
2016 	if (ret < 0) {
2017 		if (ret != -ENOENT)
2018 			return ret;
2019 
2020 		/*
2021 		 * Try legacy Samsung-specific bindings
2022 		 * (for backwards compatibility of DT ABI)
2023 		 */
2024 		pd_args.args_count = 0;
2025 		pd_args.np = of_parse_phandle(dev->of_node,
2026 						"samsung,power-domain", 0);
2027 		if (!pd_args.np)
2028 			return -ENOENT;
2029 	}
2030 
2031 	mutex_lock(&gpd_list_lock);
2032 	pd = genpd_get_from_provider(&pd_args);
2033 	of_node_put(pd_args.np);
2034 	if (IS_ERR(pd)) {
2035 		mutex_unlock(&gpd_list_lock);
2036 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2037 			__func__, PTR_ERR(pd));
2038 		return -EPROBE_DEFER;
2039 	}
2040 
2041 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2042 
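	/* Bounded -EAGAIN retry; same back-off scheme as genpd_dev_pm_detach(). */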
2043 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2044 		ret = genpd_add_device(pd, dev, NULL);
2045 		if (ret != -EAGAIN)
2046 			break;
2047 
2048 		mdelay(i);
2049 		cond_resched();
2050 	}
2051 	mutex_unlock(&gpd_list_lock);
2052 
2053 	if (ret < 0) {
2054 		if (ret != -EPROBE_DEFER)
2055 			dev_err(dev, "failed to add to PM domain %s: %d",
2056 				pd->name, ret);
2057 		goto out;
2058 	}
2059 
2060 	dev->pm_domain->detach = genpd_dev_pm_detach;
2061 	dev->pm_domain->sync = genpd_dev_pm_sync;
2062 
2063 	genpd_lock(pd);
2064 	ret = genpd_power_on(pd, 0);
2065 	genpd_unlock(pd);
2066 out:
2067 	return ret ? -EPROBE_DEFER : 0;
2068 }
2069 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
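/*
 * Sketch of how bus code is expected to consume genpd_dev_pm_attach():
 * treat -EPROBE_DEFER as "a domain exists but is not usable yet" and other
 * negative codes such as -ENODEV or -ENOENT as "no genpd to attach".
 * example_bus_probe() and example_do_probe() are hypothetical names.
 */
#if 0	/* illustrative sketch only */
static int example_bus_probe(struct device *dev)
{
	int ret;

	ret = genpd_dev_pm_attach(dev);
	if (ret == -EPROBE_DEFER)
		return ret;		/* try again once the domain is up */

	/* No domain (or already attached): probe the device normally. */
	return example_do_probe(dev);
}
#endif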
2070 
2071 static const struct of_device_id idle_state_match[] = {
2072 	{ .compatible = "domain-idle-state", },
2073 	{ }
2074 };
2075 
2076 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2077 				    struct device_node *state_node)
2078 {
2079 	int err;
2080 	u32 residency;
2081 	u32 entry_latency, exit_latency;
2082 	const struct of_device_id *match_id;
2083 
2084 	match_id = of_match_node(idle_state_match, state_node);
2085 	if (!match_id)
2086 		return -EINVAL;
2087 
2088 	err = of_property_read_u32(state_node, "entry-latency-us",
2089 						&entry_latency);
2090 	if (err) {
2091 		pr_debug(" * %s missing entry-latency-us property\n",
2092 						state_node->full_name);
2093 		return -EINVAL;
2094 	}
2095 
2096 	err = of_property_read_u32(state_node, "exit-latency-us",
2097 						&exit_latency);
2098 	if (err) {
2099 		pr_debug(" * %s missing exit-latency-us property\n",
2100 						state_node->full_name);
2101 		return -EINVAL;
2102 	}
2103 
2104 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2105 	if (!err)
2106 		genpd_state->residency_ns = 1000 * residency;
2107 
2108 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2109 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2110 	genpd_state->fwnode = &state_node->fwnode;
2111 
2112 	return 0;
2113 }
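/*
 * For reference, a device-tree node that satisfies genpd_parse_state()
 * looks roughly like the snippet below (latency values made up); the
 * property names match the ones read above.
 *
 *	domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <1000>;
 *	};
 */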
2114 
2115 /**
2116  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2117  *
2118  * @dn: The genpd device node
2119  * @states: The pointer to which the state array will be saved.
2120  * @n: The count of elements in the array returned from this function.
2121  *
2122  * Returns the domain idle states parsed from the OF node. The memory for the
2123  * states is allocated by this function, and it is the caller's responsibility
2124  * to free it after use.
2125  */
2126 int of_genpd_parse_idle_states(struct device_node *dn,
2127 			struct genpd_power_state **states, int *n)
2128 {
2129 	struct genpd_power_state *st;
2130 	struct device_node *np;
2131 	int i = 0;
2132 	int err, ret;
2133 	int count;
2134 	struct of_phandle_iterator it;
2135 
2136 	count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2137 	if (count <= 0)
2138 		return -EINVAL;
2139 
2140 	st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2141 	if (!st)
2142 		return -ENOMEM;
2143 
2144 	/* Loop over the phandles until all the requested entries are found */
2145 	of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2146 		np = it.node;
2147 		ret = genpd_parse_state(&st[i++], np);
2148 		if (ret) {
2149 			pr_err("Parsing idle state node %s failed with err %d\n",
2150 			       np->full_name, ret);
2152 			of_node_put(np);
2153 			kfree(st);
2154 			return ret;
2155 		}
2156 	}
2157 
2158 	*n = count;
2159 	*states = st;
2160 
2161 	return 0;
2162 }
2163 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
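/*
 * Sketch of a provider consuming the parsed states: hand the array to the
 * genpd before initializing it. Per the kernel-doc above, the caller still
 * owns the memory. example_setup_states() is a hypothetical name.
 */
#if 0	/* illustrative sketch only */
static int example_setup_states(struct generic_pm_domain *genpd,
				struct device_node *dn)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(dn, &states, &nr_states);
	if (ret)
		return ret;

	genpd->states = states;
	genpd->state_count = nr_states;
	return 0;
}
#endif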
2164 
2165 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2166 
2167 
2168 /***        debugfs support        ***/
2169 
2170 #ifdef CONFIG_DEBUG_FS
2171 #include <linux/pm.h>
2172 #include <linux/device.h>
2173 #include <linux/debugfs.h>
2174 #include <linux/seq_file.h>
2175 #include <linux/init.h>
2176 #include <linux/kobject.h>
2177 static struct dentry *pm_genpd_debugfs_dir;
2178 
2179 /*
2180  * TODO: This function is a slightly modified version of rtpm_status_show
2181  * from sysfs.c, so generalize it.
2182  */
2183 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2184 {
2185 	static const char * const status_lookup[] = {
2186 		[RPM_ACTIVE] = "active",
2187 		[RPM_RESUMING] = "resuming",
2188 		[RPM_SUSPENDED] = "suspended",
2189 		[RPM_SUSPENDING] = "suspending"
2190 	};
2191 	const char *p = "";
2192 
2193 	if (dev->power.runtime_error)
2194 		p = "error";
2195 	else if (dev->power.disable_depth)
2196 		p = "unsupported";
2197 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2198 		p = status_lookup[dev->power.runtime_status];
2199 	else
2200 		WARN_ON(1);
2201 
2202 	seq_puts(s, p);
2203 }
2204 
2205 static int pm_genpd_summary_one(struct seq_file *s,
2206 				struct generic_pm_domain *genpd)
2207 {
2208 	static const char * const status_lookup[] = {
2209 		[GPD_STATE_ACTIVE] = "on",
2210 		[GPD_STATE_POWER_OFF] = "off"
2211 	};
2212 	struct pm_domain_data *pm_data;
2213 	const char *kobj_path;
2214 	struct gpd_link *link;
2215 	char state[16];
2216 	int ret;
2217 
2218 	ret = genpd_lock_interruptible(genpd);
2219 	if (ret)
2220 		return -ERESTARTSYS;
2221 
2222 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2223 		goto exit;
2224 	if (genpd->status == GPD_STATE_POWER_OFF)
2225 		snprintf(state, sizeof(state), "%s-%u",
2226 			 status_lookup[genpd->status], genpd->state_idx);
2227 	else
2228 		snprintf(state, sizeof(state), "%s",
2229 			 status_lookup[genpd->status]);
2230 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2231 
2232 	/*
2233 	 * Modifications on the list require holding locks on both
2234 	 * master and slave, so we are safe.
2235 	 * Also genpd->name is immutable.
2236 	 */
2237 	list_for_each_entry(link, &genpd->master_links, master_node) {
2238 		seq_printf(s, "%s", link->slave->name);
2239 		if (!list_is_last(&link->master_node, &genpd->master_links))
2240 			seq_puts(s, ", ");
2241 	}
2242 
2243 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2244 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2245 				genpd_is_irq_safe(genpd) ?
2246 				GFP_ATOMIC : GFP_KERNEL);
2247 		if (kobj_path == NULL)
2248 			continue;
2249 
2250 		seq_printf(s, "\n    %-50s  ", kobj_path);
2251 		rtpm_status_str(s, pm_data->dev);
2252 		kfree(kobj_path);
2253 	}
2254 
2255 	seq_puts(s, "\n");
2256 exit:
2257 	genpd_unlock(genpd);
2258 
2259 	return 0;
2260 }
2261 
2262 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2263 {
2264 	struct generic_pm_domain *genpd;
2265 	int ret = 0;
2266 
2267 	seq_puts(s, "domain                          status          slaves\n");
2268 	seq_puts(s, "    /device                                             runtime status\n");
2269 	seq_puts(s, "----------------------------------------------------------------------\n");
2270 
2271 	ret = mutex_lock_interruptible(&gpd_list_lock);
2272 	if (ret)
2273 		return -ERESTARTSYS;
2274 
2275 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2276 		ret = pm_genpd_summary_one(s, genpd);
2277 		if (ret)
2278 			break;
2279 	}
2280 	mutex_unlock(&gpd_list_lock);
2281 
2282 	return ret;
2283 }
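/*
 * Illustrative output of the summary file (domain names and device paths
 * made up):
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	power-domain0                   off-0           power-domain1
 *	    /devices/platform/example.0                         suspended
 */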
2284 
2285 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2286 {
2287 	return single_open(file, pm_genpd_summary_show, NULL);
2288 }
2289 
2290 static const struct file_operations pm_genpd_summary_fops = {
2291 	.open = pm_genpd_summary_open,
2292 	.read = seq_read,
2293 	.llseek = seq_lseek,
2294 	.release = single_release,
2295 };
2296 
2297 static int __init pm_genpd_debug_init(void)
2298 {
2299 	struct dentry *d;
2300 
2301 	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2302 
2303 	if (!pm_genpd_debugfs_dir)
2304 		return -ENOMEM;
2305 
2306 	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2307 			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2308 	if (!d)
2309 		return -ENOMEM;
2310 
2311 	return 0;
2312 }
2313 late_initcall(pm_genpd_debug_init);
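/*
 * With debugfs mounted at the conventional location, the summary created
 * above is then readable from userspace:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 */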
2314 
2315 static void __exit pm_genpd_debug_exit(void)
2316 {
2317 	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2318 }
2319 __exitcall(pm_genpd_debug_exit);
2320 #endif /* CONFIG_DEBUG_FS */
2321