xref: /openbmc/linux/drivers/base/power/domain.c (revision 8ee90c5c)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22 
23 #include "power.h"
24 
25 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
26 
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
28 ({								\
29 	type (*__routine)(struct device *__d); 			\
30 	type __ret = (type)0;					\
31 								\
32 	__routine = genpd->dev_ops.callback; 			\
33 	if (__routine) {					\
34 		__ret = __routine(dev); 			\
35 	}							\
36 	__ret;							\
37 })
38 
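/*
 * Illustrative expansion, not part of the original file: invoking
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) behaves roughly like the
 * helper below - call the optional per-domain callback if one is set,
 * otherwise return 0 cast to the requested type.
 */
static inline int example_genpd_call_stop(const struct generic_pm_domain *genpd,
					  struct device *dev)
{
	int (*routine)(struct device *dev) = genpd->dev_ops.stop;

	return routine ? routine(dev) : 0;
}
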
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
41 
42 struct genpd_lock_ops {
43 	void (*lock)(struct generic_pm_domain *genpd);
44 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
45 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
46 	void (*unlock)(struct generic_pm_domain *genpd);
47 };
48 
49 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
50 {
51 	mutex_lock(&genpd->mlock);
52 }
53 
54 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
55 					int depth)
56 {
57 	mutex_lock_nested(&genpd->mlock, depth);
58 }
59 
60 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
61 {
62 	return mutex_lock_interruptible(&genpd->mlock);
63 }
64 
65 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
66 {
67 	return mutex_unlock(&genpd->mlock);
68 }
69 
70 static const struct genpd_lock_ops genpd_mtx_ops = {
71 	.lock = genpd_lock_mtx,
72 	.lock_nested = genpd_lock_nested_mtx,
73 	.lock_interruptible = genpd_lock_interruptible_mtx,
74 	.unlock = genpd_unlock_mtx,
75 };
76 
77 static void genpd_lock_spin(struct generic_pm_domain *genpd)
78 	__acquires(&genpd->slock)
79 {
80 	unsigned long flags;
81 
82 	spin_lock_irqsave(&genpd->slock, flags);
83 	genpd->lock_flags = flags;
84 }
85 
86 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
87 					int depth)
88 	__acquires(&genpd->slock)
89 {
90 	unsigned long flags;
91 
92 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
93 	genpd->lock_flags = flags;
94 }
95 
96 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
97 	__acquires(&genpd->slock)
98 {
99 	unsigned long flags;
100 
101 	spin_lock_irqsave(&genpd->slock, flags);
102 	genpd->lock_flags = flags;
103 	return 0;
104 }
105 
106 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
107 	__releases(&genpd->slock)
108 {
109 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
110 }
111 
112 static const struct genpd_lock_ops genpd_spin_ops = {
113 	.lock = genpd_lock_spin,
114 	.lock_nested = genpd_lock_nested_spin,
115 	.lock_interruptible = genpd_lock_interruptible_spin,
116 	.unlock = genpd_unlock_spin,
117 };
118 
119 #define genpd_lock(p)			p->lock_ops->lock(p)
120 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
121 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
122 #define genpd_unlock(p)			p->lock_ops->unlock(p)
123 
124 #define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
125 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
126 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
127 
128 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
129 		const struct generic_pm_domain *genpd)
130 {
131 	bool ret;
132 
133 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
134 
135 	/*
136 	 * Warn once if an IRQ safe device is attached to a no sleep domain to
137 	 * indicate a suboptimal configuration for PM. For an always-on domain
138 	 * this isn't the case, so don't warn.
139 	 */
140 	if (ret && !genpd_is_always_on(genpd))
141 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
142 				genpd->name);
143 
144 	return ret;
145 }
146 
147 /*
148  * Get the generic PM domain for a particular struct device.
149  * This validates the struct device pointer, the PM domain pointer,
150  * and checks that the PM domain pointer is a real generic PM domain.
151  * Any failure results in NULL being returned.
152  */
153 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
154 {
155 	struct generic_pm_domain *genpd = NULL, *gpd;
156 
157 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
158 		return NULL;
159 
160 	mutex_lock(&gpd_list_lock);
161 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
162 		if (&gpd->domain == dev->pm_domain) {
163 			genpd = gpd;
164 			break;
165 		}
166 	}
167 	mutex_unlock(&gpd_list_lock);
168 
169 	return genpd;
170 }
171 
172 /*
173  * This should only be used where we are certain that the pm_domain
174  * attached to the device is a genpd domain.
175  */
176 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
177 {
178 	if (IS_ERR_OR_NULL(dev->pm_domain))
179 		return ERR_PTR(-EINVAL);
180 
181 	return pd_to_genpd(dev->pm_domain);
182 }
183 
184 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
185 			  struct device *dev)
186 {
187 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
188 }
189 
190 static int genpd_start_dev(const struct generic_pm_domain *genpd,
191 			   struct device *dev)
192 {
193 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
194 }
195 
196 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
197 {
198 	bool ret = false;
199 
200 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
201 		ret = !!atomic_dec_and_test(&genpd->sd_count);
202 
203 	return ret;
204 }
205 
206 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
207 {
208 	atomic_inc(&genpd->sd_count);
209 	smp_mb__after_atomic();
210 }
211 
212 #ifdef CONFIG_DEBUG_FS
213 static void genpd_update_accounting(struct generic_pm_domain *genpd)
214 {
215 	ktime_t delta, now;
216 
217 	now = ktime_get();
218 	delta = ktime_sub(now, genpd->accounting_time);
219 
220 	/*
221 	 * If genpd->status is active, the domain has just been powered on, so
222 	 * the time that elapsed since the last update was spent idle; account
223 	 * it as idle time. Otherwise account it as on time.
224 	 */
225 	if (genpd->status == GPD_STATE_ACTIVE) {
226 		int state_idx = genpd->state_idx;
227 
228 		genpd->states[state_idx].idle_time =
229 			ktime_add(genpd->states[state_idx].idle_time, delta);
230 	} else {
231 		genpd->on_time = ktime_add(genpd->on_time, delta);
232 	}
233 
234 	genpd->accounting_time = now;
235 }
236 #else
237 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
238 #endif
239 
240 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
241 {
242 	unsigned int state_idx = genpd->state_idx;
243 	ktime_t time_start;
244 	s64 elapsed_ns;
245 	int ret;
246 
247 	if (!genpd->power_on)
248 		return 0;
249 
250 	if (!timed)
251 		return genpd->power_on(genpd);
252 
253 	time_start = ktime_get();
254 	ret = genpd->power_on(genpd);
255 	if (ret)
256 		return ret;
257 
258 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
259 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
260 		return ret;
261 
262 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
263 	genpd->max_off_time_changed = true;
264 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
265 		 genpd->name, "on", elapsed_ns);
266 
267 	return ret;
268 }
269 
270 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
271 {
272 	unsigned int state_idx = genpd->state_idx;
273 	ktime_t time_start;
274 	s64 elapsed_ns;
275 	int ret;
276 
277 	if (!genpd->power_off)
278 		return 0;
279 
280 	if (!timed)
281 		return genpd->power_off(genpd);
282 
283 	time_start = ktime_get();
284 	ret = genpd->power_off(genpd);
285 	if (ret == -EBUSY)
286 		return ret;
287 
288 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
289 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
290 		return ret;
291 
292 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
293 	genpd->max_off_time_changed = true;
294 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
295 		 genpd->name, "off", elapsed_ns);
296 
297 	return ret;
298 }
299 
300 /**
301  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
302  * @genpd: PM domain to power off.
303  *
304  * Queue up the execution of genpd_power_off() unless it's already been done
305  * before.
306  */
307 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
308 {
309 	queue_work(pm_wq, &genpd->power_off_work);
310 }
311 
312 /**
313  * genpd_power_off - Remove power from a given PM domain.
314  * @genpd: PM domain to power down.
315  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
316  * RPM status of the related device is in an intermediate state, not yet turned
317  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
318  * be RPM_SUSPENDED, while it tries to power off the PM domain.
319  *
320  * If all of the @genpd's devices have been suspended and all of its subdomains
321  * have been powered down, remove power from @genpd.
322  */
323 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
324 			   unsigned int depth)
325 {
326 	struct pm_domain_data *pdd;
327 	struct gpd_link *link;
328 	unsigned int not_suspended = 0;
329 
330 	/*
331 	 * Do not try to power off the domain in the following situations:
332 	 * (1) The domain is already in the "power off" state.
333 	 * (2) System suspend is in progress.
334 	 */
335 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
336 		return 0;
337 
338 	/*
339 	 * Abort power off for the PM domain in the following situations:
340 	 * (1) The domain is configured as always on.
341 	 * (2) When the domain has a subdomain being powered on.
342 	 */
343 	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
344 		return -EBUSY;
345 
346 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
347 		enum pm_qos_flags_status stat;
348 
349 		stat = dev_pm_qos_flags(pdd->dev,
350 					PM_QOS_FLAG_NO_POWER_OFF
351 						| PM_QOS_FLAG_REMOTE_WAKEUP);
352 		if (stat > PM_QOS_FLAGS_NONE)
353 			return -EBUSY;
354 
355 		/*
356 		 * Do not allow PM domain to be powered off, when an IRQ safe
357 		 * device is part of a non-IRQ safe domain.
358 		 */
359 		if (!pm_runtime_suspended(pdd->dev) ||
360 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
361 			not_suspended++;
362 	}
363 
364 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
365 		return -EBUSY;
366 
367 	if (genpd->gov && genpd->gov->power_down_ok) {
368 		if (!genpd->gov->power_down_ok(&genpd->domain))
369 			return -EAGAIN;
370 	}
371 
372 	if (genpd->power_off) {
373 		int ret;
374 
375 		if (atomic_read(&genpd->sd_count) > 0)
376 			return -EBUSY;
377 
378 		/*
379 		 * If sd_count > 0 at this point, one of the subdomains hasn't
380 		 * managed to call genpd_power_on() for the master yet after
381 		 * incrementing it.  In that case genpd_power_on() will wait
382 		 * for us to drop the lock, so we can call .power_off() and let
383 		 * the genpd_power_on() restore power for us (this shouldn't
384 		 * happen very often).
385 		 */
386 		ret = _genpd_power_off(genpd, true);
387 		if (ret)
388 			return ret;
389 	}
390 
391 	genpd->status = GPD_STATE_POWER_OFF;
392 	genpd_update_accounting(genpd);
393 
394 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
395 		genpd_sd_counter_dec(link->master);
396 		genpd_lock_nested(link->master, depth + 1);
397 		genpd_power_off(link->master, false, depth + 1);
398 		genpd_unlock(link->master);
399 	}
400 
401 	return 0;
402 }
403 
404 /**
405  * genpd_power_on - Restore power to a given PM domain and its masters.
406  * @genpd: PM domain to power up.
407  * @depth: nesting count for lockdep.
408  *
409  * Restore power to @genpd and all of its masters so that it is possible to
410  * resume a device belonging to it.
411  */
412 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
413 {
414 	struct gpd_link *link;
415 	int ret = 0;
416 
417 	if (genpd_status_on(genpd))
418 		return 0;
419 
420 	/*
421 	 * The list is guaranteed not to change while the loop below is being
422 	 * executed, unless one of the masters' .power_on() callbacks fiddles
423 	 * with it.
424 	 */
425 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
426 		struct generic_pm_domain *master = link->master;
427 
428 		genpd_sd_counter_inc(master);
429 
430 		genpd_lock_nested(master, depth + 1);
431 		ret = genpd_power_on(master, depth + 1);
432 		genpd_unlock(master);
433 
434 		if (ret) {
435 			genpd_sd_counter_dec(master);
436 			goto err;
437 		}
438 	}
439 
440 	ret = _genpd_power_on(genpd, true);
441 	if (ret)
442 		goto err;
443 
444 	genpd->status = GPD_STATE_ACTIVE;
445 	genpd_update_accounting(genpd);
446 
447 	return 0;
448 
449  err:
450 	list_for_each_entry_continue_reverse(link,
451 					&genpd->slave_links,
452 					slave_node) {
453 		genpd_sd_counter_dec(link->master);
454 		genpd_lock_nested(link->master, depth + 1);
455 		genpd_power_off(link->master, false, depth + 1);
456 		genpd_unlock(link->master);
457 	}
458 
459 	return ret;
460 }
461 
462 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
463 				     unsigned long val, void *ptr)
464 {
465 	struct generic_pm_domain_data *gpd_data;
466 	struct device *dev;
467 
468 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
469 	dev = gpd_data->base.dev;
470 
471 	for (;;) {
472 		struct generic_pm_domain *genpd;
473 		struct pm_domain_data *pdd;
474 
475 		spin_lock_irq(&dev->power.lock);
476 
477 		pdd = dev->power.subsys_data ?
478 				dev->power.subsys_data->domain_data : NULL;
479 		if (pdd) {
480 			to_gpd_data(pdd)->td.constraint_changed = true;
481 			genpd = dev_to_genpd(dev);
482 		} else {
483 			genpd = ERR_PTR(-ENODATA);
484 		}
485 
486 		spin_unlock_irq(&dev->power.lock);
487 
488 		if (!IS_ERR(genpd)) {
489 			genpd_lock(genpd);
490 			genpd->max_off_time_changed = true;
491 			genpd_unlock(genpd);
492 		}
493 
494 		dev = dev->parent;
495 		if (!dev || dev->power.ignore_children)
496 			break;
497 	}
498 
499 	return NOTIFY_DONE;
500 }
501 
502 /**
503  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
504  * @work: Work structure used for scheduling the execution of this function.
505  */
506 static void genpd_power_off_work_fn(struct work_struct *work)
507 {
508 	struct generic_pm_domain *genpd;
509 
510 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
511 
512 	genpd_lock(genpd);
513 	genpd_power_off(genpd, false, 0);
514 	genpd_unlock(genpd);
515 }
516 
517 /**
518  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
519  * @dev: Device to handle.
520  */
521 static int __genpd_runtime_suspend(struct device *dev)
522 {
523 	int (*cb)(struct device *__dev);
524 
525 	if (dev->type && dev->type->pm)
526 		cb = dev->type->pm->runtime_suspend;
527 	else if (dev->class && dev->class->pm)
528 		cb = dev->class->pm->runtime_suspend;
529 	else if (dev->bus && dev->bus->pm)
530 		cb = dev->bus->pm->runtime_suspend;
531 	else
532 		cb = NULL;
533 
534 	if (!cb && dev->driver && dev->driver->pm)
535 		cb = dev->driver->pm->runtime_suspend;
536 
537 	return cb ? cb(dev) : 0;
538 }
539 
540 /**
541  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
542  * @dev: Device to handle.
543  */
544 static int __genpd_runtime_resume(struct device *dev)
545 {
546 	int (*cb)(struct device *__dev);
547 
548 	if (dev->type && dev->type->pm)
549 		cb = dev->type->pm->runtime_resume;
550 	else if (dev->class && dev->class->pm)
551 		cb = dev->class->pm->runtime_resume;
552 	else if (dev->bus && dev->bus->pm)
553 		cb = dev->bus->pm->runtime_resume;
554 	else
555 		cb = NULL;
556 
557 	if (!cb && dev->driver && dev->driver->pm)
558 		cb = dev->driver->pm->runtime_resume;
559 
560 	return cb ? cb(dev) : 0;
561 }
562 
563 /**
564  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
565  * @dev: Device to suspend.
566  *
567  * Carry out a runtime suspend of a device under the assumption that its
568  * pm_domain field points to the domain member of an object of type
569  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
570  */
571 static int genpd_runtime_suspend(struct device *dev)
572 {
573 	struct generic_pm_domain *genpd;
574 	bool (*suspend_ok)(struct device *__dev);
575 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
576 	bool runtime_pm = pm_runtime_enabled(dev);
577 	ktime_t time_start;
578 	s64 elapsed_ns;
579 	int ret;
580 
581 	dev_dbg(dev, "%s()\n", __func__);
582 
583 	genpd = dev_to_genpd(dev);
584 	if (IS_ERR(genpd))
585 		return -EINVAL;
586 
587 	/*
588 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
589 	 * callbacks for other purposes than runtime PM. In those scenarios
590 	 * runtime PM is disabled. Under these circumstances, we shall skip
591 	 * validating/measuring the PM QoS latency.
592 	 */
593 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
594 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
595 		return -EBUSY;
596 
597 	/* Measure suspend latency. */
598 	time_start = 0;
599 	if (runtime_pm)
600 		time_start = ktime_get();
601 
602 	ret = __genpd_runtime_suspend(dev);
603 	if (ret)
604 		return ret;
605 
606 	ret = genpd_stop_dev(genpd, dev);
607 	if (ret) {
608 		__genpd_runtime_resume(dev);
609 		return ret;
610 	}
611 
612 	/* Update suspend latency value if the measured time exceeds it. */
613 	if (runtime_pm) {
614 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
615 		if (elapsed_ns > td->suspend_latency_ns) {
616 			td->suspend_latency_ns = elapsed_ns;
617 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
618 				elapsed_ns);
619 			genpd->max_off_time_changed = true;
620 			td->constraint_changed = true;
621 		}
622 	}
623 
624 	/*
625 	 * If power.irq_safe is set, this routine may be run with
626 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
627 	 */
628 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
629 		return 0;
630 
631 	genpd_lock(genpd);
632 	genpd_power_off(genpd, true, 0);
633 	genpd_unlock(genpd);
634 
635 	return 0;
636 }
637 
638 /**
639  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
640  * @dev: Device to resume.
641  *
642  * Carry out a runtime resume of a device under the assumption that its
643  * pm_domain field points to the domain member of an object of type
644  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
645  */
646 static int genpd_runtime_resume(struct device *dev)
647 {
648 	struct generic_pm_domain *genpd;
649 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
650 	bool runtime_pm = pm_runtime_enabled(dev);
651 	ktime_t time_start;
652 	s64 elapsed_ns;
653 	int ret;
654 	bool timed = true;
655 
656 	dev_dbg(dev, "%s()\n", __func__);
657 
658 	genpd = dev_to_genpd(dev);
659 	if (IS_ERR(genpd))
660 		return -EINVAL;
661 
662 	/*
663 	 * As we don't power off a non IRQ safe domain, which holds
664 	 * an IRQ safe device, we don't need to restore power to it.
665 	 */
666 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
667 		timed = false;
668 		goto out;
669 	}
670 
671 	genpd_lock(genpd);
672 	ret = genpd_power_on(genpd, 0);
673 	genpd_unlock(genpd);
674 
675 	if (ret)
676 		return ret;
677 
678  out:
679 	/* Measure resume latency. */
680 	time_start = 0;
681 	if (timed && runtime_pm)
682 		time_start = ktime_get();
683 
684 	ret = genpd_start_dev(genpd, dev);
685 	if (ret)
686 		goto err_poweroff;
687 
688 	ret = __genpd_runtime_resume(dev);
689 	if (ret)
690 		goto err_stop;
691 
692 	/* Update resume latency value if the measured time exceeds it. */
693 	if (timed && runtime_pm) {
694 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
695 		if (elapsed_ns > td->resume_latency_ns) {
696 			td->resume_latency_ns = elapsed_ns;
697 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
698 				elapsed_ns);
699 			genpd->max_off_time_changed = true;
700 			td->constraint_changed = true;
701 		}
702 	}
703 
704 	return 0;
705 
706 err_stop:
707 	genpd_stop_dev(genpd, dev);
708 err_poweroff:
709 	if (!pm_runtime_is_irq_safe(dev) ||
710 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
711 		genpd_lock(genpd);
712 		genpd_power_off(genpd, true, 0);
713 		genpd_unlock(genpd);
714 	}
715 
716 	return ret;
717 }
718 
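/*
 * Illustrative sketch, not part of the original file: from a consumer
 * driver's point of view the two callbacks above run when the last runtime
 * PM reference is dropped and when the first one is taken again. The foo_*
 * name is hypothetical; the device is assumed to be attached to a genpd
 * with runtime PM enabled.
 */
static void foo_do_transfer(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* ends up in genpd_runtime_resume() */

	/* ... access the hardware ... */

	pm_runtime_put_sync(dev);	/* may end up in genpd_runtime_suspend() */
}
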
719 static bool pd_ignore_unused;
720 static int __init pd_ignore_unused_setup(char *__unused)
721 {
722 	pd_ignore_unused = true;
723 	return 1;
724 }
725 __setup("pd_ignore_unused", pd_ignore_unused_setup);
726 
727 /**
728  * genpd_power_off_unused - Power off all PM domains with no devices in use.
729  */
730 static int __init genpd_power_off_unused(void)
731 {
732 	struct generic_pm_domain *genpd;
733 
734 	if (pd_ignore_unused) {
735 		pr_warn("genpd: Not disabling unused power domains\n");
736 		return 0;
737 	}
738 
739 	mutex_lock(&gpd_list_lock);
740 
741 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
742 		genpd_queue_power_off_work(genpd);
743 
744 	mutex_unlock(&gpd_list_lock);
745 
746 	return 0;
747 }
748 late_initcall(genpd_power_off_unused);
749 
750 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
751 
752 /**
753  * pm_genpd_present - Check if the given PM domain has been initialized.
754  * @genpd: PM domain to check.
755  */
756 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
757 {
758 	const struct generic_pm_domain *gpd;
759 
760 	if (IS_ERR_OR_NULL(genpd))
761 		return false;
762 
763 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
764 		if (gpd == genpd)
765 			return true;
766 
767 	return false;
768 }
769 
770 #endif
771 
772 #ifdef CONFIG_PM_SLEEP
773 
774 static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
775 				    struct device *dev)
776 {
777 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
778 }
779 
780 /**
781  * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
782  * @genpd: PM domain to power off, if possible.
783  * @use_lock: use the lock.
784  * @depth: nesting count for lockdep.
785  *
786  * Check if the given PM domain can be powered off (during system suspend or
787  * hibernation) and do that if so.  Also, in that case propagate to its masters.
788  *
789  * This function is only called in "noirq" and "syscore" stages of system power
790  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
791  * these cases the lock must be held.
792  */
793 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
794 				 unsigned int depth)
795 {
796 	struct gpd_link *link;
797 
798 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
799 		return;
800 
801 	if (genpd->suspended_count != genpd->device_count
802 	    || atomic_read(&genpd->sd_count) > 0)
803 		return;
804 
805 	/* Choose the deepest state when suspending */
806 	genpd->state_idx = genpd->state_count - 1;
807 	if (_genpd_power_off(genpd, false))
808 		return;
809 
810 	genpd->status = GPD_STATE_POWER_OFF;
811 
812 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
813 		genpd_sd_counter_dec(link->master);
814 
815 		if (use_lock)
816 			genpd_lock_nested(link->master, depth + 1);
817 
818 		genpd_sync_power_off(link->master, use_lock, depth + 1);
819 
820 		if (use_lock)
821 			genpd_unlock(link->master);
822 	}
823 }
824 
825 /**
826  * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
827  * @genpd: PM domain to power on.
828  * @use_lock: use the lock.
829  * @depth: nesting count for lockdep.
830  *
831  * This function is only called in "noirq" and "syscore" stages of system power
832  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
833  * these cases the lock must be held.
834  */
835 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
836 				unsigned int depth)
837 {
838 	struct gpd_link *link;
839 
840 	if (genpd_status_on(genpd))
841 		return;
842 
843 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
844 		genpd_sd_counter_inc(link->master);
845 
846 		if (use_lock)
847 			genpd_lock_nested(link->master, depth + 1);
848 
849 		genpd_sync_power_on(link->master, use_lock, depth + 1);
850 
851 		if (use_lock)
852 			genpd_unlock(link->master);
853 	}
854 
855 	_genpd_power_on(genpd, false);
856 
857 	genpd->status = GPD_STATE_ACTIVE;
858 }
859 
860 /**
861  * resume_needed - Check whether to resume a device before system suspend.
862  * @dev: Device to check.
863  * @genpd: PM domain the device belongs to.
864  *
865  * There are two cases in which a device that can wake up the system from sleep
866  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
867  * to wake up the system and it has to remain active for this purpose while the
868  * system is in the sleep state and (2) if the device is not enabled to wake up
869  * the system from sleep states and it generally doesn't generate wakeup signals
870  * by itself (those signals are generated on its behalf by other parts of the
871  * system).  In the latter case it may be necessary to reconfigure the device's
872  * wakeup settings during system suspend, because it may have been set up to
873  * signal remote wakeup from the system's working state as needed by runtime PM.
874  * Return 'true' in either of the above cases.
875  */
876 static bool resume_needed(struct device *dev,
877 			  const struct generic_pm_domain *genpd)
878 {
879 	bool active_wakeup;
880 
881 	if (!device_can_wakeup(dev))
882 		return false;
883 
884 	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
885 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
886 }
887 
888 /**
889  * pm_genpd_prepare - Start power transition of a device in a PM domain.
890  * @dev: Device to start the transition of.
891  *
892  * Start a power transition of a device (during a system-wide power transition)
893  * under the assumption that its pm_domain field points to the domain member of
894  * an object of type struct generic_pm_domain representing a PM domain
895  * consisting of I/O devices.
896  */
897 static int pm_genpd_prepare(struct device *dev)
898 {
899 	struct generic_pm_domain *genpd;
900 	int ret;
901 
902 	dev_dbg(dev, "%s()\n", __func__);
903 
904 	genpd = dev_to_genpd(dev);
905 	if (IS_ERR(genpd))
906 		return -EINVAL;
907 
908 	/*
909 	 * If a wakeup request is pending for the device, it should be woken up
910 	 * at this point and a system wakeup event should be reported if it's
911 	 * set up to wake up the system from sleep states.
912 	 */
913 	if (resume_needed(dev, genpd))
914 		pm_runtime_resume(dev);
915 
916 	genpd_lock(genpd);
917 
918 	if (genpd->prepared_count++ == 0)
919 		genpd->suspended_count = 0;
920 
921 	genpd_unlock(genpd);
922 
923 	ret = pm_generic_prepare(dev);
924 	if (ret) {
925 		genpd_lock(genpd);
926 
927 		genpd->prepared_count--;
928 
929 		genpd_unlock(genpd);
930 	}
931 
932 	return ret;
933 }
934 
935 /**
936  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
937  *   I/O pm domain.
938  * @dev: Device to suspend.
939  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
940  *
941  * Stop the device and remove power from the domain if all devices in it have
942  * been stopped.
943  */
944 static int genpd_finish_suspend(struct device *dev, bool poweroff)
945 {
946 	struct generic_pm_domain *genpd;
947 	int ret;
948 
949 	genpd = dev_to_genpd(dev);
950 	if (IS_ERR(genpd))
951 		return -EINVAL;
952 
953 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
954 		return 0;
955 
956 	if (poweroff)
957 		ret = pm_generic_poweroff_noirq(dev);
958 	else
959 		ret = pm_generic_suspend_noirq(dev);
960 	if (ret)
961 		return ret;
962 
963 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
964 		ret = pm_runtime_force_suspend(dev);
965 		if (ret)
966 			return ret;
967 	}
968 
969 	genpd_lock(genpd);
970 	genpd->suspended_count++;
971 	genpd_sync_power_off(genpd, true, 0);
972 	genpd_unlock(genpd);
973 
974 	return 0;
975 }
976 
977 /**
978  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
979  * @dev: Device to suspend.
980  *
981  * Stop the device and remove power from the domain if all devices in it have
982  * been stopped.
983  */
984 static int pm_genpd_suspend_noirq(struct device *dev)
985 {
986 	dev_dbg(dev, "%s()\n", __func__);
987 
988 	return genpd_finish_suspend(dev, false);
989 }
990 
991 /**
992  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
993  * @dev: Device to resume.
994  *
995  * Restore power to the device's PM domain, if necessary, and start the device.
996  */
997 static int pm_genpd_resume_noirq(struct device *dev)
998 {
999 	struct generic_pm_domain *genpd;
1000 	int ret = 0;
1001 
1002 	dev_dbg(dev, "%s()\n", __func__);
1003 
1004 	genpd = dev_to_genpd(dev);
1005 	if (IS_ERR(genpd))
1006 		return -EINVAL;
1007 
1008 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
1009 		return 0;
1010 
1011 	genpd_lock(genpd);
1012 	genpd_sync_power_on(genpd, true, 0);
1013 	genpd->suspended_count--;
1014 	genpd_unlock(genpd);
1015 
1016 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
1017 		ret = pm_runtime_force_resume(dev);
1018 		if (ret)
1019 			return ret;
1020 	}
1021 
1022 	return pm_generic_resume_noirq(dev);
1023 }
1025 
1026 /**
1027  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1028  * @dev: Device to freeze.
1029  *
1030  * Carry out a late freeze of a device under the assumption that its
1031  * pm_domain field points to the domain member of an object of type
1032  * struct generic_pm_domain representing a power domain consisting of I/O
1033  * devices.
1034  */
1035 static int pm_genpd_freeze_noirq(struct device *dev)
1036 {
1037 	const struct generic_pm_domain *genpd;
1038 	int ret = 0;
1039 
1040 	dev_dbg(dev, "%s()\n", __func__);
1041 
1042 	genpd = dev_to_genpd(dev);
1043 	if (IS_ERR(genpd))
1044 		return -EINVAL;
1045 
1046 	ret = pm_generic_freeze_noirq(dev);
1047 	if (ret)
1048 		return ret;
1049 
1050 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
1051 		ret = pm_runtime_force_suspend(dev);
1052 
1053 	return ret;
1054 }
1055 
1056 /**
1057  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1058  * @dev: Device to thaw.
1059  *
1060  * Start the device, unless power has been removed from the domain already
1061  * before the system transition.
1062  */
1063 static int pm_genpd_thaw_noirq(struct device *dev)
1064 {
1065 	const struct generic_pm_domain *genpd;
1066 	int ret = 0;
1067 
1068 	dev_dbg(dev, "%s()\n", __func__);
1069 
1070 	genpd = dev_to_genpd(dev);
1071 	if (IS_ERR(genpd))
1072 		return -EINVAL;
1073 
1074 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
1075 		ret = pm_runtime_force_resume(dev);
1076 		if (ret)
1077 			return ret;
1078 	}
1079 
1080 	return pm_generic_thaw_noirq(dev);
1081 }
1082 
1083 /**
1084  * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
1085  *   I/O PM domain.
1086  * @dev: Device to poweroff.
1087  *
1088  * Stop the device and remove power from the domain if all devices in it have
1089  * been stopped.
1090  */
1091 static int pm_genpd_poweroff_noirq(struct device *dev)
1092 {
1093 	dev_dbg(dev, "%s()\n", __func__);
1094 
1095 	return genpd_finish_suspend(dev, true);
1096 }
1097 
1098 /**
1099  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1100  * @dev: Device to resume.
1101  *
1102  * Make sure the domain will be in the same power state as before the
1103  * hibernation the system is resuming from and start the device if necessary.
1104  */
1105 static int pm_genpd_restore_noirq(struct device *dev)
1106 {
1107 	struct generic_pm_domain *genpd;
1108 	int ret = 0;
1109 
1110 	dev_dbg(dev, "%s()\n", __func__);
1111 
1112 	genpd = dev_to_genpd(dev);
1113 	if (IS_ERR(genpd))
1114 		return -EINVAL;
1115 
1116 	/*
1117 	 * At this point suspended_count == 0 means we are being run for the
1118 	 * first time for the given domain in the present cycle.
1119 	 */
1120 	genpd_lock(genpd);
1121 	if (genpd->suspended_count++ == 0)
1122 		/*
1123 		 * The boot kernel might put the domain into arbitrary state,
1124 		 * so make it appear as powered off to genpd_sync_power_on(),
1125 		 * so that it tries to power it on in case it was really off.
1126 		 */
1127 		genpd->status = GPD_STATE_POWER_OFF;
1128 
1129 	genpd_sync_power_on(genpd, true, 0);
1130 	genpd_unlock(genpd);
1131 
1132 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
1133 		ret = pm_runtime_force_resume(dev);
1134 		if (ret)
1135 			return ret;
1136 	}
1137 
1138 	return pm_generic_restore_noirq(dev);
1139 }
1140 
1141 /**
1142  * pm_genpd_complete - Complete power transition of a device in a power domain.
1143  * @dev: Device to complete the transition of.
1144  *
1145  * Complete a power transition of a device (during a system-wide power
1146  * transition) under the assumption that its pm_domain field points to the
1147  * domain member of an object of type struct generic_pm_domain representing
1148  * a power domain consisting of I/O devices.
1149  */
1150 static void pm_genpd_complete(struct device *dev)
1151 {
1152 	struct generic_pm_domain *genpd;
1153 
1154 	dev_dbg(dev, "%s()\n", __func__);
1155 
1156 	genpd = dev_to_genpd(dev);
1157 	if (IS_ERR(genpd))
1158 		return;
1159 
1160 	pm_generic_complete(dev);
1161 
1162 	genpd_lock(genpd);
1163 
1164 	genpd->prepared_count--;
1165 	if (!genpd->prepared_count)
1166 		genpd_queue_power_off_work(genpd);
1167 
1168 	genpd_unlock(genpd);
1169 }
1170 
1171 /**
1172  * genpd_syscore_switch - Switch power during system core suspend or resume.
1173  * @dev: Device that normally is marked as "always on" to switch power for.
1174  *
1175  * This routine may only be called during the system core (syscore) suspend or
1176  * resume phase for devices whose "always on" flags are set.
1177  */
1178 static void genpd_syscore_switch(struct device *dev, bool suspend)
1179 {
1180 	struct generic_pm_domain *genpd;
1181 
1182 	genpd = dev_to_genpd(dev);
1183 	if (!pm_genpd_present(genpd))
1184 		return;
1185 
1186 	if (suspend) {
1187 		genpd->suspended_count++;
1188 		genpd_sync_power_off(genpd, false, 0);
1189 	} else {
1190 		genpd_sync_power_on(genpd, false, 0);
1191 		genpd->suspended_count--;
1192 	}
1193 }
1194 
1195 void pm_genpd_syscore_poweroff(struct device *dev)
1196 {
1197 	genpd_syscore_switch(dev, true);
1198 }
1199 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1200 
1201 void pm_genpd_syscore_poweron(struct device *dev)
1202 {
1203 	genpd_syscore_switch(dev, false);
1204 }
1205 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
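
/*
 * Illustrative sketch, not part of the original file: a timer driver whose
 * device is flagged as "always on" but must still drop its domain at the
 * very end of suspend can do so from a syscore hook (registered with
 * register_syscore_ops()). The foo_* names are hypothetical.
 */
static struct device *foo_timer_dev;	/* saved at probe time */

static int foo_timer_syscore_suspend(void)
{
	pm_genpd_syscore_poweroff(foo_timer_dev);
	return 0;
}

static void foo_timer_syscore_resume(void)
{
	pm_genpd_syscore_poweron(foo_timer_dev);
}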
1206 
1207 #else /* !CONFIG_PM_SLEEP */
1208 
1209 #define pm_genpd_prepare		NULL
1210 #define pm_genpd_suspend_noirq		NULL
1211 #define pm_genpd_resume_noirq		NULL
1212 #define pm_genpd_freeze_noirq		NULL
1213 #define pm_genpd_thaw_noirq		NULL
1214 #define pm_genpd_poweroff_noirq		NULL
1215 #define pm_genpd_restore_noirq		NULL
1216 #define pm_genpd_complete		NULL
1217 
1218 #endif /* CONFIG_PM_SLEEP */
1219 
1220 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1221 					struct generic_pm_domain *genpd,
1222 					struct gpd_timing_data *td)
1223 {
1224 	struct generic_pm_domain_data *gpd_data;
1225 	int ret;
1226 
1227 	ret = dev_pm_get_subsys_data(dev);
1228 	if (ret)
1229 		return ERR_PTR(ret);
1230 
1231 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1232 	if (!gpd_data) {
1233 		ret = -ENOMEM;
1234 		goto err_put;
1235 	}
1236 
1237 	if (td)
1238 		gpd_data->td = *td;
1239 
1240 	gpd_data->base.dev = dev;
1241 	gpd_data->td.constraint_changed = true;
1242 	gpd_data->td.effective_constraint_ns = -1;
1243 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1244 
1245 	spin_lock_irq(&dev->power.lock);
1246 
1247 	if (dev->power.subsys_data->domain_data) {
1248 		ret = -EINVAL;
1249 		goto err_free;
1250 	}
1251 
1252 	dev->power.subsys_data->domain_data = &gpd_data->base;
1253 
1254 	spin_unlock_irq(&dev->power.lock);
1255 
1256 	return gpd_data;
1257 
1258  err_free:
1259 	spin_unlock_irq(&dev->power.lock);
1260 	kfree(gpd_data);
1261  err_put:
1262 	dev_pm_put_subsys_data(dev);
1263 	return ERR_PTR(ret);
1264 }
1265 
1266 static void genpd_free_dev_data(struct device *dev,
1267 				struct generic_pm_domain_data *gpd_data)
1268 {
1269 	spin_lock_irq(&dev->power.lock);
1270 
1271 	dev->power.subsys_data->domain_data = NULL;
1272 
1273 	spin_unlock_irq(&dev->power.lock);
1274 
1275 	kfree(gpd_data);
1276 	dev_pm_put_subsys_data(dev);
1277 }
1278 
1279 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1280 			    struct gpd_timing_data *td)
1281 {
1282 	struct generic_pm_domain_data *gpd_data;
1283 	int ret = 0;
1284 
1285 	dev_dbg(dev, "%s()\n", __func__);
1286 
1287 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1288 		return -EINVAL;
1289 
1290 	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1291 	if (IS_ERR(gpd_data))
1292 		return PTR_ERR(gpd_data);
1293 
1294 	genpd_lock(genpd);
1295 
1296 	if (genpd->prepared_count > 0) {
1297 		ret = -EAGAIN;
1298 		goto out;
1299 	}
1300 
1301 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1302 	if (ret)
1303 		goto out;
1304 
1305 	dev_pm_domain_set(dev, &genpd->domain);
1306 
1307 	genpd->device_count++;
1308 	genpd->max_off_time_changed = true;
1309 
1310 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1311 
1312  out:
1313 	genpd_unlock(genpd);
1314 
1315 	if (ret)
1316 		genpd_free_dev_data(dev, gpd_data);
1317 	else
1318 		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1319 
1320 	return ret;
1321 }
1322 
1323 /**
1324  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1325  * @genpd: PM domain to add the device to.
1326  * @dev: Device to be added.
1327  * @td: Set of PM QoS timing parameters to attach to the device.
1328  */
1329 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1330 			  struct gpd_timing_data *td)
1331 {
1332 	int ret;
1333 
1334 	mutex_lock(&gpd_list_lock);
1335 	ret = genpd_add_device(genpd, dev, td);
1336 	mutex_unlock(&gpd_list_lock);
1337 
1338 	return ret;
1339 }
1340 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
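
/*
 * Illustrative sketch, not part of the original file: attaching a device to
 * an already initialized domain without special timing data. The foo_pd
 * argument is hypothetical; passing a NULL td leaves the timing data at the
 * defaults set up in genpd_alloc_dev_data().
 */
static int foo_attach_device(struct generic_pm_domain *foo_pd,
			     struct device *dev)
{
	return __pm_genpd_add_device(foo_pd, dev, NULL);
}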
1341 
1342 static int genpd_remove_device(struct generic_pm_domain *genpd,
1343 			       struct device *dev)
1344 {
1345 	struct generic_pm_domain_data *gpd_data;
1346 	struct pm_domain_data *pdd;
1347 	int ret = 0;
1348 
1349 	dev_dbg(dev, "%s()\n", __func__);
1350 
1351 	pdd = dev->power.subsys_data->domain_data;
1352 	gpd_data = to_gpd_data(pdd);
1353 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1354 
1355 	genpd_lock(genpd);
1356 
1357 	if (genpd->prepared_count > 0) {
1358 		ret = -EAGAIN;
1359 		goto out;
1360 	}
1361 
1362 	genpd->device_count--;
1363 	genpd->max_off_time_changed = true;
1364 
1365 	if (genpd->detach_dev)
1366 		genpd->detach_dev(genpd, dev);
1367 
1368 	dev_pm_domain_set(dev, NULL);
1369 
1370 	list_del_init(&pdd->list_node);
1371 
1372 	genpd_unlock(genpd);
1373 
1374 	genpd_free_dev_data(dev, gpd_data);
1375 
1376 	return 0;
1377 
1378  out:
1379 	genpd_unlock(genpd);
1380 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1381 
1382 	return ret;
1383 }
1384 
1385 /**
1386  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1387  * @genpd: PM domain to remove the device from.
1388  * @dev: Device to be removed.
1389  */
1390 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1391 			   struct device *dev)
1392 {
1393 	if (!genpd || genpd != genpd_lookup_dev(dev))
1394 		return -EINVAL;
1395 
1396 	return genpd_remove_device(genpd, dev);
1397 }
1398 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1399 
1400 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1401 			       struct generic_pm_domain *subdomain)
1402 {
1403 	struct gpd_link *link, *itr;
1404 	int ret = 0;
1405 
1406 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1407 	    || genpd == subdomain)
1408 		return -EINVAL;
1409 
1410 	/*
1411 	 * If the subdomain can be powered on/off in an IRQ safe
1412 	 * context, its parent must be able to be powered on/off
1413 	 * in that context as well.
1414 	 */
1415 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1416 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1417 				genpd->name, subdomain->name);
1418 		return -EINVAL;
1419 	}
1420 
1421 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1422 	if (!link)
1423 		return -ENOMEM;
1424 
1425 	genpd_lock(subdomain);
1426 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1427 
1428 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1429 		ret = -EINVAL;
1430 		goto out;
1431 	}
1432 
1433 	list_for_each_entry(itr, &genpd->master_links, master_node) {
1434 		if (itr->slave == subdomain && itr->master == genpd) {
1435 			ret = -EINVAL;
1436 			goto out;
1437 		}
1438 	}
1439 
1440 	link->master = genpd;
1441 	list_add_tail(&link->master_node, &genpd->master_links);
1442 	link->slave = subdomain;
1443 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1444 	if (genpd_status_on(subdomain))
1445 		genpd_sd_counter_inc(genpd);
1446 
1447  out:
1448 	genpd_unlock(genpd);
1449 	genpd_unlock(subdomain);
1450 	if (ret)
1451 		kfree(link);
1452 	return ret;
1453 }
1454 
1455 /**
1456  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1457  * @genpd: Master PM domain to add the subdomain to.
1458  * @subdomain: Subdomain to be added.
1459  */
1460 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1461 			   struct generic_pm_domain *subdomain)
1462 {
1463 	int ret;
1464 
1465 	mutex_lock(&gpd_list_lock);
1466 	ret = genpd_add_subdomain(genpd, subdomain);
1467 	mutex_unlock(&gpd_list_lock);
1468 
1469 	return ret;
1470 }
1471 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
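
/*
 * Illustrative sketch, not part of the original file: stacking two
 * hypothetical domains so that the CPU domain becomes a subdomain of the
 * cluster domain. The cluster (master) is then kept powered whenever the
 * CPU domain is on, via the sd_count handling above.
 */
static int foo_link_domains(struct generic_pm_domain *cluster_pd,
			    struct generic_pm_domain *cpu_pd)
{
	/* Both domains must already have been registered with pm_genpd_init(). */
	return pm_genpd_add_subdomain(cluster_pd, cpu_pd);
}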
1472 
1473 /**
1474  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1475  * @genpd: Master PM domain to remove the subdomain from.
1476  * @subdomain: Subdomain to be removed.
1477  */
1478 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1479 			      struct generic_pm_domain *subdomain)
1480 {
1481 	struct gpd_link *l, *link;
1482 	int ret = -EINVAL;
1483 
1484 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1485 		return -EINVAL;
1486 
1487 	genpd_lock(subdomain);
1488 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1489 
1490 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1491 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1492 			subdomain->name);
1493 		ret = -EBUSY;
1494 		goto out;
1495 	}
1496 
1497 	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1498 		if (link->slave != subdomain)
1499 			continue;
1500 
1501 		list_del(&link->master_node);
1502 		list_del(&link->slave_node);
1503 		kfree(link);
1504 		if (genpd_status_on(subdomain))
1505 			genpd_sd_counter_dec(genpd);
1506 
1507 		ret = 0;
1508 		break;
1509 	}
1510 
1511 out:
1512 	genpd_unlock(genpd);
1513 	genpd_unlock(subdomain);
1514 
1515 	return ret;
1516 }
1517 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1518 
1519 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1520 {
1521 	struct genpd_power_state *state;
1522 
1523 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1524 	if (!state)
1525 		return -ENOMEM;
1526 
1527 	genpd->states = state;
1528 	genpd->state_count = 1;
1529 	genpd->free = state;
1530 
1531 	return 0;
1532 }
1533 
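/*
 * Illustrative sketch, not part of the original file: a domain that declares
 * its own idle states instead of relying on the single default "off" state
 * created by genpd_set_default_power_state(). The latency and residency
 * numbers are invented for the example.
 */
static struct genpd_power_state foo_pd_states[] = {
	{
		.power_off_latency_ns	= 100000,
		.power_on_latency_ns	= 200000,
		.residency_ns		= 1000000,	/* shallow state */
	},
	{
		.power_off_latency_ns	= 400000,
		.power_on_latency_ns	= 800000,
		.residency_ns		= 5000000,	/* deep state */
	},
};

static void foo_pd_set_states(struct generic_pm_domain *genpd)
{
	/* Must be done before pm_genpd_init(), which checks state_count. */
	genpd->states = foo_pd_states;
	genpd->state_count = ARRAY_SIZE(foo_pd_states);
}
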
1534 static void genpd_lock_init(struct generic_pm_domain *genpd)
1535 {
1536 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1537 		spin_lock_init(&genpd->slock);
1538 		genpd->lock_ops = &genpd_spin_ops;
1539 	} else {
1540 		mutex_init(&genpd->mlock);
1541 		genpd->lock_ops = &genpd_mtx_ops;
1542 	}
1543 }
1544 
1545 /**
1546  * pm_genpd_init - Initialize a generic I/O PM domain object.
1547  * @genpd: PM domain object to initialize.
1548  * @gov: PM domain governor to associate with the domain (may be NULL).
1549  * @is_off: Initial state of the PM domain, "true" if initially powered off.
1550  *
1551  * Returns 0 on successful initialization, else a negative error code.
1552  */
1553 int pm_genpd_init(struct generic_pm_domain *genpd,
1554 		  struct dev_power_governor *gov, bool is_off)
1555 {
1556 	int ret;
1557 
1558 	if (IS_ERR_OR_NULL(genpd))
1559 		return -EINVAL;
1560 
1561 	INIT_LIST_HEAD(&genpd->master_links);
1562 	INIT_LIST_HEAD(&genpd->slave_links);
1563 	INIT_LIST_HEAD(&genpd->dev_list);
1564 	genpd_lock_init(genpd);
1565 	genpd->gov = gov;
1566 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1567 	atomic_set(&genpd->sd_count, 0);
1568 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1569 	genpd->device_count = 0;
1570 	genpd->max_off_time_ns = -1;
1571 	genpd->max_off_time_changed = true;
1572 	genpd->provider = NULL;
1573 	genpd->has_provider = false;
1574 	genpd->accounting_time = ktime_get();
1575 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1576 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1577 	genpd->domain.ops.prepare = pm_genpd_prepare;
1578 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1579 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1580 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1581 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1582 	genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
1583 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1584 	genpd->domain.ops.complete = pm_genpd_complete;
1585 
1586 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1587 		genpd->dev_ops.stop = pm_clk_suspend;
1588 		genpd->dev_ops.start = pm_clk_resume;
1589 	}
1590 
1591 	/* Always-on domains must be powered on at initialization. */
1592 	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1593 		return -EINVAL;
1594 
1595 	/* Use only one "off" state if there were no states declared */
1596 	if (genpd->state_count == 0) {
1597 		ret = genpd_set_default_power_state(genpd);
1598 		if (ret)
1599 			return ret;
1600 	}
1601 
1602 	mutex_lock(&gpd_list_lock);
1603 	list_add(&genpd->gpd_list_node, &gpd_list);
1604 	mutex_unlock(&gpd_list_lock);
1605 
1606 	return 0;
1607 }
1608 EXPORT_SYMBOL_GPL(pm_genpd_init);
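
/*
 * Illustrative sketch, not part of the original file: how a platform driver
 * for a hypothetical "foo" power controller might register a domain. Only
 * the generic_pm_domain fields and the pm_genpd_init() call reflect this
 * file; the foo_* names and the hardware handling are made up.
 */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Enable the power switch for this island (hardware specific). */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* Disable the power switch for this island (hardware specific). */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name		= "foo-pd",
	.power_on	= foo_pd_power_on,
	.power_off	= foo_pd_power_off,
	.flags		= GENPD_FLAG_PM_CLK,	/* pm_clk_suspend/resume as stop/start */
};

static int foo_pd_probe(struct platform_device *pdev)
{
	/*
	 * Register the domain as initially powered on; a governor such as
	 * simple_qos_governor could be passed instead of NULL.
	 */
	return pm_genpd_init(&foo_pd, NULL, false);
}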
1609 
1610 static int genpd_remove(struct generic_pm_domain *genpd)
1611 {
1612 	struct gpd_link *l, *link;
1613 
1614 	if (IS_ERR_OR_NULL(genpd))
1615 		return -EINVAL;
1616 
1617 	genpd_lock(genpd);
1618 
1619 	if (genpd->has_provider) {
1620 		genpd_unlock(genpd);
1621 		pr_err("Provider present, unable to remove %s\n", genpd->name);
1622 		return -EBUSY;
1623 	}
1624 
1625 	if (!list_empty(&genpd->master_links) || genpd->device_count) {
1626 		genpd_unlock(genpd);
1627 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1628 		return -EBUSY;
1629 	}
1630 
1631 	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1632 		list_del(&link->master_node);
1633 		list_del(&link->slave_node);
1634 		kfree(link);
1635 	}
1636 
1637 	list_del(&genpd->gpd_list_node);
1638 	genpd_unlock(genpd);
1639 	cancel_work_sync(&genpd->power_off_work);
1640 	kfree(genpd->free);
1641 	pr_debug("%s: removed %s\n", __func__, genpd->name);
1642 
1643 	return 0;
1644 }
1645 
1646 /**
1647  * pm_genpd_remove - Remove a generic I/O PM domain
1648  * @genpd: Pointer to PM domain that is to be removed.
1649  *
1650  * To remove the PM domain, this function:
1651  *  - Removes the PM domain as a subdomain to any parent domains,
1652  *    if it was added.
1653  *  - Removes the PM domain from the list of registered PM domains.
1654  *
1655  * The PM domain will only be removed if the associated provider has
1656  * been removed, it is not a parent of any other PM domain, and it has
1657  * no devices associated with it.
1658  */
1659 int pm_genpd_remove(struct generic_pm_domain *genpd)
1660 {
1661 	int ret;
1662 
1663 	mutex_lock(&gpd_list_lock);
1664 	ret = genpd_remove(genpd);
1665 	mutex_unlock(&gpd_list_lock);
1666 
1667 	return ret;
1668 }
1669 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1670 
1671 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1672 
1673 /*
1674  * Device Tree based PM domain providers.
1675  *
1676  * The code below implements generic device tree based PM domain providers that
1677  * bind device tree nodes with generic PM domains registered in the system.
1678  *
1679  * Any driver that registers generic PM domains and needs to support binding of
1680  * devices to these domains is supposed to register a PM domain provider, which
1681  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1682  *
1683  * Two simple mapping functions have been provided for convenience:
1684  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1685  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1686  *    index.
1687  */
1688 
1689 /**
1690  * struct of_genpd_provider - PM domain provider registration structure
1691  * @link: Entry in global list of PM domain providers
1692  * @node: Pointer to device tree node of PM domain provider
1693  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1694  *         into a PM domain.
1695  * @data: context pointer to be passed into @xlate callback
1696  */
1697 struct of_genpd_provider {
1698 	struct list_head link;
1699 	struct device_node *node;
1700 	genpd_xlate_t xlate;
1701 	void *data;
1702 };
1703 
1704 /* List of registered PM domain providers. */
1705 static LIST_HEAD(of_genpd_providers);
1706 /* Mutex to protect the list above. */
1707 static DEFINE_MUTEX(of_genpd_mutex);
1708 
1709 /**
1710  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1711  * @genpdspec: OF phandle args to map into a PM domain
1712  * @data: xlate function private data - pointer to struct generic_pm_domain
1713  *
1714  * This is a generic xlate function that can be used to model PM domains that
1715  * have their own device tree nodes. The private data of xlate function needs
1716  * to be a valid pointer to struct generic_pm_domain.
1717  */
1718 static struct generic_pm_domain *genpd_xlate_simple(
1719 					struct of_phandle_args *genpdspec,
1720 					void *data)
1721 {
1722 	return data;
1723 }
1724 
1725 /**
1726  * genpd_xlate_onecell() - Xlate function using a single index.
1727  * @genpdspec: OF phandle args to map into a PM domain
1728  * @data: xlate function private data - pointer to struct genpd_onecell_data
1729  *
1730  * This is a generic xlate function that can be used to model simple PM domain
1731  * controllers that have one device tree node and provide multiple PM domains.
1732  * A single cell is used as an index into an array of PM domains specified in
1733  * the genpd_onecell_data struct when registering the provider.
1734  */
1735 static struct generic_pm_domain *genpd_xlate_onecell(
1736 					struct of_phandle_args *genpdspec,
1737 					void *data)
1738 {
1739 	struct genpd_onecell_data *genpd_data = data;
1740 	unsigned int idx = genpdspec->args[0];
1741 
1742 	if (genpdspec->args_count != 1)
1743 		return ERR_PTR(-EINVAL);
1744 
1745 	if (idx >= genpd_data->num_domains) {
1746 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1747 		return ERR_PTR(-EINVAL);
1748 	}
1749 
1750 	if (!genpd_data->domains[idx])
1751 		return ERR_PTR(-ENOENT);
1752 
1753 	return genpd_data->domains[idx];
1754 }
1755 
1756 /**
1757  * genpd_add_provider() - Register a PM domain provider for a node
1758  * @np: Device node pointer associated with the PM domain provider.
1759  * @xlate: Callback for decoding PM domain from phandle arguments.
1760  * @data: Context pointer for @xlate callback.
1761  */
1762 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1763 			      void *data)
1764 {
1765 	struct of_genpd_provider *cp;
1766 
1767 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1768 	if (!cp)
1769 		return -ENOMEM;
1770 
1771 	cp->node = of_node_get(np);
1772 	cp->data = data;
1773 	cp->xlate = xlate;
1774 
1775 	mutex_lock(&of_genpd_mutex);
1776 	list_add(&cp->link, &of_genpd_providers);
1777 	mutex_unlock(&of_genpd_mutex);
1778 	pr_debug("Added domain provider from %pOF\n", np);
1779 
1780 	return 0;
1781 }
1782 
1783 /**
1784  * of_genpd_add_provider_simple() - Register a simple PM domain provider
1785  * @np: Device node pointer associated with the PM domain provider.
1786  * @genpd: Pointer to PM domain associated with the PM domain provider.
1787  */
1788 int of_genpd_add_provider_simple(struct device_node *np,
1789 				 struct generic_pm_domain *genpd)
1790 {
1791 	int ret = -EINVAL;
1792 
1793 	if (!np || !genpd)
1794 		return -EINVAL;
1795 
1796 	mutex_lock(&gpd_list_lock);
1797 
1798 	if (pm_genpd_present(genpd)) {
1799 		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1800 		if (!ret) {
1801 			genpd->provider = &np->fwnode;
1802 			genpd->has_provider = true;
1803 		}
1804 	}
1805 
1806 	mutex_unlock(&gpd_list_lock);
1807 
1808 	return ret;
1809 }
1810 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
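
/*
 * Illustrative sketch, not part of the original file: a controller with a
 * single domain registers its own device tree node as the provider. The
 * foo_* names are hypothetical.
 */
static int foo_single_pd_register(struct platform_device *pdev,
				  struct generic_pm_domain *pd)
{
	/* pd must already have been registered with pm_genpd_init(). */
	return of_genpd_add_provider_simple(pdev->dev.of_node, pd);
}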
1811 
1812 /**
1813  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1814  * @np: Device node pointer associated with the PM domain provider.
1815  * @data: Pointer to the data associated with the PM domain provider.
1816  */
1817 int of_genpd_add_provider_onecell(struct device_node *np,
1818 				  struct genpd_onecell_data *data)
1819 {
1820 	unsigned int i;
1821 	int ret = -EINVAL;
1822 
1823 	if (!np || !data)
1824 		return -EINVAL;
1825 
1826 	mutex_lock(&gpd_list_lock);
1827 
1828 	if (!data->xlate)
1829 		data->xlate = genpd_xlate_onecell;
1830 
1831 	for (i = 0; i < data->num_domains; i++) {
1832 		if (!data->domains[i])
1833 			continue;
1834 		if (!pm_genpd_present(data->domains[i]))
1835 			goto error;
1836 
1837 		data->domains[i]->provider = &np->fwnode;
1838 		data->domains[i]->has_provider = true;
1839 	}
1840 
1841 	ret = genpd_add_provider(np, data->xlate, data);
1842 	if (ret < 0)
1843 		goto error;
1844 
1845 	mutex_unlock(&gpd_list_lock);
1846 
1847 	return 0;
1848 
1849 error:
1850 	while (i--) {
1851 		if (!data->domains[i])
1852 			continue;
1853 		data->domains[i]->provider = NULL;
1854 		data->domains[i]->has_provider = false;
1855 	}
1856 
1857 	mutex_unlock(&gpd_list_lock);
1858 
1859 	return ret;
1860 }
1861 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
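
/*
 * Illustrative sketch, not part of the original file: a driver exposing
 * several domains from one device tree node via genpd_onecell_data, so
 * consumers can reference them as <&foo_pd_node INDEX>. The foo_* names
 * are hypothetical.
 */
static struct generic_pm_domain *foo_domains[2];	/* filled in at probe time */

static struct genpd_onecell_data foo_pd_data = {
	.domains	= foo_domains,
	.num_domains	= ARRAY_SIZE(foo_domains),
	/* .xlate left NULL, so genpd_xlate_onecell() above is used */
};

static int foo_pd_register_provider(struct platform_device *pdev)
{
	/* Non-NULL entries in foo_domains must be pm_genpd_init()'d first. */
	return of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
}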
1862 
1863 /**
1864  * of_genpd_del_provider() - Remove a previously registered PM domain provider
1865  * @np: Device node pointer associated with the PM domain provider
1866  */
1867 void of_genpd_del_provider(struct device_node *np)
1868 {
1869 	struct of_genpd_provider *cp, *tmp;
1870 	struct generic_pm_domain *gpd;
1871 
1872 	mutex_lock(&gpd_list_lock);
1873 	mutex_lock(&of_genpd_mutex);
1874 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
1875 		if (cp->node == np) {
1876 			/*
1877 			 * For each PM domain associated with the
1878 			 * provider, set the 'has_provider' to false
1879 			 * so that the PM domain can be safely removed.
1880 			 */
1881 			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1882 				if (gpd->provider == &np->fwnode)
1883 					gpd->has_provider = false;
1884 
1885 			list_del(&cp->link);
1886 			of_node_put(cp->node);
1887 			kfree(cp);
1888 			break;
1889 		}
1890 	}
1891 	mutex_unlock(&of_genpd_mutex);
1892 	mutex_unlock(&gpd_list_lock);
1893 }
1894 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1895 
1896 /**
1897  * genpd_get_from_provider() - Look-up PM domain
1898  * @genpdspec: OF phandle args to use for look-up
1899  *
1900  * Looks for a PM domain provider under the node specified by @genpdspec and if
1901  * found, uses xlate function of the provider to map phandle args to a PM
1902  * domain.
1903  *
1904  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1905  * on failure.
1906  */
1907 static struct generic_pm_domain *genpd_get_from_provider(
1908 					struct of_phandle_args *genpdspec)
1909 {
1910 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1911 	struct of_genpd_provider *provider;
1912 
1913 	if (!genpdspec)
1914 		return ERR_PTR(-EINVAL);
1915 
1916 	mutex_lock(&of_genpd_mutex);
1917 
1918 	/* Check if we have such a provider in our array */
1919 	list_for_each_entry(provider, &of_genpd_providers, link) {
1920 		if (provider->node == genpdspec->np)
1921 			genpd = provider->xlate(genpdspec, provider->data);
1922 		if (!IS_ERR(genpd))
1923 			break;
1924 	}
1925 
1926 	mutex_unlock(&of_genpd_mutex);
1927 
1928 	return genpd;
1929 }
1930 
1931 /**
1932  * of_genpd_add_device() - Add a device to an I/O PM domain
1933  * @genpdspec: OF phandle args to use for look-up PM domain
1934  * @dev: Device to be added.
1935  *
1936  * Looks up an I/O PM domain based upon the phandle args provided and adds
1937  * the device to the PM domain. Returns a negative error code on failure.
1938  */
1939 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1940 {
1941 	struct generic_pm_domain *genpd;
1942 	int ret;
1943 
1944 	mutex_lock(&gpd_list_lock);
1945 
1946 	genpd = genpd_get_from_provider(genpdspec);
1947 	if (IS_ERR(genpd)) {
1948 		ret = PTR_ERR(genpd);
1949 		goto out;
1950 	}
1951 
1952 	ret = genpd_add_device(genpd, dev, NULL);
1953 
1954 out:
1955 	mutex_unlock(&gpd_list_lock);
1956 
1957 	return ret;
1958 }
1959 EXPORT_SYMBOL_GPL(of_genpd_add_device);
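/*
 * Illustrative sketch (not part of this file): adding a device to a domain by
 * resolving the "power-domains" specifier manually with of_genpd_add_device().
 * Most drivers never need this, since genpd_dev_pm_attach() below does it for
 * them; foo_attach_one() is a hypothetical helper.
 *
 *	static int foo_attach_one(struct device *dev)
 *	{
 *		struct of_phandle_args pd_args;
 *		int ret;
 *
 *		ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *						 "#power-domain-cells", 0,
 *						 &pd_args);
 *		if (ret)
 *			return ret;
 *
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *		return ret;
 *	}
 */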
1960 
1961 /**
1962  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1963  * @parent_spec: OF phandle args to use for parent PM domain look-up
1964  * @subdomain_spec: OF phandle args to use for subdomain look-up
1965  *
1966  * Looks-up a parent PM domain and subdomain based upon phandle args
1967  * provided and adds the subdomain to the parent PM domain. Returns a
1968  * negative error code on failure.
1969  */
1970 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1971 			   struct of_phandle_args *subdomain_spec)
1972 {
1973 	struct generic_pm_domain *parent, *subdomain;
1974 	int ret;
1975 
1976 	mutex_lock(&gpd_list_lock);
1977 
1978 	parent = genpd_get_from_provider(parent_spec);
1979 	if (IS_ERR(parent)) {
1980 		ret = PTR_ERR(parent);
1981 		goto out;
1982 	}
1983 
1984 	subdomain = genpd_get_from_provider(subdomain_spec);
1985 	if (IS_ERR(subdomain)) {
1986 		ret = PTR_ERR(subdomain);
1987 		goto out;
1988 	}
1989 
1990 	ret = genpd_add_subdomain(parent, subdomain);
1991 
1992 out:
1993 	mutex_unlock(&gpd_list_lock);
1994 
1995 	return ret;
1996 }
1997 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
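/*
 * Illustrative sketch (not part of this file): linking a child provider node
 * below the parent domain named in its "power-domains" property, e.g. from a
 * SoC power controller driver. foo_link_domains() is a hypothetical helper and
 * the child is assumed to be a simple (zero-cell) provider.
 *
 *	static int foo_link_domains(struct device_node *np)
 *	{
 *		struct of_phandle_args parent, child;
 *		int ret;
 *
 *		ret = of_parse_phandle_with_args(np, "power-domains",
 *						 "#power-domain-cells", 0,
 *						 &parent);
 *		if (ret)
 *			return ret;
 *
 *		child.np = np;		// the child provider node itself
 *		child.args_count = 0;	// zero-cell (simple) provider
 *
 *		ret = of_genpd_add_subdomain(&parent, &child);
 *		of_node_put(parent.np);
 *		return ret;
 *	}
 */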
1998 
1999 /**
2000  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2001  * @np: Device node pointer associated with the PM domain provider.
2002  *
2003  * Find the last PM domain that was added by a particular provider and
2004  * remove this PM domain from the list of PM domains. The provider is
2005  * identified by the device node that is passed. The PM domain will only
2006  * be removed if the provider associated with the domain has been
2007  * removed.
2008  *
2009  * Returns a valid pointer to struct generic_pm_domain on success or
2010  * ERR_PTR() on failure.
2011  */
2012 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2013 {
2014 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2015 	int ret;
2016 
2017 	if (IS_ERR_OR_NULL(np))
2018 		return ERR_PTR(-EINVAL);
2019 
2020 	mutex_lock(&gpd_list_lock);
2021 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2022 		if (gpd->provider == &np->fwnode) {
2023 			ret = genpd_remove(gpd);
2024 			genpd = ret ? ERR_PTR(ret) : gpd;
2025 			break;
2026 		}
2027 	}
2028 	mutex_unlock(&gpd_list_lock);
2029 
2030 	return genpd;
2031 }
2032 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
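/*
 * Illustrative sketch (not part of this file): tearing a provider down in a
 * driver remove path. The provider is unregistered first, then the domains it
 * registered are removed one by one with of_genpd_remove_last(). The function
 * name foo_pd_remove() is hypothetical.
 *
 *	static int foo_pd_remove(struct platform_device *pdev)
 *	{
 *		struct device_node *np = pdev->dev.of_node;
 *		struct generic_pm_domain *gpd;
 *
 *		of_genpd_del_provider(np);
 *
 *		// Keep removing until no domain registered for this node is left.
 *		do {
 *			gpd = of_genpd_remove_last(np);
 *		} while (!IS_ERR(gpd));
 *
 *		return 0;
 *	}
 */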
2033 
2034 /**
2035  * genpd_dev_pm_detach - Detach a device from its PM domain.
2036  * @dev: Device to detach.
2037  * @power_off: Currently not used
2038  *
2039  * Try to locate the generic PM domain to which the device was previously
2040  * attached. If one is found, the device is detached from it.
2041  */
2042 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2043 {
2044 	struct generic_pm_domain *pd;
2045 	unsigned int i;
2046 	int ret = 0;
2047 
2048 	pd = dev_to_genpd(dev);
2049 	if (IS_ERR(pd))
2050 		return;
2051 
2052 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2053 
2054 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2055 		ret = genpd_remove_device(pd, dev);
2056 		if (ret != -EAGAIN)
2057 			break;
2058 
2059 		mdelay(i);
2060 		cond_resched();
2061 	}
2062 
2063 	if (ret < 0) {
2064 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2065 			pd->name, ret);
2066 		return;
2067 	}
2068 
2069 	/* Check if PM domain can be powered off after removing this device. */
2070 	genpd_queue_power_off_work(pd);
2071 }
2072 
2073 static void genpd_dev_pm_sync(struct device *dev)
2074 {
2075 	struct generic_pm_domain *pd;
2076 
2077 	pd = dev_to_genpd(dev);
2078 	if (IS_ERR(pd))
2079 		return;
2080 
2081 	genpd_queue_power_off_work(pd);
2082 }
2083 
2084 /**
2085  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2086  * @dev: Device to attach.
2087  *
2088  * Parses the device's OF node to find a PM domain specifier. If one is found,
2089  * the device is attached to the retrieved pm_domain ops.
2090  *
2091  * Both generic and legacy Samsung-specific DT bindings are supported to keep
2092  * backwards compatibility with existing DTBs.
2093  *
2094  * Returns 0 when the PM domain is successfully attached, or a negative error
2095  * code otherwise. Note that if a power-domain exists for the device, but it
2096  * cannot be found or turned on, then -EPROBE_DEFER is returned to ensure that
2097  * the device is not probed and that the attach is retried later.
2098  */
2099 int genpd_dev_pm_attach(struct device *dev)
2100 {
2101 	struct of_phandle_args pd_args;
2102 	struct generic_pm_domain *pd;
2103 	unsigned int i;
2104 	int ret;
2105 
2106 	if (!dev->of_node)
2107 		return -ENODEV;
2108 
2109 	if (dev->pm_domain)
2110 		return -EEXIST;
2111 
2112 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2113 					"#power-domain-cells", 0, &pd_args);
2114 	if (ret < 0) {
2115 		if (ret != -ENOENT)
2116 			return ret;
2117 
2118 		/*
2119 		 * Try legacy Samsung-specific bindings
2120 		 * (for backwards compatibility of DT ABI)
2121 		 */
2122 		pd_args.args_count = 0;
2123 		pd_args.np = of_parse_phandle(dev->of_node,
2124 						"samsung,power-domain", 0);
2125 		if (!pd_args.np)
2126 			return -ENOENT;
2127 	}
2128 
2129 	mutex_lock(&gpd_list_lock);
2130 	pd = genpd_get_from_provider(&pd_args);
2131 	of_node_put(pd_args.np);
2132 	if (IS_ERR(pd)) {
2133 		mutex_unlock(&gpd_list_lock);
2134 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2135 			__func__, PTR_ERR(pd));
2136 		return -EPROBE_DEFER;
2137 	}
2138 
2139 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2140 
2141 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2142 		ret = genpd_add_device(pd, dev, NULL);
2143 		if (ret != -EAGAIN)
2144 			break;
2145 
2146 		mdelay(i);
2147 		cond_resched();
2148 	}
2149 	mutex_unlock(&gpd_list_lock);
2150 
2151 	if (ret < 0) {
2152 		if (ret != -EPROBE_DEFER)
2153 			dev_err(dev, "failed to add to PM domain %s: %d\n",
2154 				pd->name, ret);
2155 		goto out;
2156 	}
2157 
2158 	dev->pm_domain->detach = genpd_dev_pm_detach;
2159 	dev->pm_domain->sync = genpd_dev_pm_sync;
2160 
2161 	genpd_lock(pd);
2162 	ret = genpd_power_on(pd, 0);
2163 	genpd_unlock(pd);
2164 out:
2165 	return ret ? -EPROBE_DEFER : 0;
2166 }
2167 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
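/*
 * Illustrative sketch (not part of this file): how a bus probe path might use
 * genpd_dev_pm_attach() before probing a device. In mainline this is normally
 * reached via dev_pm_domain_attach(); foo_bus_probe() and
 * foo_really_probe_the_device() below are hypothetical, shown only to make the
 * -EPROBE_DEFER handling explicit.
 *
 *	static int foo_bus_probe(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = genpd_dev_pm_attach(dev);
 *		if (ret == -EPROBE_DEFER)
 *			return ret;	// domain not ready yet, retry probe later
 *
 *		// Other errors (e.g. -ENOENT) simply mean "no PM domain",
 *		// which is fine for this hypothetical bus.
 *		return foo_really_probe_the_device(dev);
 *	}
 */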
2168 
2169 static const struct of_device_id idle_state_match[] = {
2170 	{ .compatible = "domain-idle-state", },
2171 	{ }
2172 };
2173 
2174 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2175 				    struct device_node *state_node)
2176 {
2177 	int err;
2178 	u32 residency;
2179 	u32 entry_latency, exit_latency;
2180 
2181 	err = of_property_read_u32(state_node, "entry-latency-us",
2182 						&entry_latency);
2183 	if (err) {
2184 		pr_debug(" * %pOF missing entry-latency-us property\n",
2185 						state_node);
2186 		return -EINVAL;
2187 	}
2188 
2189 	err = of_property_read_u32(state_node, "exit-latency-us",
2190 						&exit_latency);
2191 	if (err) {
2192 		pr_debug(" * %pOF missing exit-latency-us property\n",
2193 						state_node);
2194 		return -EINVAL;
2195 	}
2196 
2197 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2198 	if (!err)
2199 		genpd_state->residency_ns = 1000 * residency;
2200 
2201 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2202 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2203 	genpd_state->fwnode = &state_node->fwnode;
2204 
2205 	return 0;
2206 }
2207 
2208 /**
2209  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2210  *
2211  * @dn: The genpd device node
2212  * @states: The pointer to which the state array will be saved.
2213  * @n: The count of elements in the array returned from this function.
2214  *
2215  * Returns 0 on success or a negative error code on failure. The states parsed
2216  * from the OF node are returned in @states; the memory for them is allocated
2217  * by this function, and it is the caller's responsibility to free it after use.
2218  */
2219 int of_genpd_parse_idle_states(struct device_node *dn,
2220 			struct genpd_power_state **states, int *n)
2221 {
2222 	struct genpd_power_state *st;
2223 	struct device_node *np;
2224 	int i = 0;
2225 	int err, ret;
2226 	int count;
2227 	struct of_phandle_iterator it;
2228 	const struct of_device_id *match_id;
2229 
2230 	count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2231 	if (count <= 0)
2232 		return -EINVAL;
2233 
2234 	st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2235 	if (!st)
2236 		return -ENOMEM;
2237 
2238 	/* Loop over the phandles until all the requested entries are found */
2239 	of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2240 		np = it.node;
2241 		match_id = of_match_node(idle_state_match, np);
2242 		if (!match_id)
2243 			continue;
2244 		ret = genpd_parse_state(&st[i++], np);
2245 		if (ret) {
2246 			pr_err
2247 			("Parsing idle state node %pOF failed with err %d\n",
2248 							np, ret);
2249 			of_node_put(np);
2250 			kfree(st);
2251 			return ret;
2252 		}
2253 	}
2254 
2255 	*n = i;
2256 	if (!i)
2257 		kfree(st);
2258 	else
2259 		*states = st;
2260 
2261 	return 0;
2262 }
2263 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
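/*
 * Illustrative sketch (not part of this file): a provider driver consuming
 * of_genpd_parse_idle_states() to populate its genpd before registration. The
 * names (foo_pd, foo_pd_setup_states) are hypothetical; the corresponding DT
 * node is assumed to carry a "domain-idle-states" phandle list whose targets
 * are compatible with "domain-idle-state" and provide entry-latency-us,
 * exit-latency-us and optionally min-residency-us, as parsed above.
 *
 *	static int foo_pd_setup_states(struct device_node *np,
 *				       struct generic_pm_domain *foo_pd)
 *	{
 *		struct genpd_power_state *states;
 *		int nr_states, ret;
 *
 *		ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *		if (ret)
 *			return ret;
 *
 *		foo_pd->states = states;
 *		foo_pd->state_count = nr_states;
 *
 *		// The states array must stay around; genpd keeps the pointer.
 *		return pm_genpd_init(foo_pd, NULL, false);
 *	}
 */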
2264 
2265 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2266 
2267 
2268 /***        debugfs support        ***/
2269 
2270 #ifdef CONFIG_DEBUG_FS
2271 #include <linux/pm.h>
2272 #include <linux/device.h>
2273 #include <linux/debugfs.h>
2274 #include <linux/seq_file.h>
2275 #include <linux/init.h>
2276 #include <linux/kobject.h>
2277 static struct dentry *pm_genpd_debugfs_dir;
2278 
2279 /*
2280  * TODO: This function is a slightly modified version of rtpm_status_show
2281  * from sysfs.c, so generalize it.
2282  */
2283 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2284 {
2285 	static const char * const status_lookup[] = {
2286 		[RPM_ACTIVE] = "active",
2287 		[RPM_RESUMING] = "resuming",
2288 		[RPM_SUSPENDED] = "suspended",
2289 		[RPM_SUSPENDING] = "suspending"
2290 	};
2291 	const char *p = "";
2292 
2293 	if (dev->power.runtime_error)
2294 		p = "error";
2295 	else if (dev->power.disable_depth)
2296 		p = "unsupported";
2297 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2298 		p = status_lookup[dev->power.runtime_status];
2299 	else
2300 		WARN_ON(1);
2301 
2302 	seq_puts(s, p);
2303 }
2304 
2305 static int pm_genpd_summary_one(struct seq_file *s,
2306 				struct generic_pm_domain *genpd)
2307 {
2308 	static const char * const status_lookup[] = {
2309 		[GPD_STATE_ACTIVE] = "on",
2310 		[GPD_STATE_POWER_OFF] = "off"
2311 	};
2312 	struct pm_domain_data *pm_data;
2313 	const char *kobj_path;
2314 	struct gpd_link *link;
2315 	char state[16];
2316 	int ret;
2317 
2318 	ret = genpd_lock_interruptible(genpd);
2319 	if (ret)
2320 		return -ERESTARTSYS;
2321 
2322 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2323 		goto exit;
2324 	if (!genpd_status_on(genpd))
2325 		snprintf(state, sizeof(state), "%s-%u",
2326 			 status_lookup[genpd->status], genpd->state_idx);
2327 	else
2328 		snprintf(state, sizeof(state), "%s",
2329 			 status_lookup[genpd->status]);
2330 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2331 
2332 	/*
2333 	 * Modifications to the list require holding locks on both
2334 	 * master and slave, so we are safe.
2335 	 * Also genpd->name is immutable.
2336 	 */
2337 	list_for_each_entry(link, &genpd->master_links, master_node) {
2338 		seq_printf(s, "%s", link->slave->name);
2339 		if (!list_is_last(&link->master_node, &genpd->master_links))
2340 			seq_puts(s, ", ");
2341 	}
2342 
2343 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2344 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2345 				genpd_is_irq_safe(genpd) ?
2346 				GFP_ATOMIC : GFP_KERNEL);
2347 		if (kobj_path == NULL)
2348 			continue;
2349 
2350 		seq_printf(s, "\n    %-50s  ", kobj_path);
2351 		rtpm_status_str(s, pm_data->dev);
2352 		kfree(kobj_path);
2353 	}
2354 
2355 	seq_puts(s, "\n");
2356 exit:
2357 	genpd_unlock(genpd);
2358 
2359 	return 0;
2360 }
2361 
2362 static int genpd_summary_show(struct seq_file *s, void *data)
2363 {
2364 	struct generic_pm_domain *genpd;
2365 	int ret = 0;
2366 
2367 	seq_puts(s, "domain                          status          slaves\n");
2368 	seq_puts(s, "    /device                                             runtime status\n");
2369 	seq_puts(s, "----------------------------------------------------------------------\n");
2370 
2371 	ret = mutex_lock_interruptible(&gpd_list_lock);
2372 	if (ret)
2373 		return -ERESTARTSYS;
2374 
2375 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2376 		ret = pm_genpd_summary_one(s, genpd);
2377 		if (ret)
2378 			break;
2379 	}
2380 	mutex_unlock(&gpd_list_lock);
2381 
2382 	return ret;
2383 }
2384 
2385 static int genpd_status_show(struct seq_file *s, void *data)
2386 {
2387 	static const char * const status_lookup[] = {
2388 		[GPD_STATE_ACTIVE] = "on",
2389 		[GPD_STATE_POWER_OFF] = "off"
2390 	};
2391 
2392 	struct generic_pm_domain *genpd = s->private;
2393 	int ret = 0;
2394 
2395 	ret = genpd_lock_interruptible(genpd);
2396 	if (ret)
2397 		return -ERESTARTSYS;
2398 
2399 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2400 		goto exit;
2401 
2402 	if (genpd->status == GPD_STATE_POWER_OFF)
2403 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2404 			genpd->state_idx);
2405 	else
2406 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
2407 exit:
2408 	genpd_unlock(genpd);
2409 	return ret;
2410 }
2411 
2412 static int genpd_sub_domains_show(struct seq_file *s, void *data)
2413 {
2414 	struct generic_pm_domain *genpd = s->private;
2415 	struct gpd_link *link;
2416 	int ret = 0;
2417 
2418 	ret = genpd_lock_interruptible(genpd);
2419 	if (ret)
2420 		return -ERESTARTSYS;
2421 
2422 	list_for_each_entry(link, &genpd->master_links, master_node)
2423 		seq_printf(s, "%s\n", link->slave->name);
2424 
2425 	genpd_unlock(genpd);
2426 	return ret;
2427 }
2428 
2429 static int genpd_idle_states_show(struct seq_file *s, void *data)
2430 {
2431 	struct generic_pm_domain *genpd = s->private;
2432 	unsigned int i;
2433 	int ret = 0;
2434 
2435 	ret = genpd_lock_interruptible(genpd);
2436 	if (ret)
2437 		return -ERESTARTSYS;
2438 
2439 	seq_puts(s, "State          Time Spent(ms)\n");
2440 
2441 	for (i = 0; i < genpd->state_count; i++) {
2442 		ktime_t delta = 0;
2443 		s64 msecs;
2444 
2445 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2446 				(genpd->state_idx == i))
2447 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2448 
2449 		msecs = ktime_to_ms(
2450 			ktime_add(genpd->states[i].idle_time, delta));
2451 		seq_printf(s, "S%-13i %lld\n", i, msecs);
2452 	}
2453 
2454 	genpd_unlock(genpd);
2455 	return ret;
2456 }
2457 
2458 static int genpd_active_time_show(struct seq_file *s, void *data)
2459 {
2460 	struct generic_pm_domain *genpd = s->private;
2461 	ktime_t delta = 0;
2462 	int ret = 0;
2463 
2464 	ret = genpd_lock_interruptible(genpd);
2465 	if (ret)
2466 		return -ERESTARTSYS;
2467 
2468 	if (genpd->status == GPD_STATE_ACTIVE)
2469 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
2470 
2471 	seq_printf(s, "%lld ms\n", ktime_to_ms(
2472 				ktime_add(genpd->on_time, delta)));
2473 
2474 	genpd_unlock(genpd);
2475 	return ret;
2476 }
2477 
2478 static int genpd_total_idle_time_show(struct seq_file *s, void *data)
2479 {
2480 	struct generic_pm_domain *genpd = s->private;
2481 	ktime_t delta = 0, total = 0;
2482 	unsigned int i;
2483 	int ret = 0;
2484 
2485 	ret = genpd_lock_interruptible(genpd);
2486 	if (ret)
2487 		return -ERESTARTSYS;
2488 
2489 	for (i = 0; i < genpd->state_count; i++) {
2490 
2491 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2492 				(genpd->state_idx == i))
2493 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2494 
2495 		total = ktime_add(total, genpd->states[i].idle_time);
2496 	}
2497 	total = ktime_add(total, delta);
2498 
2499 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2500 
2501 	genpd_unlock(genpd);
2502 	return ret;
2503 }
2504 
2505 
2506 static int genpd_devices_show(struct seq_file *s, void *data)
2507 {
2508 	struct generic_pm_domain *genpd = s->private;
2509 	struct pm_domain_data *pm_data;
2510 	const char *kobj_path;
2511 	int ret = 0;
2512 
2513 	ret = genpd_lock_interruptible(genpd);
2514 	if (ret)
2515 		return -ERESTARTSYS;
2516 
2517 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2518 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2519 				genpd_is_irq_safe(genpd) ?
2520 				GFP_ATOMIC : GFP_KERNEL);
2521 		if (kobj_path == NULL)
2522 			continue;
2523 
2524 		seq_printf(s, "%s\n", kobj_path);
2525 		kfree(kobj_path);
2526 	}
2527 
2528 	genpd_unlock(genpd);
2529 	return ret;
2530 }
2531 
2532 #define define_genpd_open_function(name) \
2533 static int genpd_##name##_open(struct inode *inode, struct file *file) \
2534 { \
2535 	return single_open(file, genpd_##name##_show, inode->i_private); \
2536 }
2537 
2538 define_genpd_open_function(summary);
2539 define_genpd_open_function(status);
2540 define_genpd_open_function(sub_domains);
2541 define_genpd_open_function(idle_states);
2542 define_genpd_open_function(active_time);
2543 define_genpd_open_function(total_idle_time);
2544 define_genpd_open_function(devices);
2545 
2546 #define define_genpd_debugfs_fops(name) \
2547 static const struct file_operations genpd_##name##_fops = { \
2548 	.open = genpd_##name##_open, \
2549 	.read = seq_read, \
2550 	.llseek = seq_lseek, \
2551 	.release = single_release, \
2552 }
2553 
2554 define_genpd_debugfs_fops(summary);
2555 define_genpd_debugfs_fops(status);
2556 define_genpd_debugfs_fops(sub_domains);
2557 define_genpd_debugfs_fops(idle_states);
2558 define_genpd_debugfs_fops(active_time);
2559 define_genpd_debugfs_fops(total_idle_time);
2560 define_genpd_debugfs_fops(devices);
2561 
2562 static int __init pm_genpd_debug_init(void)
2563 {
2564 	struct dentry *d;
2565 	struct generic_pm_domain *genpd;
2566 
2567 	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2568 
2569 	if (!pm_genpd_debugfs_dir)
2570 		return -ENOMEM;
2571 
2572 	d = debugfs_create_file("pm_genpd_summary", 0444,
2573 			pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
2574 	if (!d)
2575 		return -ENOMEM;
2576 
2577 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2578 		d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
2579 		if (!d)
2580 			return -ENOMEM;
2581 
2582 		debugfs_create_file("current_state", 0444,
2583 				d, genpd, &genpd_status_fops);
2584 		debugfs_create_file("sub_domains", 0444,
2585 				d, genpd, &genpd_sub_domains_fops);
2586 		debugfs_create_file("idle_states", 0444,
2587 				d, genpd, &genpd_idle_states_fops);
2588 		debugfs_create_file("active_time", 0444,
2589 				d, genpd, &genpd_active_time_fops);
2590 		debugfs_create_file("total_idle_time", 0444,
2591 				d, genpd, &genpd_total_idle_time_fops);
2592 		debugfs_create_file("devices", 0444,
2593 				d, genpd, &genpd_devices_fops);
2594 	}
2595 
2596 	return 0;
2597 }
2598 late_initcall(pm_genpd_debug_init);
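/*
 * For reference, the debugfs hierarchy created above looks like this (one
 * sub-directory per registered genpd, named after genpd->name, with debugfs
 * assumed to be mounted at the usual /sys/kernel/debug location):
 *
 *	/sys/kernel/debug/pm_genpd/
 *		pm_genpd_summary
 *		<domain-name>/
 *			current_state
 *			sub_domains
 *			idle_states
 *			active_time
 *			total_idle_time
 *			devices
 */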
2599 
2600 static void __exit pm_genpd_debug_exit(void)
2601 {
2602 	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2603 }
2604 __exitcall(pm_genpd_debug_exit);
2605 #endif /* CONFIG_DEBUG_FS */
2606