xref: /openbmc/linux/drivers/base/power/domain.c (revision 95777591)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 
24 #include "power.h"
25 
26 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
27 
28 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
29 ({								\
30 	type (*__routine)(struct device *__d); 			\
31 	type __ret = (type)0;					\
32 								\
33 	__routine = genpd->dev_ops.callback; 			\
34 	if (__routine) {					\
35 		__ret = __routine(dev); 			\
36 	}							\
37 	__ret;							\
38 })
39 
40 static LIST_HEAD(gpd_list);
41 static DEFINE_MUTEX(gpd_list_lock);
42 
43 struct genpd_lock_ops {
44 	void (*lock)(struct generic_pm_domain *genpd);
45 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
46 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
47 	void (*unlock)(struct generic_pm_domain *genpd);
48 };
49 
50 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
51 {
52 	mutex_lock(&genpd->mlock);
53 }
54 
55 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
56 					int depth)
57 {
58 	mutex_lock_nested(&genpd->mlock, depth);
59 }
60 
61 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
62 {
63 	return mutex_lock_interruptible(&genpd->mlock);
64 }
65 
66 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
67 {
68 	return mutex_unlock(&genpd->mlock);
69 }
70 
71 static const struct genpd_lock_ops genpd_mtx_ops = {
72 	.lock = genpd_lock_mtx,
73 	.lock_nested = genpd_lock_nested_mtx,
74 	.lock_interruptible = genpd_lock_interruptible_mtx,
75 	.unlock = genpd_unlock_mtx,
76 };
77 
78 static void genpd_lock_spin(struct generic_pm_domain *genpd)
79 	__acquires(&genpd->slock)
80 {
81 	unsigned long flags;
82 
83 	spin_lock_irqsave(&genpd->slock, flags);
84 	genpd->lock_flags = flags;
85 }
86 
87 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
88 					int depth)
89 	__acquires(&genpd->slock)
90 {
91 	unsigned long flags;
92 
93 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
94 	genpd->lock_flags = flags;
95 }
96 
97 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
98 	__acquires(&genpd->slock)
99 {
100 	unsigned long flags;
101 
102 	spin_lock_irqsave(&genpd->slock, flags);
103 	genpd->lock_flags = flags;
104 	return 0;
105 }
106 
107 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
108 	__releases(&genpd->slock)
109 {
110 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
111 }
112 
113 static const struct genpd_lock_ops genpd_spin_ops = {
114 	.lock = genpd_lock_spin,
115 	.lock_nested = genpd_lock_nested_spin,
116 	.lock_interruptible = genpd_lock_interruptible_spin,
117 	.unlock = genpd_unlock_spin,
118 };
119 
120 #define genpd_lock(p)			p->lock_ops->lock(p)
121 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
122 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
123 #define genpd_unlock(p)			p->lock_ops->unlock(p)
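/*
 * Note: the lock implementation behind these wrappers is chosen in
 * genpd_lock_init() below. Domains flagged GENPD_FLAG_IRQ_SAFE use a
 * spinlock so they can be powered on/off in atomic context; all other
 * domains use a mutex.
 */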
124 
125 #define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
126 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
127 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
128 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
129 
130 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
131 		const struct generic_pm_domain *genpd)
132 {
133 	bool ret;
134 
135 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
136 
137 	/*
138 	 * Warn once if an IRQ safe device is attached to a no sleep domain,
139 	 * to indicate a suboptimal configuration for PM. For an always on
140 	 * domain this isn't the case, thus don't warn.
141 	 */
142 	if (ret && !genpd_is_always_on(genpd))
143 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
144 				genpd->name);
145 
146 	return ret;
147 }
148 
149 /*
150  * Get the generic PM domain for a particular struct device.
151  * This validates the struct device pointer, the PM domain pointer,
152  * and checks that the PM domain pointer is a real generic PM domain.
153  * Any failure results in NULL being returned.
154  */
155 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
156 {
157 	struct generic_pm_domain *genpd = NULL, *gpd;
158 
159 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
160 		return NULL;
161 
162 	mutex_lock(&gpd_list_lock);
163 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
164 		if (&gpd->domain == dev->pm_domain) {
165 			genpd = gpd;
166 			break;
167 		}
168 	}
169 	mutex_unlock(&gpd_list_lock);
170 
171 	return genpd;
172 }
173 
174 /*
175  * This should only be used where we are certain that the pm_domain
176  * attached to the device is a genpd domain.
177  */
178 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
179 {
180 	if (IS_ERR_OR_NULL(dev->pm_domain))
181 		return ERR_PTR(-EINVAL);
182 
183 	return pd_to_genpd(dev->pm_domain);
184 }
185 
186 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
187 			  struct device *dev)
188 {
189 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
190 }
191 
192 static int genpd_start_dev(const struct generic_pm_domain *genpd,
193 			   struct device *dev)
194 {
195 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
196 }
197 
198 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
199 {
200 	bool ret = false;
201 
202 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
203 		ret = !!atomic_dec_and_test(&genpd->sd_count);
204 
205 	return ret;
206 }
207 
208 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
209 {
210 	atomic_inc(&genpd->sd_count);
211 	smp_mb__after_atomic();
212 }
213 
214 #ifdef CONFIG_DEBUG_FS
215 static void genpd_update_accounting(struct generic_pm_domain *genpd)
216 {
217 	ktime_t delta, now;
218 
219 	now = ktime_get();
220 	delta = ktime_sub(now, genpd->accounting_time);
221 
222 	/*
223 	 * If genpd->status is active, the domain has just come out of the off
224 	 * state, so the elapsed time is added to the idle time; otherwise it
225 	 * has just been powered off, so the time is added to the on time.
226 	 */
227 	if (genpd->status == GPD_STATE_ACTIVE) {
228 		int state_idx = genpd->state_idx;
229 
230 		genpd->states[state_idx].idle_time =
231 			ktime_add(genpd->states[state_idx].idle_time, delta);
232 	} else {
233 		genpd->on_time = ktime_add(genpd->on_time, delta);
234 	}
235 
236 	genpd->accounting_time = now;
237 }
238 #else
239 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
240 #endif
241 
242 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
243 					   unsigned int state)
244 {
245 	struct generic_pm_domain_data *pd_data;
246 	struct pm_domain_data *pdd;
247 	struct gpd_link *link;
248 
249 	/* New requested state is the same as the max requested state */
250 	if (state == genpd->performance_state)
251 		return state;
252 
253 	/* New requested state is higher than the max requested state */
254 	if (state > genpd->performance_state)
255 		return state;
256 
257 	/* Traverse all devices within the domain */
258 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
259 		pd_data = to_gpd_data(pdd);
260 
261 		if (pd_data->performance_state > state)
262 			state = pd_data->performance_state;
263 	}
264 
265 	/*
266 	 * Traverse all sub-domains within the domain. This can be
267 	 * done without any additional locking as the link->performance_state
268 	 * field is protected by the master genpd->lock, which is already taken.
269 	 *
270 	 * Also note that link->performance_state (subdomain's performance state
271 	 * requirement to master domain) is different from
272 	 * link->slave->performance_state (current performance state requirement
273 	 * of the devices/sub-domains of the subdomain) and so can have a
274 	 * different value.
275 	 *
276 	 * Note that we also take vote from powered-off sub-domains into account
277 	 * as the same is done for devices right now.
278 	 */
279 	list_for_each_entry(link, &genpd->master_links, master_node) {
280 		if (link->performance_state > state)
281 			state = link->performance_state;
282 	}
283 
284 	return state;
285 }
286 
287 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
288 					unsigned int state, int depth)
289 {
290 	struct generic_pm_domain *master;
291 	struct gpd_link *link;
292 	int master_state, ret;
293 
294 	if (state == genpd->performance_state)
295 		return 0;
296 
297 	/* Propagate to masters of genpd */
298 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
299 		master = link->master;
300 
301 		if (!master->set_performance_state)
302 			continue;
303 
304 		/* Find master's performance state */
305 		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
306 							 master->opp_table,
307 							 state);
308 		if (unlikely(ret < 0))
309 			goto err;
310 
311 		master_state = ret;
312 
313 		genpd_lock_nested(master, depth + 1);
314 
315 		link->prev_performance_state = link->performance_state;
316 		link->performance_state = master_state;
317 		master_state = _genpd_reeval_performance_state(master,
318 						master_state);
319 		ret = _genpd_set_performance_state(master, master_state, depth + 1);
320 		if (ret)
321 			link->performance_state = link->prev_performance_state;
322 
323 		genpd_unlock(master);
324 
325 		if (ret)
326 			goto err;
327 	}
328 
329 	ret = genpd->set_performance_state(genpd, state);
330 	if (ret)
331 		goto err;
332 
333 	genpd->performance_state = state;
334 	return 0;
335 
336 err:
337 	/* Encountered an error, let's roll back */
338 	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
339 					     slave_node) {
340 		master = link->master;
341 
342 		if (!master->set_performance_state)
343 			continue;
344 
345 		genpd_lock_nested(master, depth + 1);
346 
347 		master_state = link->prev_performance_state;
348 		link->performance_state = master_state;
349 
350 		master_state = _genpd_reeval_performance_state(master,
351 						master_state);
352 		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
353 			pr_err("%s: Failed to roll back to %d performance state\n",
354 			       master->name, master_state);
355 		}
356 
357 		genpd_unlock(master);
358 	}
359 
360 	return ret;
361 }
362 
363 /**
364  * dev_pm_genpd_set_performance_state - Set performance state of device's power
365  * domain.
366  *
367  * @dev: Device for which the performance-state needs to be set.
368  * @state: Target performance state of the device. This can be set to 0 when
369  *	   the device no longer has any performance state constraints (and so
370  *	   no longer participates in determining the target performance state
371  *	   of the genpd).
372  *
373  * It is assumed that the caller guarantees that the genpd won't be detached
374  * while this routine is running.
375  *
376  * Returns 0 on success and negative error values on failures.
377  */
378 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
379 {
380 	struct generic_pm_domain *genpd;
381 	struct generic_pm_domain_data *gpd_data;
382 	unsigned int prev;
383 	int ret;
384 
385 	genpd = dev_to_genpd(dev);
386 	if (IS_ERR(genpd))
387 		return -ENODEV;
388 
389 	if (unlikely(!genpd->set_performance_state))
390 		return -EINVAL;
391 
392 	if (unlikely(!dev->power.subsys_data ||
393 		     !dev->power.subsys_data->domain_data)) {
394 		WARN_ON(1);
395 		return -EINVAL;
396 	}
397 
398 	genpd_lock(genpd);
399 
400 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
401 	prev = gpd_data->performance_state;
402 	gpd_data->performance_state = state;
403 
404 	state = _genpd_reeval_performance_state(genpd, state);
405 	ret = _genpd_set_performance_state(genpd, state, 0);
406 	if (ret)
407 		gpd_data->performance_state = prev;
408 
409 	genpd_unlock(genpd);
410 
411 	return ret;
412 }
413 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
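/*
 * Usage sketch (hypothetical consumer driver, not part of this file): a
 * device attached to a genpd whose provider implements
 * ->set_performance_state() may request a performance level and later drop
 * the constraint by passing 0. The foo_* name and the state value are made
 * up for illustration.
 *
 *	static int foo_run_workload(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_genpd_set_performance_state(dev, 3);
 *		if (ret)
 *			return ret;
 *
 *		// ... do work at the higher performance level ...
 *
 *		return dev_pm_genpd_set_performance_state(dev, 0);
 *	}
 */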
414 
415 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
416 {
417 	unsigned int state_idx = genpd->state_idx;
418 	ktime_t time_start;
419 	s64 elapsed_ns;
420 	int ret;
421 
422 	if (!genpd->power_on)
423 		return 0;
424 
425 	if (!timed)
426 		return genpd->power_on(genpd);
427 
428 	time_start = ktime_get();
429 	ret = genpd->power_on(genpd);
430 	if (ret)
431 		return ret;
432 
433 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
434 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
435 		return ret;
436 
437 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
438 	genpd->max_off_time_changed = true;
439 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
440 		 genpd->name, "on", elapsed_ns);
441 
442 	return ret;
443 }
444 
445 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
446 {
447 	unsigned int state_idx = genpd->state_idx;
448 	ktime_t time_start;
449 	s64 elapsed_ns;
450 	int ret;
451 
452 	if (!genpd->power_off)
453 		return 0;
454 
455 	if (!timed)
456 		return genpd->power_off(genpd);
457 
458 	time_start = ktime_get();
459 	ret = genpd->power_off(genpd);
460 	if (ret == -EBUSY)
461 		return ret;
462 
463 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
464 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
465 		return ret;
466 
467 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
468 	genpd->max_off_time_changed = true;
469 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
470 		 genpd->name, "off", elapsed_ns);
471 
472 	return ret;
473 }
474 
475 /**
476  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
477  * @genpd: PM domain to power off.
478  *
479  * Queue up the execution of genpd_power_off() unless it's already been done
480  * before.
481  */
482 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
483 {
484 	queue_work(pm_wq, &genpd->power_off_work);
485 }
486 
487 /**
488  * genpd_power_off - Remove power from a given PM domain.
489  * @genpd: PM domain to power down.
490  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
491  * RPM status of the related device is in an intermediate state, not yet turned
492  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
493  * be RPM_SUSPENDED, while it tries to power off the PM domain.
494  *
495  * If all of the @genpd's devices have been suspended and all of its subdomains
496  * have been powered down, remove power from @genpd.
497  */
498 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
499 			   unsigned int depth)
500 {
501 	struct pm_domain_data *pdd;
502 	struct gpd_link *link;
503 	unsigned int not_suspended = 0;
504 
505 	/*
506 	 * Do not try to power off the domain in the following situations:
507 	 * (1) The domain is already in the "power off" state.
508 	 * (2) System suspend is in progress.
509 	 */
510 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
511 		return 0;
512 
513 	/*
514 	 * Abort power off for the PM domain in the following situations:
515 	 * (1) The domain is configured as always on.
516 	 * (2) When the domain has a subdomain being powered on.
517 	 */
518 	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
519 		return -EBUSY;
520 
521 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
522 		enum pm_qos_flags_status stat;
523 
524 		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
525 		if (stat > PM_QOS_FLAGS_NONE)
526 			return -EBUSY;
527 
528 		/*
529 		 * Do not allow PM domain to be powered off, when an IRQ safe
530 		 * device is part of a non-IRQ safe domain.
531 		 */
532 		if (!pm_runtime_suspended(pdd->dev) ||
533 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
534 			not_suspended++;
535 	}
536 
537 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
538 		return -EBUSY;
539 
540 	if (genpd->gov && genpd->gov->power_down_ok) {
541 		if (!genpd->gov->power_down_ok(&genpd->domain))
542 			return -EAGAIN;
543 	}
544 
545 	/* Default to shallowest state. */
546 	if (!genpd->gov)
547 		genpd->state_idx = 0;
548 
549 	if (genpd->power_off) {
550 		int ret;
551 
552 		if (atomic_read(&genpd->sd_count) > 0)
553 			return -EBUSY;
554 
555 		/*
556 		 * If sd_count > 0 at this point, one of the subdomains hasn't
557 		 * managed to call genpd_power_on() for the master yet after
558 		 * incrementing it.  In that case genpd_power_on() will wait
559 		 * for us to drop the lock, so we can call .power_off() and let
560 		 * the genpd_power_on() restore power for us (this shouldn't
561 		 * happen very often).
562 		 */
563 		ret = _genpd_power_off(genpd, true);
564 		if (ret)
565 			return ret;
566 	}
567 
568 	genpd->status = GPD_STATE_POWER_OFF;
569 	genpd_update_accounting(genpd);
570 
571 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
572 		genpd_sd_counter_dec(link->master);
573 		genpd_lock_nested(link->master, depth + 1);
574 		genpd_power_off(link->master, false, depth + 1);
575 		genpd_unlock(link->master);
576 	}
577 
578 	return 0;
579 }
580 
581 /**
582  * genpd_power_on - Restore power to a given PM domain and its masters.
583  * @genpd: PM domain to power up.
584  * @depth: nesting count for lockdep.
585  *
586  * Restore power to @genpd and all of its masters so that it is possible to
587  * resume a device belonging to it.
588  */
589 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
590 {
591 	struct gpd_link *link;
592 	int ret = 0;
593 
594 	if (genpd_status_on(genpd))
595 		return 0;
596 
597 	/*
598 	 * The list is guaranteed not to change while the loop below is being
599 	 * executed, unless one of the masters' .power_on() callbacks fiddles
600 	 * with it.
601 	 */
602 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
603 		struct generic_pm_domain *master = link->master;
604 
605 		genpd_sd_counter_inc(master);
606 
607 		genpd_lock_nested(master, depth + 1);
608 		ret = genpd_power_on(master, depth + 1);
609 		genpd_unlock(master);
610 
611 		if (ret) {
612 			genpd_sd_counter_dec(master);
613 			goto err;
614 		}
615 	}
616 
617 	ret = _genpd_power_on(genpd, true);
618 	if (ret)
619 		goto err;
620 
621 	genpd->status = GPD_STATE_ACTIVE;
622 	genpd_update_accounting(genpd);
623 
624 	return 0;
625 
626  err:
627 	list_for_each_entry_continue_reverse(link,
628 					&genpd->slave_links,
629 					slave_node) {
630 		genpd_sd_counter_dec(link->master);
631 		genpd_lock_nested(link->master, depth + 1);
632 		genpd_power_off(link->master, false, depth + 1);
633 		genpd_unlock(link->master);
634 	}
635 
636 	return ret;
637 }
638 
639 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
640 				     unsigned long val, void *ptr)
641 {
642 	struct generic_pm_domain_data *gpd_data;
643 	struct device *dev;
644 
645 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
646 	dev = gpd_data->base.dev;
647 
648 	for (;;) {
649 		struct generic_pm_domain *genpd;
650 		struct pm_domain_data *pdd;
651 
652 		spin_lock_irq(&dev->power.lock);
653 
654 		pdd = dev->power.subsys_data ?
655 				dev->power.subsys_data->domain_data : NULL;
656 		if (pdd) {
657 			to_gpd_data(pdd)->td.constraint_changed = true;
658 			genpd = dev_to_genpd(dev);
659 		} else {
660 			genpd = ERR_PTR(-ENODATA);
661 		}
662 
663 		spin_unlock_irq(&dev->power.lock);
664 
665 		if (!IS_ERR(genpd)) {
666 			genpd_lock(genpd);
667 			genpd->max_off_time_changed = true;
668 			genpd_unlock(genpd);
669 		}
670 
671 		dev = dev->parent;
672 		if (!dev || dev->power.ignore_children)
673 			break;
674 	}
675 
676 	return NOTIFY_DONE;
677 }
678 
679 /**
680  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
681  * @work: Work structure used for scheduling the execution of this function.
682  */
683 static void genpd_power_off_work_fn(struct work_struct *work)
684 {
685 	struct generic_pm_domain *genpd;
686 
687 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
688 
689 	genpd_lock(genpd);
690 	genpd_power_off(genpd, false, 0);
691 	genpd_unlock(genpd);
692 }
693 
694 /**
695  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
696  * @dev: Device to handle.
697  */
698 static int __genpd_runtime_suspend(struct device *dev)
699 {
700 	int (*cb)(struct device *__dev);
701 
702 	if (dev->type && dev->type->pm)
703 		cb = dev->type->pm->runtime_suspend;
704 	else if (dev->class && dev->class->pm)
705 		cb = dev->class->pm->runtime_suspend;
706 	else if (dev->bus && dev->bus->pm)
707 		cb = dev->bus->pm->runtime_suspend;
708 	else
709 		cb = NULL;
710 
711 	if (!cb && dev->driver && dev->driver->pm)
712 		cb = dev->driver->pm->runtime_suspend;
713 
714 	return cb ? cb(dev) : 0;
715 }
716 
717 /**
718  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
719  * @dev: Device to handle.
720  */
721 static int __genpd_runtime_resume(struct device *dev)
722 {
723 	int (*cb)(struct device *__dev);
724 
725 	if (dev->type && dev->type->pm)
726 		cb = dev->type->pm->runtime_resume;
727 	else if (dev->class && dev->class->pm)
728 		cb = dev->class->pm->runtime_resume;
729 	else if (dev->bus && dev->bus->pm)
730 		cb = dev->bus->pm->runtime_resume;
731 	else
732 		cb = NULL;
733 
734 	if (!cb && dev->driver && dev->driver->pm)
735 		cb = dev->driver->pm->runtime_resume;
736 
737 	return cb ? cb(dev) : 0;
738 }
739 
740 /**
741  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
742  * @dev: Device to suspend.
743  *
744  * Carry out a runtime suspend of a device under the assumption that its
745  * pm_domain field points to the domain member of an object of type
746  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
747  */
748 static int genpd_runtime_suspend(struct device *dev)
749 {
750 	struct generic_pm_domain *genpd;
751 	bool (*suspend_ok)(struct device *__dev);
752 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
753 	bool runtime_pm = pm_runtime_enabled(dev);
754 	ktime_t time_start;
755 	s64 elapsed_ns;
756 	int ret;
757 
758 	dev_dbg(dev, "%s()\n", __func__);
759 
760 	genpd = dev_to_genpd(dev);
761 	if (IS_ERR(genpd))
762 		return -EINVAL;
763 
764 	/*
765 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
766 	 * callbacks for other purposes than runtime PM. In those scenarios
767 	 * runtime PM is disabled. Under these circumstances, we shall skip
768 	 * validating/measuring the PM QoS latency.
769 	 */
770 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
771 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
772 		return -EBUSY;
773 
774 	/* Measure suspend latency. */
775 	time_start = 0;
776 	if (runtime_pm)
777 		time_start = ktime_get();
778 
779 	ret = __genpd_runtime_suspend(dev);
780 	if (ret)
781 		return ret;
782 
783 	ret = genpd_stop_dev(genpd, dev);
784 	if (ret) {
785 		__genpd_runtime_resume(dev);
786 		return ret;
787 	}
788 
789 	/* Update suspend latency value if the measured time exceeds it. */
790 	if (runtime_pm) {
791 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
792 		if (elapsed_ns > td->suspend_latency_ns) {
793 			td->suspend_latency_ns = elapsed_ns;
794 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
795 				elapsed_ns);
796 			genpd->max_off_time_changed = true;
797 			td->constraint_changed = true;
798 		}
799 	}
800 
801 	/*
802 	 * If power.irq_safe is set, this routine may be run with
803 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
804 	 */
805 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
806 		return 0;
807 
808 	genpd_lock(genpd);
809 	genpd_power_off(genpd, true, 0);
810 	genpd_unlock(genpd);
811 
812 	return 0;
813 }
814 
815 /**
816  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
817  * @dev: Device to resume.
818  *
819  * Carry out a runtime resume of a device under the assumption that its
820  * pm_domain field points to the domain member of an object of type
821  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
822  */
823 static int genpd_runtime_resume(struct device *dev)
824 {
825 	struct generic_pm_domain *genpd;
826 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
827 	bool runtime_pm = pm_runtime_enabled(dev);
828 	ktime_t time_start;
829 	s64 elapsed_ns;
830 	int ret;
831 	bool timed = true;
832 
833 	dev_dbg(dev, "%s()\n", __func__);
834 
835 	genpd = dev_to_genpd(dev);
836 	if (IS_ERR(genpd))
837 		return -EINVAL;
838 
839 	/*
840 	 * As we don't power off a non-IRQ-safe domain that holds
841 	 * an IRQ-safe device, we don't need to restore power to it.
842 	 */
843 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
844 		timed = false;
845 		goto out;
846 	}
847 
848 	genpd_lock(genpd);
849 	ret = genpd_power_on(genpd, 0);
850 	genpd_unlock(genpd);
851 
852 	if (ret)
853 		return ret;
854 
855  out:
856 	/* Measure resume latency. */
857 	time_start = 0;
858 	if (timed && runtime_pm)
859 		time_start = ktime_get();
860 
861 	ret = genpd_start_dev(genpd, dev);
862 	if (ret)
863 		goto err_poweroff;
864 
865 	ret = __genpd_runtime_resume(dev);
866 	if (ret)
867 		goto err_stop;
868 
869 	/* Update resume latency value if the measured time exceeds it. */
870 	if (timed && runtime_pm) {
871 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
872 		if (elapsed_ns > td->resume_latency_ns) {
873 			td->resume_latency_ns = elapsed_ns;
874 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
875 				elapsed_ns);
876 			genpd->max_off_time_changed = true;
877 			td->constraint_changed = true;
878 		}
879 	}
880 
881 	return 0;
882 
883 err_stop:
884 	genpd_stop_dev(genpd, dev);
885 err_poweroff:
886 	if (!pm_runtime_is_irq_safe(dev) ||
887 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
888 		genpd_lock(genpd);
889 		genpd_power_off(genpd, true, 0);
890 		genpd_unlock(genpd);
891 	}
892 
893 	return ret;
894 }
895 
896 static bool pd_ignore_unused;
897 static int __init pd_ignore_unused_setup(char *__unused)
898 {
899 	pd_ignore_unused = true;
900 	return 1;
901 }
902 __setup("pd_ignore_unused", pd_ignore_unused_setup);
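
/*
 * For debugging, unused domains can be kept powered on by adding
 * "pd_ignore_unused" to the kernel command line. That sets the flag above,
 * which makes genpd_power_off_unused() below return without queueing any
 * power-off work.
 */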
903 
904 /**
905  * genpd_power_off_unused - Power off all PM domains with no devices in use.
906  */
907 static int __init genpd_power_off_unused(void)
908 {
909 	struct generic_pm_domain *genpd;
910 
911 	if (pd_ignore_unused) {
912 		pr_warn("genpd: Not disabling unused power domains\n");
913 		return 0;
914 	}
915 
916 	mutex_lock(&gpd_list_lock);
917 
918 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
919 		genpd_queue_power_off_work(genpd);
920 
921 	mutex_unlock(&gpd_list_lock);
922 
923 	return 0;
924 }
925 late_initcall(genpd_power_off_unused);
926 
927 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
928 
929 static bool genpd_present(const struct generic_pm_domain *genpd)
930 {
931 	const struct generic_pm_domain *gpd;
932 
933 	if (IS_ERR_OR_NULL(genpd))
934 		return false;
935 
936 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
937 		if (gpd == genpd)
938 			return true;
939 
940 	return false;
941 }
942 
943 #endif
944 
945 #ifdef CONFIG_PM_SLEEP
946 
947 /**
948  * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
949  * @genpd: PM domain to power off, if possible.
950  * @use_lock: use the lock.
951  * @depth: nesting count for lockdep.
952  *
953  * Check if the given PM domain can be powered off (during system suspend or
954  * hibernation) and do that if so.  Also, in that case propagate to its masters.
955  *
956  * This function is only called in "noirq" and "syscore" stages of system power
957  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
958  * these cases the lock must be held.
959  */
960 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
961 				 unsigned int depth)
962 {
963 	struct gpd_link *link;
964 
965 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
966 		return;
967 
968 	if (genpd->suspended_count != genpd->device_count
969 	    || atomic_read(&genpd->sd_count) > 0)
970 		return;
971 
972 	/* Choose the deepest state when suspending */
973 	genpd->state_idx = genpd->state_count - 1;
974 	if (_genpd_power_off(genpd, false))
975 		return;
976 
977 	genpd->status = GPD_STATE_POWER_OFF;
978 
979 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
980 		genpd_sd_counter_dec(link->master);
981 
982 		if (use_lock)
983 			genpd_lock_nested(link->master, depth + 1);
984 
985 		genpd_sync_power_off(link->master, use_lock, depth + 1);
986 
987 		if (use_lock)
988 			genpd_unlock(link->master);
989 	}
990 }
991 
992 /**
993  * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
994  * @genpd: PM domain to power on.
995  * @use_lock: use the lock.
996  * @depth: nesting count for lockdep.
997  *
998  * This function is only called in "noirq" and "syscore" stages of system power
999  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1000  * these cases the lock must be held.
1001  */
1002 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1003 				unsigned int depth)
1004 {
1005 	struct gpd_link *link;
1006 
1007 	if (genpd_status_on(genpd))
1008 		return;
1009 
1010 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
1011 		genpd_sd_counter_inc(link->master);
1012 
1013 		if (use_lock)
1014 			genpd_lock_nested(link->master, depth + 1);
1015 
1016 		genpd_sync_power_on(link->master, use_lock, depth + 1);
1017 
1018 		if (use_lock)
1019 			genpd_unlock(link->master);
1020 	}
1021 
1022 	_genpd_power_on(genpd, false);
1023 
1024 	genpd->status = GPD_STATE_ACTIVE;
1025 }
1026 
1027 /**
1028  * resume_needed - Check whether to resume a device before system suspend.
1029  * @dev: Device to check.
1030  * @genpd: PM domain the device belongs to.
1031  *
1032  * There are two cases in which a device that can wake up the system from sleep
1033  * states should be resumed by genpd_prepare(): (1) if the device is enabled
1034  * to wake up the system and it has to remain active for this purpose while the
1035  * system is in the sleep state and (2) if the device is not enabled to wake up
1036  * the system from sleep states and it generally doesn't generate wakeup signals
1037  * by itself (those signals are generated on its behalf by other parts of the
1038  * system).  In the latter case it may be necessary to reconfigure the device's
1039  * wakeup settings during system suspend, because it may have been set up to
1040  * signal remote wakeup from the system's working state as needed by runtime PM.
1041  * Return 'true' in either of the above cases.
1042  */
1043 static bool resume_needed(struct device *dev,
1044 			  const struct generic_pm_domain *genpd)
1045 {
1046 	bool active_wakeup;
1047 
1048 	if (!device_can_wakeup(dev))
1049 		return false;
1050 
1051 	active_wakeup = genpd_is_active_wakeup(genpd);
1052 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1053 }
1054 
1055 /**
1056  * genpd_prepare - Start power transition of a device in a PM domain.
1057  * @dev: Device to start the transition of.
1058  *
1059  * Start a power transition of a device (during a system-wide power transition)
1060  * under the assumption that its pm_domain field points to the domain member of
1061  * an object of type struct generic_pm_domain representing a PM domain
1062  * consisting of I/O devices.
1063  */
1064 static int genpd_prepare(struct device *dev)
1065 {
1066 	struct generic_pm_domain *genpd;
1067 	int ret;
1068 
1069 	dev_dbg(dev, "%s()\n", __func__);
1070 
1071 	genpd = dev_to_genpd(dev);
1072 	if (IS_ERR(genpd))
1073 		return -EINVAL;
1074 
1075 	/*
1076 	 * If a wakeup request is pending for the device, it should be woken up
1077 	 * at this point and a system wakeup event should be reported if it's
1078 	 * set up to wake up the system from sleep states.
1079 	 */
1080 	if (resume_needed(dev, genpd))
1081 		pm_runtime_resume(dev);
1082 
1083 	genpd_lock(genpd);
1084 
1085 	if (genpd->prepared_count++ == 0)
1086 		genpd->suspended_count = 0;
1087 
1088 	genpd_unlock(genpd);
1089 
1090 	ret = pm_generic_prepare(dev);
1091 	if (ret < 0) {
1092 		genpd_lock(genpd);
1093 
1094 		genpd->prepared_count--;
1095 
1096 		genpd_unlock(genpd);
1097 	}
1098 
1099 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1100 	return ret >= 0 ? 0 : ret;
1101 }
1102 
1103 /**
1104  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1105  *   I/O PM domain.
1106  * @dev: Device to suspend.
1107  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1108  *
1109  * Stop the device and remove power from the domain if all devices in it have
1110  * been stopped.
1111  */
1112 static int genpd_finish_suspend(struct device *dev, bool poweroff)
1113 {
1114 	struct generic_pm_domain *genpd;
1115 	int ret = 0;
1116 
1117 	genpd = dev_to_genpd(dev);
1118 	if (IS_ERR(genpd))
1119 		return -EINVAL;
1120 
1121 	if (poweroff)
1122 		ret = pm_generic_poweroff_noirq(dev);
1123 	else
1124 		ret = pm_generic_suspend_noirq(dev);
1125 	if (ret)
1126 		return ret;
1127 
1128 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1129 		return 0;
1130 
1131 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1132 	    !pm_runtime_status_suspended(dev)) {
1133 		ret = genpd_stop_dev(genpd, dev);
1134 		if (ret) {
1135 			if (poweroff)
1136 				pm_generic_restore_noirq(dev);
1137 			else
1138 				pm_generic_resume_noirq(dev);
1139 			return ret;
1140 		}
1141 	}
1142 
1143 	genpd_lock(genpd);
1144 	genpd->suspended_count++;
1145 	genpd_sync_power_off(genpd, true, 0);
1146 	genpd_unlock(genpd);
1147 
1148 	return 0;
1149 }
1150 
1151 /**
1152  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1153  * @dev: Device to suspend.
1154  *
1155  * Stop the device and remove power from the domain if all devices in it have
1156  * been stopped.
1157  */
1158 static int genpd_suspend_noirq(struct device *dev)
1159 {
1160 	dev_dbg(dev, "%s()\n", __func__);
1161 
1162 	return genpd_finish_suspend(dev, false);
1163 }
1164 
1165 /**
1166  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1167  * @dev: Device to resume.
1168  *
1169  * Restore power to the device's PM domain, if necessary, and start the device.
1170  */
1171 static int genpd_resume_noirq(struct device *dev)
1172 {
1173 	struct generic_pm_domain *genpd;
1174 	int ret;
1175 
1176 	dev_dbg(dev, "%s()\n", __func__);
1177 
1178 	genpd = dev_to_genpd(dev);
1179 	if (IS_ERR(genpd))
1180 		return -EINVAL;
1181 
1182 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1183 		return pm_generic_resume_noirq(dev);
1184 
1185 	genpd_lock(genpd);
1186 	genpd_sync_power_on(genpd, true, 0);
1187 	genpd->suspended_count--;
1188 	genpd_unlock(genpd);
1189 
1190 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1191 	    !pm_runtime_status_suspended(dev)) {
1192 		ret = genpd_start_dev(genpd, dev);
1193 		if (ret)
1194 			return ret;
1195 	}
1196 
1197 	return pm_generic_resume_noirq(dev);
1198 }
1199 
1200 /**
1201  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1202  * @dev: Device to freeze.
1203  *
1204  * Carry out a late freeze of a device under the assumption that its
1205  * pm_domain field points to the domain member of an object of type
1206  * struct generic_pm_domain representing a power domain consisting of I/O
1207  * devices.
1208  */
1209 static int genpd_freeze_noirq(struct device *dev)
1210 {
1211 	const struct generic_pm_domain *genpd;
1212 	int ret = 0;
1213 
1214 	dev_dbg(dev, "%s()\n", __func__);
1215 
1216 	genpd = dev_to_genpd(dev);
1217 	if (IS_ERR(genpd))
1218 		return -EINVAL;
1219 
1220 	ret = pm_generic_freeze_noirq(dev);
1221 	if (ret)
1222 		return ret;
1223 
1224 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1225 	    !pm_runtime_status_suspended(dev))
1226 		ret = genpd_stop_dev(genpd, dev);
1227 
1228 	return ret;
1229 }
1230 
1231 /**
1232  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1233  * @dev: Device to thaw.
1234  *
1235  * Start the device, unless power has been removed from the domain already
1236  * before the system transition.
1237  */
1238 static int genpd_thaw_noirq(struct device *dev)
1239 {
1240 	const struct generic_pm_domain *genpd;
1241 	int ret = 0;
1242 
1243 	dev_dbg(dev, "%s()\n", __func__);
1244 
1245 	genpd = dev_to_genpd(dev);
1246 	if (IS_ERR(genpd))
1247 		return -EINVAL;
1248 
1249 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1250 	    !pm_runtime_status_suspended(dev)) {
1251 		ret = genpd_start_dev(genpd, dev);
1252 		if (ret)
1253 			return ret;
1254 	}
1255 
1256 	return pm_generic_thaw_noirq(dev);
1257 }
1258 
1259 /**
1260  * genpd_poweroff_noirq - Completion of hibernation of device in an
1261  *   I/O PM domain.
1262  * @dev: Device to poweroff.
1263  *
1264  * Stop the device and remove power from the domain if all devices in it have
1265  * been stopped.
1266  */
1267 static int genpd_poweroff_noirq(struct device *dev)
1268 {
1269 	dev_dbg(dev, "%s()\n", __func__);
1270 
1271 	return genpd_finish_suspend(dev, true);
1272 }
1273 
1274 /**
1275  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1276  * @dev: Device to resume.
1277  *
1278  * Make sure the domain will be in the same power state as before the
1279  * hibernation the system is resuming from and start the device if necessary.
1280  */
1281 static int genpd_restore_noirq(struct device *dev)
1282 {
1283 	struct generic_pm_domain *genpd;
1284 	int ret = 0;
1285 
1286 	dev_dbg(dev, "%s()\n", __func__);
1287 
1288 	genpd = dev_to_genpd(dev);
1289 	if (IS_ERR(genpd))
1290 		return -EINVAL;
1291 
1292 	/*
1293 	 * At this point suspended_count == 0 means we are being run for the
1294 	 * first time for the given domain in the present cycle.
1295 	 */
1296 	genpd_lock(genpd);
1297 	if (genpd->suspended_count++ == 0)
1298 		/*
1299 		 * The boot kernel might put the domain into an arbitrary state,
1300 		 * so make it appear as powered off to genpd_sync_power_on(),
1301 		 * so that it tries to power it on in case it was really off.
1302 		 */
1303 		genpd->status = GPD_STATE_POWER_OFF;
1304 
1305 	genpd_sync_power_on(genpd, true, 0);
1306 	genpd_unlock(genpd);
1307 
1308 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1309 	    !pm_runtime_status_suspended(dev)) {
1310 		ret = genpd_start_dev(genpd, dev);
1311 		if (ret)
1312 			return ret;
1313 	}
1314 
1315 	return pm_generic_restore_noirq(dev);
1316 }
1317 
1318 /**
1319  * genpd_complete - Complete power transition of a device in a power domain.
1320  * @dev: Device to complete the transition of.
1321  *
1322  * Complete a power transition of a device (during a system-wide power
1323  * transition) under the assumption that its pm_domain field points to the
1324  * domain member of an object of type struct generic_pm_domain representing
1325  * a power domain consisting of I/O devices.
1326  */
1327 static void genpd_complete(struct device *dev)
1328 {
1329 	struct generic_pm_domain *genpd;
1330 
1331 	dev_dbg(dev, "%s()\n", __func__);
1332 
1333 	genpd = dev_to_genpd(dev);
1334 	if (IS_ERR(genpd))
1335 		return;
1336 
1337 	pm_generic_complete(dev);
1338 
1339 	genpd_lock(genpd);
1340 
1341 	genpd->prepared_count--;
1342 	if (!genpd->prepared_count)
1343 		genpd_queue_power_off_work(genpd);
1344 
1345 	genpd_unlock(genpd);
1346 }
1347 
1348 /**
1349  * genpd_syscore_switch - Switch power during system core suspend or resume.
1350  * @dev: Device that normally is marked as "always on" to switch power for.
1351  *
1352  * This routine may only be called during the system core (syscore) suspend or
1353  * resume phase for devices whose "always on" flags are set.
1354  */
1355 static void genpd_syscore_switch(struct device *dev, bool suspend)
1356 {
1357 	struct generic_pm_domain *genpd;
1358 
1359 	genpd = dev_to_genpd(dev);
1360 	if (!genpd_present(genpd))
1361 		return;
1362 
1363 	if (suspend) {
1364 		genpd->suspended_count++;
1365 		genpd_sync_power_off(genpd, false, 0);
1366 	} else {
1367 		genpd_sync_power_on(genpd, false, 0);
1368 		genpd->suspended_count--;
1369 	}
1370 }
1371 
1372 void pm_genpd_syscore_poweroff(struct device *dev)
1373 {
1374 	genpd_syscore_switch(dev, true);
1375 }
1376 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1377 
1378 void pm_genpd_syscore_poweron(struct device *dev)
1379 {
1380 	genpd_syscore_switch(dev, false);
1381 }
1382 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
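/*
 * Usage sketch (hypothetical caller, not part of this file): drivers for
 * devices that must keep running across system suspend, e.g. a timer used
 * as the clock source, may switch their "always on" domain off and on from
 * the syscore stage. The foo_* names are made up for illustration.
 *
 *	static void foo_timer_syscore_suspend(struct device *dev)
 *	{
 *		pm_genpd_syscore_poweroff(dev);
 *	}
 *
 *	static void foo_timer_syscore_resume(struct device *dev)
 *	{
 *		pm_genpd_syscore_poweron(dev);
 *	}
 */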
1383 
1384 #else /* !CONFIG_PM_SLEEP */
1385 
1386 #define genpd_prepare		NULL
1387 #define genpd_suspend_noirq	NULL
1388 #define genpd_resume_noirq	NULL
1389 #define genpd_freeze_noirq	NULL
1390 #define genpd_thaw_noirq	NULL
1391 #define genpd_poweroff_noirq	NULL
1392 #define genpd_restore_noirq	NULL
1393 #define genpd_complete		NULL
1394 
1395 #endif /* CONFIG_PM_SLEEP */
1396 
1397 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1398 					struct gpd_timing_data *td)
1399 {
1400 	struct generic_pm_domain_data *gpd_data;
1401 	int ret;
1402 
1403 	ret = dev_pm_get_subsys_data(dev);
1404 	if (ret)
1405 		return ERR_PTR(ret);
1406 
1407 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1408 	if (!gpd_data) {
1409 		ret = -ENOMEM;
1410 		goto err_put;
1411 	}
1412 
1413 	if (td)
1414 		gpd_data->td = *td;
1415 
1416 	gpd_data->base.dev = dev;
1417 	gpd_data->td.constraint_changed = true;
1418 	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1419 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1420 
1421 	spin_lock_irq(&dev->power.lock);
1422 
1423 	if (dev->power.subsys_data->domain_data) {
1424 		ret = -EINVAL;
1425 		goto err_free;
1426 	}
1427 
1428 	dev->power.subsys_data->domain_data = &gpd_data->base;
1429 
1430 	spin_unlock_irq(&dev->power.lock);
1431 
1432 	return gpd_data;
1433 
1434  err_free:
1435 	spin_unlock_irq(&dev->power.lock);
1436 	kfree(gpd_data);
1437  err_put:
1438 	dev_pm_put_subsys_data(dev);
1439 	return ERR_PTR(ret);
1440 }
1441 
1442 static void genpd_free_dev_data(struct device *dev,
1443 				struct generic_pm_domain_data *gpd_data)
1444 {
1445 	spin_lock_irq(&dev->power.lock);
1446 
1447 	dev->power.subsys_data->domain_data = NULL;
1448 
1449 	spin_unlock_irq(&dev->power.lock);
1450 
1451 	kfree(gpd_data);
1452 	dev_pm_put_subsys_data(dev);
1453 }
1454 
1455 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1456 			    struct gpd_timing_data *td)
1457 {
1458 	struct generic_pm_domain_data *gpd_data;
1459 	int ret;
1460 
1461 	dev_dbg(dev, "%s()\n", __func__);
1462 
1463 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1464 		return -EINVAL;
1465 
1466 	gpd_data = genpd_alloc_dev_data(dev, td);
1467 	if (IS_ERR(gpd_data))
1468 		return PTR_ERR(gpd_data);
1469 
1470 	genpd_lock(genpd);
1471 
1472 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1473 	if (ret)
1474 		goto out;
1475 
1476 	dev_pm_domain_set(dev, &genpd->domain);
1477 
1478 	genpd->device_count++;
1479 	genpd->max_off_time_changed = true;
1480 
1481 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1482 
1483  out:
1484 	genpd_unlock(genpd);
1485 
1486 	if (ret)
1487 		genpd_free_dev_data(dev, gpd_data);
1488 	else
1489 		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1490 
1491 	return ret;
1492 }
1493 
1494 /**
1495  * pm_genpd_add_device - Add a device to an I/O PM domain.
1496  * @genpd: PM domain to add the device to.
1497  * @dev: Device to be added.
1498  */
1499 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1500 {
1501 	int ret;
1502 
1503 	mutex_lock(&gpd_list_lock);
1504 	ret = genpd_add_device(genpd, dev, NULL);
1505 	mutex_unlock(&gpd_list_lock);
1506 
1507 	return ret;
1508 }
1509 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
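/*
 * Usage sketch (hypothetical platform code, not part of this file): after a
 * domain has been registered with pm_genpd_init(), devices are attached to
 * it before their drivers start using runtime PM. foo_pd is a made-up
 * domain used for illustration.
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add device to PM domain\n");
 */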
1510 
1511 static int genpd_remove_device(struct generic_pm_domain *genpd,
1512 			       struct device *dev)
1513 {
1514 	struct generic_pm_domain_data *gpd_data;
1515 	struct pm_domain_data *pdd;
1516 	int ret = 0;
1517 
1518 	dev_dbg(dev, "%s()\n", __func__);
1519 
1520 	pdd = dev->power.subsys_data->domain_data;
1521 	gpd_data = to_gpd_data(pdd);
1522 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1523 
1524 	genpd_lock(genpd);
1525 
1526 	if (genpd->prepared_count > 0) {
1527 		ret = -EAGAIN;
1528 		goto out;
1529 	}
1530 
1531 	genpd->device_count--;
1532 	genpd->max_off_time_changed = true;
1533 
1534 	if (genpd->detach_dev)
1535 		genpd->detach_dev(genpd, dev);
1536 
1537 	dev_pm_domain_set(dev, NULL);
1538 
1539 	list_del_init(&pdd->list_node);
1540 
1541 	genpd_unlock(genpd);
1542 
1543 	genpd_free_dev_data(dev, gpd_data);
1544 
1545 	return 0;
1546 
1547  out:
1548 	genpd_unlock(genpd);
1549 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1550 
1551 	return ret;
1552 }
1553 
1554 /**
1555  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1556  * @dev: Device to be removed.
1557  */
1558 int pm_genpd_remove_device(struct device *dev)
1559 {
1560 	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
1561 
1562 	if (!genpd)
1563 		return -EINVAL;
1564 
1565 	return genpd_remove_device(genpd, dev);
1566 }
1567 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1568 
1569 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1570 			       struct generic_pm_domain *subdomain)
1571 {
1572 	struct gpd_link *link, *itr;
1573 	int ret = 0;
1574 
1575 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1576 	    || genpd == subdomain)
1577 		return -EINVAL;
1578 
1579 	/*
1580 	 * If the domain can be powered on/off in an IRQ safe
1581 	 * context, ensure that the subdomain can also be
1582 	 * powered on/off in that context.
1583 	 */
1584 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1585 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1586 				genpd->name, subdomain->name);
1587 		return -EINVAL;
1588 	}
1589 
1590 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1591 	if (!link)
1592 		return -ENOMEM;
1593 
1594 	genpd_lock(subdomain);
1595 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1596 
1597 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1598 		ret = -EINVAL;
1599 		goto out;
1600 	}
1601 
1602 	list_for_each_entry(itr, &genpd->master_links, master_node) {
1603 		if (itr->slave == subdomain && itr->master == genpd) {
1604 			ret = -EINVAL;
1605 			goto out;
1606 		}
1607 	}
1608 
1609 	link->master = genpd;
1610 	list_add_tail(&link->master_node, &genpd->master_links);
1611 	link->slave = subdomain;
1612 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1613 	if (genpd_status_on(subdomain))
1614 		genpd_sd_counter_inc(genpd);
1615 
1616  out:
1617 	genpd_unlock(genpd);
1618 	genpd_unlock(subdomain);
1619 	if (ret)
1620 		kfree(link);
1621 	return ret;
1622 }
1623 
1624 /**
1625  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1626  * @genpd: Master PM domain to add the subdomain to.
1627  * @subdomain: Subdomain to be added.
1628  */
1629 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1630 			   struct generic_pm_domain *subdomain)
1631 {
1632 	int ret;
1633 
1634 	mutex_lock(&gpd_list_lock);
1635 	ret = genpd_add_subdomain(genpd, subdomain);
1636 	mutex_unlock(&gpd_list_lock);
1637 
1638 	return ret;
1639 }
1640 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
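/*
 * Usage sketch (hypothetical, not part of this file): building a hierarchy
 * in which a "core" domain is the master of a "gpu" subdomain. Both domains
 * must already have been initialized with pm_genpd_init(); the names are
 * made up for illustration.
 *
 *	ret = pm_genpd_add_subdomain(&soc_core_pd, &soc_gpu_pd);
 *	if (ret)
 *		pr_err("failed to add %s as subdomain of %s\n",
 *		       soc_gpu_pd.name, soc_core_pd.name);
 */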
1641 
1642 /**
1643  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1644  * @genpd: Master PM domain to remove the subdomain from.
1645  * @subdomain: Subdomain to be removed.
1646  */
1647 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1648 			      struct generic_pm_domain *subdomain)
1649 {
1650 	struct gpd_link *l, *link;
1651 	int ret = -EINVAL;
1652 
1653 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1654 		return -EINVAL;
1655 
1656 	genpd_lock(subdomain);
1657 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1658 
1659 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1660 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1661 			subdomain->name);
1662 		ret = -EBUSY;
1663 		goto out;
1664 	}
1665 
1666 	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1667 		if (link->slave != subdomain)
1668 			continue;
1669 
1670 		list_del(&link->master_node);
1671 		list_del(&link->slave_node);
1672 		kfree(link);
1673 		if (genpd_status_on(subdomain))
1674 			genpd_sd_counter_dec(genpd);
1675 
1676 		ret = 0;
1677 		break;
1678 	}
1679 
1680 out:
1681 	genpd_unlock(genpd);
1682 	genpd_unlock(subdomain);
1683 
1684 	return ret;
1685 }
1686 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1687 
1688 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1689 {
1690 	struct genpd_power_state *state;
1691 
1692 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1693 	if (!state)
1694 		return -ENOMEM;
1695 
1696 	genpd->states = state;
1697 	genpd->state_count = 1;
1698 	genpd->free = state;
1699 
1700 	return 0;
1701 }
1702 
1703 static void genpd_lock_init(struct generic_pm_domain *genpd)
1704 {
1705 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1706 		spin_lock_init(&genpd->slock);
1707 		genpd->lock_ops = &genpd_spin_ops;
1708 	} else {
1709 		mutex_init(&genpd->mlock);
1710 		genpd->lock_ops = &genpd_mtx_ops;
1711 	}
1712 }
1713 
1714 /**
1715  * pm_genpd_init - Initialize a generic I/O PM domain object.
1716  * @genpd: PM domain object to initialize.
1717  * @gov: PM domain governor to associate with the domain (may be NULL).
1718  * @is_off: Initial state of the domain, true if the domain starts powered off.
1719  *
1720  * Returns 0 on successful initialization, else a negative error code.
1721  */
1722 int pm_genpd_init(struct generic_pm_domain *genpd,
1723 		  struct dev_power_governor *gov, bool is_off)
1724 {
1725 	int ret;
1726 
1727 	if (IS_ERR_OR_NULL(genpd))
1728 		return -EINVAL;
1729 
1730 	INIT_LIST_HEAD(&genpd->master_links);
1731 	INIT_LIST_HEAD(&genpd->slave_links);
1732 	INIT_LIST_HEAD(&genpd->dev_list);
1733 	genpd_lock_init(genpd);
1734 	genpd->gov = gov;
1735 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1736 	atomic_set(&genpd->sd_count, 0);
1737 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1738 	genpd->device_count = 0;
1739 	genpd->max_off_time_ns = -1;
1740 	genpd->max_off_time_changed = true;
1741 	genpd->provider = NULL;
1742 	genpd->has_provider = false;
1743 	genpd->accounting_time = ktime_get();
1744 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1745 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1746 	genpd->domain.ops.prepare = genpd_prepare;
1747 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1748 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1749 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1750 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1751 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1752 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1753 	genpd->domain.ops.complete = genpd_complete;
1754 
1755 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1756 		genpd->dev_ops.stop = pm_clk_suspend;
1757 		genpd->dev_ops.start = pm_clk_resume;
1758 	}
1759 
1760 	/* Always-on domains must be powered on at initialization. */
1761 	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1762 		return -EINVAL;
1763 
1764 	/* Use only one "off" state if there were no states declared */
1765 	if (genpd->state_count == 0) {
1766 		ret = genpd_set_default_power_state(genpd);
1767 		if (ret)
1768 			return ret;
1769 	} else if (!gov) {
1770 		pr_warn("%s : no governor for states\n", genpd->name);
1771 	}
1772 
1773 	device_initialize(&genpd->dev);
1774 	dev_set_name(&genpd->dev, "%s", genpd->name);
1775 
1776 	mutex_lock(&gpd_list_lock);
1777 	list_add(&genpd->gpd_list_node, &gpd_list);
1778 	mutex_unlock(&gpd_list_lock);
1779 
1780 	return 0;
1781 }
1782 EXPORT_SYMBOL_GPL(pm_genpd_init);
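/*
 * Usage sketch (hypothetical SoC driver, not part of this file): a provider
 * embeds struct generic_pm_domain, fills in its name, callbacks and flags,
 * and registers it. The foo_* names are made up for illustration.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-power-domain",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *		.flags = GENPD_FLAG_PM_CLK,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */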
1783 
1784 static int genpd_remove(struct generic_pm_domain *genpd)
1785 {
1786 	struct gpd_link *l, *link;
1787 
1788 	if (IS_ERR_OR_NULL(genpd))
1789 		return -EINVAL;
1790 
1791 	genpd_lock(genpd);
1792 
1793 	if (genpd->has_provider) {
1794 		genpd_unlock(genpd);
1795 		pr_err("Provider present, unable to remove %s\n", genpd->name);
1796 		return -EBUSY;
1797 	}
1798 
1799 	if (!list_empty(&genpd->master_links) || genpd->device_count) {
1800 		genpd_unlock(genpd);
1801 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1802 		return -EBUSY;
1803 	}
1804 
1805 	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1806 		list_del(&link->master_node);
1807 		list_del(&link->slave_node);
1808 		kfree(link);
1809 	}
1810 
1811 	list_del(&genpd->gpd_list_node);
1812 	genpd_unlock(genpd);
1813 	cancel_work_sync(&genpd->power_off_work);
1814 	kfree(genpd->free);
1815 	pr_debug("%s: removed %s\n", __func__, genpd->name);
1816 
1817 	return 0;
1818 }
1819 
1820 /**
1821  * pm_genpd_remove - Remove a generic I/O PM domain
1822  * @genpd: Pointer to PM domain that is to be removed.
1823  *
1824  * To remove the PM domain, this function:
1825  *  - Removes the PM domain as a subdomain to any parent domains,
1826  *    if it was added.
1827  *  - Removes the PM domain from the list of registered PM domains.
1828  *
1829  * The PM domain will only be removed if the associated provider has
1830  * been removed, it is not a parent to any other PM domain, and it has
1831  * no devices associated with it.
1832  */
1833 int pm_genpd_remove(struct generic_pm_domain *genpd)
1834 {
1835 	int ret;
1836 
1837 	mutex_lock(&gpd_list_lock);
1838 	ret = genpd_remove(genpd);
1839 	mutex_unlock(&gpd_list_lock);
1840 
1841 	return ret;
1842 }
1843 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1844 
1845 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1846 
1847 /*
1848  * Device Tree based PM domain providers.
1849  *
1850  * The code below implements generic device tree based PM domain providers that
1851  * bind device tree nodes with generic PM domains registered in the system.
1852  *
1853  * Any driver that registers generic PM domains and needs to support binding of
1854  * devices to these domains is supposed to register a PM domain provider, which
1855  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1856  *
1857  * Two simple mapping functions have been provided for convenience:
1858  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1859  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1860  *    index.
1861  */
1862 
1863 /**
1864  * struct of_genpd_provider - PM domain provider registration structure
1865  * @link: Entry in global list of PM domain providers
1866  * @node: Pointer to device tree node of PM domain provider
1867  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1868  *         into a PM domain.
1869  * @data: context pointer to be passed into @xlate callback
1870  */
1871 struct of_genpd_provider {
1872 	struct list_head link;
1873 	struct device_node *node;
1874 	genpd_xlate_t xlate;
1875 	void *data;
1876 };
1877 
1878 /* List of registered PM domain providers. */
1879 static LIST_HEAD(of_genpd_providers);
1880 /* Mutex to protect the list above. */
1881 static DEFINE_MUTEX(of_genpd_mutex);
1882 
1883 /**
1884  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1885  * @genpdspec: OF phandle args to map into a PM domain
1886  * @data: xlate function private data - pointer to struct generic_pm_domain
1887  *
1888  * This is a generic xlate function that can be used to model PM domains that
1889  * have their own device tree nodes. The private data of xlate function needs
1890  * to be a valid pointer to struct generic_pm_domain.
1891  */
1892 static struct generic_pm_domain *genpd_xlate_simple(
1893 					struct of_phandle_args *genpdspec,
1894 					void *data)
1895 {
1896 	return data;
1897 }
1898 
1899 /**
1900  * genpd_xlate_onecell() - Xlate function using a single index.
1901  * @genpdspec: OF phandle args to map into a PM domain
1902  * @data: xlate function private data - pointer to struct genpd_onecell_data
1903  *
1904  * This is a generic xlate function that can be used to model simple PM domain
1905  * controllers that have one device tree node and provide multiple PM domains.
1906  * A single cell is used as an index into an array of PM domains specified in
1907  * the genpd_onecell_data struct when registering the provider.
1908  */
1909 static struct generic_pm_domain *genpd_xlate_onecell(
1910 					struct of_phandle_args *genpdspec,
1911 					void *data)
1912 {
1913 	struct genpd_onecell_data *genpd_data = data;
1914 	unsigned int idx = genpdspec->args[0];
1915 
1916 	if (genpdspec->args_count != 1)
1917 		return ERR_PTR(-EINVAL);
1918 
1919 	if (idx >= genpd_data->num_domains) {
1920 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1921 		return ERR_PTR(-EINVAL);
1922 	}
1923 
1924 	if (!genpd_data->domains[idx])
1925 		return ERR_PTR(-ENOENT);
1926 
1927 	return genpd_data->domains[idx];
1928 }
1929 
1930 /**
1931  * genpd_add_provider() - Register a PM domain provider for a node
1932  * @np: Device node pointer associated with the PM domain provider.
1933  * @xlate: Callback for decoding PM domain from phandle arguments.
1934  * @data: Context pointer for @xlate callback.
1935  */
1936 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1937 			      void *data)
1938 {
1939 	struct of_genpd_provider *cp;
1940 
1941 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1942 	if (!cp)
1943 		return -ENOMEM;
1944 
1945 	cp->node = of_node_get(np);
1946 	cp->data = data;
1947 	cp->xlate = xlate;
1948 
1949 	mutex_lock(&of_genpd_mutex);
1950 	list_add(&cp->link, &of_genpd_providers);
1951 	mutex_unlock(&of_genpd_mutex);
1952 	pr_debug("Added domain provider from %pOF\n", np);
1953 
1954 	return 0;
1955 }
1956 
1957 /**
1958  * of_genpd_add_provider_simple() - Register a simple PM domain provider
1959  * @np: Device node pointer associated with the PM domain provider.
1960  * @genpd: Pointer to PM domain associated with the PM domain provider.
1961  */
1962 int of_genpd_add_provider_simple(struct device_node *np,
1963 				 struct generic_pm_domain *genpd)
1964 {
1965 	int ret = -EINVAL;
1966 
1967 	if (!np || !genpd)
1968 		return -EINVAL;
1969 
1970 	mutex_lock(&gpd_list_lock);
1971 
1972 	if (!genpd_present(genpd))
1973 		goto unlock;
1974 
1975 	genpd->dev.of_node = np;
1976 
1977 	/* Parse genpd OPP table */
1978 	if (genpd->set_performance_state) {
1979 		ret = dev_pm_opp_of_add_table(&genpd->dev);
1980 		if (ret) {
1981 			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
1982 				ret);
1983 			goto unlock;
1984 		}
1985 
1986 		/*
1987 		 * Save table for faster processing while setting performance
1988 		 * state.
1989 		 */
1990 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
1991 		WARN_ON(!genpd->opp_table);
1992 	}
1993 
1994 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1995 	if (ret) {
1996 		if (genpd->set_performance_state) {
1997 			dev_pm_opp_put_opp_table(genpd->opp_table);
1998 			dev_pm_opp_of_remove_table(&genpd->dev);
1999 		}
2000 
2001 		goto unlock;
2002 	}
2003 
2004 	genpd->provider = &np->fwnode;
2005 	genpd->has_provider = true;
2006 
2007 unlock:
2008 	mutex_unlock(&gpd_list_lock);
2009 
2010 	return ret;
2011 }
2012 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
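
/*
 * Informal usage sketch (not part of the upstream file): roughly how a SoC
 * platform driver might register a single PM domain as a simple provider.
 * The names "foo_pd", foo_pd_power_on/off() and the probe function are
 * invented for the example; error handling is trimmed for brevity.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	static int foo_pd_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&foo_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *	}
 *
 * The matching devicetree node is expected to carry
 * "#power-domain-cells = <0>", since genpd_xlate_simple() ignores the
 * specifier cells entirely.
 */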
2013 
2014 /**
2015  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2016  * @np: Device node pointer associated with the PM domain provider.
2017  * @data: Pointer to the data associated with the PM domain provider.
2018  */
2019 int of_genpd_add_provider_onecell(struct device_node *np,
2020 				  struct genpd_onecell_data *data)
2021 {
2022 	struct generic_pm_domain *genpd;
2023 	unsigned int i;
2024 	int ret = -EINVAL;
2025 
2026 	if (!np || !data)
2027 		return -EINVAL;
2028 
2029 	mutex_lock(&gpd_list_lock);
2030 
2031 	if (!data->xlate)
2032 		data->xlate = genpd_xlate_onecell;
2033 
2034 	for (i = 0; i < data->num_domains; i++) {
2035 		genpd = data->domains[i];
2036 
2037 		if (!genpd)
2038 			continue;
2039 		if (!genpd_present(genpd))
2040 			goto error;
2041 
2042 		genpd->dev.of_node = np;
2043 
2044 		/* Parse genpd OPP table */
2045 		if (genpd->set_performance_state) {
2046 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2047 			if (ret) {
2048 				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2049 					i, ret);
2050 				goto error;
2051 			}
2052 
2053 			/*
2054 			 * Save table for faster processing while setting
2055 			 * performance state.
2056 			 */
2057 			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2058 			WARN_ON(!genpd->opp_table);
2059 		}
2060 
2061 		genpd->provider = &np->fwnode;
2062 		genpd->has_provider = true;
2063 	}
2064 
2065 	ret = genpd_add_provider(np, data->xlate, data);
2066 	if (ret < 0)
2067 		goto error;
2068 
2069 	mutex_unlock(&gpd_list_lock);
2070 
2071 	return 0;
2072 
2073 error:
2074 	while (i--) {
2075 		genpd = data->domains[i];
2076 
2077 		if (!genpd)
2078 			continue;
2079 
2080 		genpd->provider = NULL;
2081 		genpd->has_provider = false;
2082 
2083 		if (genpd->set_performance_state) {
2084 			dev_pm_opp_put_opp_table(genpd->opp_table);
2085 			dev_pm_opp_of_remove_table(&genpd->dev);
2086 		}
2087 	}
2088 
2089 	mutex_unlock(&gpd_list_lock);
2090 
2091 	return ret;
2092 }
2093 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
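
/*
 * Informal usage sketch (not part of the upstream file): a provider exposing
 * several PM domains from one devicetree node via genpd_onecell_data. The
 * identifiers ("bar_pds", "bar_pd_data") and the domain count are invented
 * for the example and error handling is omitted.
 *
 *	static struct generic_pm_domain *bar_pds[4];
 *
 *	static struct genpd_onecell_data bar_pd_data = {
 *		.domains = bar_pds,
 *		.num_domains = ARRAY_SIZE(bar_pds),
 *	};
 *
 *	// In probe, after pm_genpd_init() has been called for each entry
 *	// in bar_pds[]:
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &bar_pd_data);
 *
 * Leaving .xlate as NULL makes the core fall back to genpd_xlate_onecell(),
 * so a one-cell specifier is interpreted as an index into bar_pds[].
 */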
2094 
2095 /**
2096  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2097  * @np: Device node pointer associated with the PM domain provider
2098  */
2099 void of_genpd_del_provider(struct device_node *np)
2100 {
2101 	struct of_genpd_provider *cp, *tmp;
2102 	struct generic_pm_domain *gpd;
2103 
2104 	mutex_lock(&gpd_list_lock);
2105 	mutex_lock(&of_genpd_mutex);
2106 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2107 		if (cp->node == np) {
2108 			/*
2109 			 * For each PM domain associated with the
2110 			 * provider, set the 'has_provider' to false
2111 			 * so that the PM domain can be safely removed.
2112 			 */
2113 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2114 				if (gpd->provider == &np->fwnode) {
2115 					gpd->has_provider = false;
2116 
2117 					if (!gpd->set_performance_state)
2118 						continue;
2119 
2120 					dev_pm_opp_put_opp_table(gpd->opp_table);
2121 					dev_pm_opp_of_remove_table(&gpd->dev);
2122 				}
2123 			}
2124 
2125 			list_del(&cp->link);
2126 			of_node_put(cp->node);
2127 			kfree(cp);
2128 			break;
2129 		}
2130 	}
2131 	mutex_unlock(&of_genpd_mutex);
2132 	mutex_unlock(&gpd_list_lock);
2133 }
2134 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2135 
2136 /**
2137  * genpd_get_from_provider() - Look-up PM domain
2138  * @genpdspec: OF phandle args to use for look-up
2139  *
2140  * Looks for a PM domain provider under the node specified by @genpdspec and if
2141  * found, uses xlate function of the provider to map phandle args to a PM
2142  * domain.
2143  *
2144  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2145  * on failure.
2146  */
2147 static struct generic_pm_domain *genpd_get_from_provider(
2148 					struct of_phandle_args *genpdspec)
2149 {
2150 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2151 	struct of_genpd_provider *provider;
2152 
2153 	if (!genpdspec)
2154 		return ERR_PTR(-EINVAL);
2155 
2156 	mutex_lock(&of_genpd_mutex);
2157 
2158 	/* Check if we have such a provider in our list */
2159 	list_for_each_entry(provider, &of_genpd_providers, link) {
2160 		if (provider->node == genpdspec->np)
2161 			genpd = provider->xlate(genpdspec, provider->data);
2162 		if (!IS_ERR(genpd))
2163 			break;
2164 	}
2165 
2166 	mutex_unlock(&of_genpd_mutex);
2167 
2168 	return genpd;
2169 }
2170 
2171 /**
2172  * of_genpd_add_device() - Add a device to an I/O PM domain
2173  * @genpdspec: OF phandle args to use for look-up PM domain
2174  * @dev: Device to be added.
2175  *
2176  * Looks up an I/O PM domain based upon the phandle args provided and adds
2177  * the device to the PM domain. Returns a negative error code on failure.
2178  */
2179 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2180 {
2181 	struct generic_pm_domain *genpd;
2182 	int ret;
2183 
2184 	mutex_lock(&gpd_list_lock);
2185 
2186 	genpd = genpd_get_from_provider(genpdspec);
2187 	if (IS_ERR(genpd)) {
2188 		ret = PTR_ERR(genpd);
2189 		goto out;
2190 	}
2191 
2192 	ret = genpd_add_device(genpd, dev, NULL);
2193 
2194 out:
2195 	mutex_unlock(&gpd_list_lock);
2196 
2197 	return ret;
2198 }
2199 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2200 
2201 /**
2202  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2203  * @parent_spec: OF phandle args to use for parent PM domain look-up
2204  * @subdomain_spec: OF phandle args to use for subdomain look-up
2205  *
2206  * Looks up a parent PM domain and a subdomain based upon the phandle args
2207  * provided and adds the subdomain to the parent PM domain. Returns a
2208  * negative error code on failure.
2209  */
2210 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2211 			   struct of_phandle_args *subdomain_spec)
2212 {
2213 	struct generic_pm_domain *parent, *subdomain;
2214 	int ret;
2215 
2216 	mutex_lock(&gpd_list_lock);
2217 
2218 	parent = genpd_get_from_provider(parent_spec);
2219 	if (IS_ERR(parent)) {
2220 		ret = PTR_ERR(parent);
2221 		goto out;
2222 	}
2223 
2224 	subdomain = genpd_get_from_provider(subdomain_spec);
2225 	if (IS_ERR(subdomain)) {
2226 		ret = PTR_ERR(subdomain);
2227 		goto out;
2228 	}
2229 
2230 	ret = genpd_add_subdomain(parent, subdomain);
2231 
2232 out:
2233 	mutex_unlock(&gpd_list_lock);
2234 
2235 	return ret;
2236 }
2237 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2238 
2239 /**
2240  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2241  * @np: Pointer to the device node associated with the PM domain provider
2242  *
2243  * Find the last PM domain that was added by a particular provider and
2244  * remove this PM domain from the list of PM domains. The provider is
2245  * identified by the device node that is passed. The PM domain will
2246  * only be removed if the provider associated with the domain has been
2247  * removed.
2248  *
2249  * Returns a valid pointer to struct generic_pm_domain on success or
2250  * ERR_PTR() on failure.
2251  */
2252 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2253 {
2254 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2255 	int ret;
2256 
2257 	if (IS_ERR_OR_NULL(np))
2258 		return ERR_PTR(-EINVAL);
2259 
2260 	mutex_lock(&gpd_list_lock);
2261 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2262 		if (gpd->provider == &np->fwnode) {
2263 			ret = genpd_remove(gpd);
2264 			genpd = ret ? ERR_PTR(ret) : gpd;
2265 			break;
2266 		}
2267 	}
2268 	mutex_unlock(&gpd_list_lock);
2269 
2270 	return genpd;
2271 }
2272 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
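
/*
 * Informal usage sketch (not part of the upstream file): a typical provider
 * remove path, pairing of_genpd_del_provider() with of_genpd_remove_last()
 * until no more domains registered for the node are left. "np" is assumed to
 * be the provider's device node.
 *
 *	struct generic_pm_domain *pd;
 *
 *	of_genpd_del_provider(np);
 *
 *	do {
 *		pd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(pd));
 *
 * Each successful call both unregisters one domain and returns it, so the
 * caller can afterwards free any memory it allocated for that domain.
 */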
2273 
2274 static void genpd_release_dev(struct device *dev)
2275 {
2276 	kfree(dev);
2277 }
2278 
2279 static struct bus_type genpd_bus_type = {
2280 	.name		= "genpd",
2281 };
2282 
2283 /**
2284  * genpd_dev_pm_detach - Detach a device from its PM domain.
2285  * @dev: Device to detach.
2286  * @power_off: Currently not used
2287  *
2288  * Try to locate a corresponding generic PM domain, which the device was
2289  * attached to previously. If such is found, the device is detached from it.
2290  */
2291 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2292 {
2293 	struct generic_pm_domain *pd;
2294 	unsigned int i;
2295 	int ret = 0;
2296 
2297 	pd = dev_to_genpd(dev);
2298 	if (IS_ERR(pd))
2299 		return;
2300 
2301 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2302 
2303 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2304 		ret = genpd_remove_device(pd, dev);
2305 		if (ret != -EAGAIN)
2306 			break;
2307 
2308 		mdelay(i);
2309 		cond_resched();
2310 	}
2311 
2312 	if (ret < 0) {
2313 		dev_err(dev, "failed to remove from PM domain %s: %d",
2314 			pd->name, ret);
2315 		return;
2316 	}
2317 
2318 	/* Check if PM domain can be powered off after removing this device. */
2319 	genpd_queue_power_off_work(pd);
2320 
2321 	/* Unregister the device if it was created by genpd. */
2322 	if (dev->bus == &genpd_bus_type)
2323 		device_unregister(dev);
2324 }
2325 
2326 static void genpd_dev_pm_sync(struct device *dev)
2327 {
2328 	struct generic_pm_domain *pd;
2329 
2330 	pd = dev_to_genpd(dev);
2331 	if (IS_ERR(pd))
2332 		return;
2333 
2334 	genpd_queue_power_off_work(pd);
2335 }
2336 
2337 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
2338 				 unsigned int index, bool power_on)
2339 {
2340 	struct of_phandle_args pd_args;
2341 	struct generic_pm_domain *pd;
2342 	int ret;
2343 
2344 	ret = of_parse_phandle_with_args(np, "power-domains",
2345 				"#power-domain-cells", index, &pd_args);
2346 	if (ret < 0)
2347 		return ret;
2348 
2349 	mutex_lock(&gpd_list_lock);
2350 	pd = genpd_get_from_provider(&pd_args);
2351 	of_node_put(pd_args.np);
2352 	if (IS_ERR(pd)) {
2353 		mutex_unlock(&gpd_list_lock);
2354 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2355 			__func__, PTR_ERR(pd));
2356 		return driver_deferred_probe_check_state(dev);
2357 	}
2358 
2359 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2360 
2361 	ret = genpd_add_device(pd, dev, NULL);
2362 	mutex_unlock(&gpd_list_lock);
2363 
2364 	if (ret < 0) {
2365 		if (ret != -EPROBE_DEFER)
2366 			dev_err(dev, "failed to add to PM domain %s: %d",
2367 				pd->name, ret);
2368 		return ret;
2369 	}
2370 
2371 	dev->pm_domain->detach = genpd_dev_pm_detach;
2372 	dev->pm_domain->sync = genpd_dev_pm_sync;
2373 
2374 	if (power_on) {
2375 		genpd_lock(pd);
2376 		ret = genpd_power_on(pd, 0);
2377 		genpd_unlock(pd);
2378 	}
2379 
2380 	if (ret)
2381 		genpd_remove_device(pd, dev);
2382 
2383 	return ret ? -EPROBE_DEFER : 1;
2384 }
2385 
2386 /**
2387  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2388  * @dev: Device to attach.
2389  *
2390  * Parse the device's OF node to find a PM domain specifier. If one is found,
2391  * attach the device to the retrieved pm_domain ops.
2392  *
2393  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2394  * need a PM domain or when multiple power-domains exist for it, else a
2395  * negative error code. Note that if a power-domain exists for the device but
2396  * cannot be found or turned on, -EPROBE_DEFER is returned to ensure that the
2397  * device is not probed and to retry it later.
2398  */
2399 int genpd_dev_pm_attach(struct device *dev)
2400 {
2401 	if (!dev->of_node)
2402 		return 0;
2403 
2404 	/*
2405 	 * Devices with multiple PM domains must be attached separately, as we
2406 	 * can only attach one PM domain per device.
2407 	 */
2408 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2409 				       "#power-domain-cells") != 1)
2410 		return 0;
2411 
2412 	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
2413 }
2414 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
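
/*
 * Note (informal, not part of the upstream file): drivers do not normally
 * call genpd_dev_pm_attach() directly. Bus code typically goes through
 * dev_pm_domain_attach(), roughly:
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;
 *
 * which ends up here for devices whose "power-domains" property names a
 * genpd provider.
 */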
2415 
2416 /**
2417  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2418  * @dev: The device used to lookup the PM domain.
2419  * @index: The index of the PM domain.
2420  *
2421  * Parse device's OF node to find a PM domain specifier at the provided @index.
2422  * If such is found, creates a virtual device and attaches it to the retrieved
2423  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2424  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2425  *
2426  * Returns the created virtual device on a successfully attached PM domain,
2427  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2428  * failures. If a power-domain exists for the device but cannot be found or
2429  * turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device is
2430  * not probed and to retry it later.
2431  */
2432 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2433 					 unsigned int index)
2434 {
2435 	struct device *virt_dev;
2436 	int num_domains;
2437 	int ret;
2438 
2439 	if (!dev->of_node)
2440 		return NULL;
2441 
2442 	/* Deal only with devices using multiple PM domains. */
2443 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2444 						 "#power-domain-cells");
2445 	if (num_domains < 2 || index >= num_domains)
2446 		return NULL;
2447 
2448 	/* Allocate and register device on the genpd bus. */
2449 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2450 	if (!virt_dev)
2451 		return ERR_PTR(-ENOMEM);
2452 
2453 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2454 	virt_dev->bus = &genpd_bus_type;
2455 	virt_dev->release = genpd_release_dev;
2456 
2457 	ret = device_register(virt_dev);
2458 	if (ret) {
2459 		kfree(virt_dev);
2460 		return ERR_PTR(ret);
2461 	}
2462 
2463 	/* Try to attach the device to the PM domain at the specified index. */
2464 	ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
2465 	if (ret < 1) {
2466 		device_unregister(virt_dev);
2467 		return ret ? ERR_PTR(ret) : NULL;
2468 	}
2469 
2470 	pm_runtime_enable(virt_dev);
2471 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2472 
2473 	return virt_dev;
2474 }
2475 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
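
/*
 * Informal usage sketch (not part of the upstream file): a consumer driver
 * with two power-domains entries attaching to each of them and tying the
 * returned virtual devices to its own runtime PM state via device links.
 * The local variable names are invented for the example and error handling
 * is reduced to the essentials.
 *
 *	struct device *pd0, *pd1;
 *
 *	pd0 = genpd_dev_pm_attach_by_id(dev, 0);
 *	if (IS_ERR_OR_NULL(pd0))
 *		return PTR_ERR(pd0) ? : -ENODEV;
 *
 *	pd1 = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR_OR_NULL(pd1))
 *		return PTR_ERR(pd1) ? : -ENODEV;
 *
 *	device_link_add(dev, pd0, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *	device_link_add(dev, pd1, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *
 * With the links in place, runtime-resuming the consumer powers on both
 * domains; without them the driver would have to runtime-manage the virtual
 * devices explicitly.
 */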
2476 
2477 /**
2478  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2479  * @dev: The device used to lookup the PM domain.
2480  * @name: The name of the PM domain.
2481  *
2482  * Parse device's OF node to find a PM domain specifier using the
2483  * power-domain-names DT property. For further description see
2484  * genpd_dev_pm_attach_by_id().
2485  */
2486 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2487 {
2488 	int index;
2489 
2490 	if (!dev->of_node)
2491 		return NULL;
2492 
2493 	index = of_property_match_string(dev->of_node, "power-domain-names",
2494 					 name);
2495 	if (index < 0)
2496 		return NULL;
2497 
2498 	return genpd_dev_pm_attach_by_id(dev, index);
2499 }
2500 
2501 static const struct of_device_id idle_state_match[] = {
2502 	{ .compatible = "domain-idle-state", },
2503 	{ }
2504 };
2505 
2506 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2507 				    struct device_node *state_node)
2508 {
2509 	int err;
2510 	u32 residency;
2511 	u32 entry_latency, exit_latency;
2512 
2513 	err = of_property_read_u32(state_node, "entry-latency-us",
2514 						&entry_latency);
2515 	if (err) {
2516 		pr_debug(" * %pOF missing entry-latency-us property\n",
2517 						state_node);
2518 		return -EINVAL;
2519 	}
2520 
2521 	err = of_property_read_u32(state_node, "exit-latency-us",
2522 						&exit_latency);
2523 	if (err) {
2524 		pr_debug(" * %pOF missing exit-latency-us property\n",
2525 						state_node);
2526 		return -EINVAL;
2527 	}
2528 
2529 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2530 	if (!err)
2531 		genpd_state->residency_ns = 1000 * residency;
2532 
2533 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2534 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2535 	genpd_state->fwnode = &state_node->fwnode;
2536 
2537 	return 0;
2538 }
2539 
2540 static int genpd_iterate_idle_states(struct device_node *dn,
2541 				     struct genpd_power_state *states)
2542 {
2543 	int ret;
2544 	struct of_phandle_iterator it;
2545 	struct device_node *np;
2546 	int i = 0;
2547 
2548 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2549 	if (ret <= 0)
2550 		return ret;
2551 
2552 	/* Loop over the phandles until all the requested entries are found */
2553 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2554 		np = it.node;
2555 		if (!of_match_node(idle_state_match, np))
2556 			continue;
2557 		if (states) {
2558 			ret = genpd_parse_state(&states[i], np);
2559 			if (ret) {
2560 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2561 				       np, ret);
2562 				of_node_put(np);
2563 				return ret;
2564 			}
2565 		}
2566 		i++;
2567 	}
2568 
2569 	return i;
2570 }
2571 
2572 /**
2573  * of_genpd_parse_idle_states - Return array of idle states for the genpd.
2574  *
2575  * @dn: The genpd device node
2576  * @states: Pointer through which the array of parsed states is returned.
2577  * @n: The count of elements in the array returned from this function.
2578  *
2579  * Returns the device states parsed from the OF node. The memory for the states
2580  * is allocated by this function and it is the responsibility of the caller to
2581  * free it after use. The function returns 0 whether or not any compatible
2582  * domain idle states are found; on error, a negative error code is returned.
2583  */
2584 int of_genpd_parse_idle_states(struct device_node *dn,
2585 			struct genpd_power_state **states, int *n)
2586 {
2587 	struct genpd_power_state *st;
2588 	int ret;
2589 
2590 	ret = genpd_iterate_idle_states(dn, NULL);
2591 	if (ret < 0)
2592 		return ret;
2593 
2594 	if (!ret) {
2595 		*states = NULL;
2596 		*n = 0;
2597 		return 0;
2598 	}
2599 
2600 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2601 	if (!st)
2602 		return -ENOMEM;
2603 
2604 	ret = genpd_iterate_idle_states(dn, st);
2605 	if (ret <= 0) {
2606 		kfree(st);
2607 		return ret < 0 ? ret : -EINVAL;
2608 	}
2609 
2610 	*states = st;
2611 	*n = ret;
2612 
2613 	return 0;
2614 }
2615 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
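
/*
 * Informal usage sketch (not part of the upstream file): a provider feeding
 * the parsed idle states into its genpd before registering it. "foo_pd" and
 * "np" are assumed to come from the provider driver.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	if (nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *
 * The states array is owned by the caller, so it must stay valid for the
 * lifetime of the domain (or be freed only after the domain is removed).
 */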
2616 
2617 /**
2618  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2619  *
2620  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2621  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2622  *	state.
2623  *
2624  * Returns the performance state encoded in the OPP of the genpd. This calls
2625  * the platform-specific genpd->opp_to_performance_state() callback to
2626  * translate a power domain OPP to a performance state.
2627  *
2628  * Returns performance state on success and 0 on failure.
2629  */
2630 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2631 					       struct dev_pm_opp *opp)
2632 {
2633 	struct generic_pm_domain *genpd = NULL;
2634 	int state;
2635 
2636 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2637 
2638 	if (unlikely(!genpd->opp_to_performance_state))
2639 		return 0;
2640 
2641 	genpd_lock(genpd);
2642 	state = genpd->opp_to_performance_state(genpd, opp);
2643 	genpd_unlock(genpd);
2644 
2645 	return state;
2646 }
2647 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
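
/*
 * Informal sketch (not part of the upstream file) of what a provider's
 * ->opp_to_performance_state() callback might look like. Real providers
 * derive the state from their own OPP encoding; mapping the OPP voltage to a
 * state number, as done here, is purely illustrative.
 *
 *	static unsigned int foo_pd_opp_to_performance_state(
 *			struct generic_pm_domain *genpd, struct dev_pm_opp *opp)
 *	{
 *		unsigned long uV = dev_pm_opp_get_voltage(opp);
 *
 *		return uV > 900000 ? 2 : uV > 800000 ? 1 : 0;
 *	}
 *
 * The callback runs with the genpd lock held, so it must not sleep for
 * irq-safe domains and should stay lightweight in general.
 */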
2648 
2649 static int __init genpd_bus_init(void)
2650 {
2651 	return bus_register(&genpd_bus_type);
2652 }
2653 core_initcall(genpd_bus_init);
2654 
2655 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2656 
2657 
2658 /***        debugfs support        ***/
2659 
2660 #ifdef CONFIG_DEBUG_FS
2661 #include <linux/pm.h>
2662 #include <linux/device.h>
2663 #include <linux/debugfs.h>
2664 #include <linux/seq_file.h>
2665 #include <linux/init.h>
2666 #include <linux/kobject.h>
2667 static struct dentry *genpd_debugfs_dir;
2668 
2669 /*
2670  * TODO: This function is a slightly modified version of rtpm_status_show
2671  * from sysfs.c, so generalize it.
2672  */
2673 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2674 {
2675 	static const char * const status_lookup[] = {
2676 		[RPM_ACTIVE] = "active",
2677 		[RPM_RESUMING] = "resuming",
2678 		[RPM_SUSPENDED] = "suspended",
2679 		[RPM_SUSPENDING] = "suspending"
2680 	};
2681 	const char *p = "";
2682 
2683 	if (dev->power.runtime_error)
2684 		p = "error";
2685 	else if (dev->power.disable_depth)
2686 		p = "unsupported";
2687 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2688 		p = status_lookup[dev->power.runtime_status];
2689 	else
2690 		WARN_ON(1);
2691 
2692 	seq_puts(s, p);
2693 }
2694 
2695 static int genpd_summary_one(struct seq_file *s,
2696 			struct generic_pm_domain *genpd)
2697 {
2698 	static const char * const status_lookup[] = {
2699 		[GPD_STATE_ACTIVE] = "on",
2700 		[GPD_STATE_POWER_OFF] = "off"
2701 	};
2702 	struct pm_domain_data *pm_data;
2703 	const char *kobj_path;
2704 	struct gpd_link *link;
2705 	char state[16];
2706 	int ret;
2707 
2708 	ret = genpd_lock_interruptible(genpd);
2709 	if (ret)
2710 		return -ERESTARTSYS;
2711 
2712 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2713 		goto exit;
2714 	if (!genpd_status_on(genpd))
2715 		snprintf(state, sizeof(state), "%s-%u",
2716 			 status_lookup[genpd->status], genpd->state_idx);
2717 	else
2718 		snprintf(state, sizeof(state), "%s",
2719 			 status_lookup[genpd->status]);
2720 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2721 
2722 	/*
2723 	 * Modifications on the list require holding locks on both
2724 	 * master and slave, so we are safe.
2725 	 * Also genpd->name is immutable.
2726 	 */
2727 	list_for_each_entry(link, &genpd->master_links, master_node) {
2728 		seq_printf(s, "%s", link->slave->name);
2729 		if (!list_is_last(&link->master_node, &genpd->master_links))
2730 			seq_puts(s, ", ");
2731 	}
2732 
2733 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2734 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2735 				genpd_is_irq_safe(genpd) ?
2736 				GFP_ATOMIC : GFP_KERNEL);
2737 		if (kobj_path == NULL)
2738 			continue;
2739 
2740 		seq_printf(s, "\n    %-50s  ", kobj_path);
2741 		rtpm_status_str(s, pm_data->dev);
2742 		kfree(kobj_path);
2743 	}
2744 
2745 	seq_puts(s, "\n");
2746 exit:
2747 	genpd_unlock(genpd);
2748 
2749 	return 0;
2750 }
2751 
2752 static int summary_show(struct seq_file *s, void *data)
2753 {
2754 	struct generic_pm_domain *genpd;
2755 	int ret = 0;
2756 
2757 	seq_puts(s, "domain                          status          slaves\n");
2758 	seq_puts(s, "    /device                                             runtime status\n");
2759 	seq_puts(s, "----------------------------------------------------------------------\n");
2760 
2761 	ret = mutex_lock_interruptible(&gpd_list_lock);
2762 	if (ret)
2763 		return -ERESTARTSYS;
2764 
2765 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2766 		ret = genpd_summary_one(s, genpd);
2767 		if (ret)
2768 			break;
2769 	}
2770 	mutex_unlock(&gpd_list_lock);
2771 
2772 	return ret;
2773 }
2774 
2775 static int status_show(struct seq_file *s, void *data)
2776 {
2777 	static const char * const status_lookup[] = {
2778 		[GPD_STATE_ACTIVE] = "on",
2779 		[GPD_STATE_POWER_OFF] = "off"
2780 	};
2781 
2782 	struct generic_pm_domain *genpd = s->private;
2783 	int ret = 0;
2784 
2785 	ret = genpd_lock_interruptible(genpd);
2786 	if (ret)
2787 		return -ERESTARTSYS;
2788 
2789 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2790 		goto exit;
2791 
2792 	if (genpd->status == GPD_STATE_POWER_OFF)
2793 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2794 			genpd->state_idx);
2795 	else
2796 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
2797 exit:
2798 	genpd_unlock(genpd);
2799 	return ret;
2800 }
2801 
2802 static int sub_domains_show(struct seq_file *s, void *data)
2803 {
2804 	struct generic_pm_domain *genpd = s->private;
2805 	struct gpd_link *link;
2806 	int ret = 0;
2807 
2808 	ret = genpd_lock_interruptible(genpd);
2809 	if (ret)
2810 		return -ERESTARTSYS;
2811 
2812 	list_for_each_entry(link, &genpd->master_links, master_node)
2813 		seq_printf(s, "%s\n", link->slave->name);
2814 
2815 	genpd_unlock(genpd);
2816 	return ret;
2817 }
2818 
2819 static int idle_states_show(struct seq_file *s, void *data)
2820 {
2821 	struct generic_pm_domain *genpd = s->private;
2822 	unsigned int i;
2823 	int ret = 0;
2824 
2825 	ret = genpd_lock_interruptible(genpd);
2826 	if (ret)
2827 		return -ERESTARTSYS;
2828 
2829 	seq_puts(s, "State          Time Spent(ms)\n");
2830 
2831 	for (i = 0; i < genpd->state_count; i++) {
2832 		ktime_t delta = 0;
2833 		s64 msecs;
2834 
2835 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2836 				(genpd->state_idx == i))
2837 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2838 
2839 		msecs = ktime_to_ms(
2840 			ktime_add(genpd->states[i].idle_time, delta));
2841 		seq_printf(s, "S%-13i %lld\n", i, msecs);
2842 	}
2843 
2844 	genpd_unlock(genpd);
2845 	return ret;
2846 }
2847 
2848 static int active_time_show(struct seq_file *s, void *data)
2849 {
2850 	struct generic_pm_domain *genpd = s->private;
2851 	ktime_t delta = 0;
2852 	int ret = 0;
2853 
2854 	ret = genpd_lock_interruptible(genpd);
2855 	if (ret)
2856 		return -ERESTARTSYS;
2857 
2858 	if (genpd->status == GPD_STATE_ACTIVE)
2859 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
2860 
2861 	seq_printf(s, "%lld ms\n", ktime_to_ms(
2862 				ktime_add(genpd->on_time, delta)));
2863 
2864 	genpd_unlock(genpd);
2865 	return ret;
2866 }
2867 
2868 static int total_idle_time_show(struct seq_file *s, void *data)
2869 {
2870 	struct generic_pm_domain *genpd = s->private;
2871 	ktime_t delta = 0, total = 0;
2872 	unsigned int i;
2873 	int ret = 0;
2874 
2875 	ret = genpd_lock_interruptible(genpd);
2876 	if (ret)
2877 		return -ERESTARTSYS;
2878 
2879 	for (i = 0; i < genpd->state_count; i++) {
2880 
2881 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2882 				(genpd->state_idx == i))
2883 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2884 
2885 		total = ktime_add(total, genpd->states[i].idle_time);
2886 	}
2887 	total = ktime_add(total, delta);
2888 
2889 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2890 
2891 	genpd_unlock(genpd);
2892 	return ret;
2893 }
2894 
2895 
2896 static int devices_show(struct seq_file *s, void *data)
2897 {
2898 	struct generic_pm_domain *genpd = s->private;
2899 	struct pm_domain_data *pm_data;
2900 	const char *kobj_path;
2901 	int ret = 0;
2902 
2903 	ret = genpd_lock_interruptible(genpd);
2904 	if (ret)
2905 		return -ERESTARTSYS;
2906 
2907 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2908 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2909 				genpd_is_irq_safe(genpd) ?
2910 				GFP_ATOMIC : GFP_KERNEL);
2911 		if (kobj_path == NULL)
2912 			continue;
2913 
2914 		seq_printf(s, "%s\n", kobj_path);
2915 		kfree(kobj_path);
2916 	}
2917 
2918 	genpd_unlock(genpd);
2919 	return ret;
2920 }
2921 
2922 static int perf_state_show(struct seq_file *s, void *data)
2923 {
2924 	struct generic_pm_domain *genpd = s->private;
2925 
2926 	if (genpd_lock_interruptible(genpd))
2927 		return -ERESTARTSYS;
2928 
2929 	seq_printf(s, "%u\n", genpd->performance_state);
2930 
2931 	genpd_unlock(genpd);
2932 	return 0;
2933 }
2934 
2935 DEFINE_SHOW_ATTRIBUTE(summary);
2936 DEFINE_SHOW_ATTRIBUTE(status);
2937 DEFINE_SHOW_ATTRIBUTE(sub_domains);
2938 DEFINE_SHOW_ATTRIBUTE(idle_states);
2939 DEFINE_SHOW_ATTRIBUTE(active_time);
2940 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
2941 DEFINE_SHOW_ATTRIBUTE(devices);
2942 DEFINE_SHOW_ATTRIBUTE(perf_state);
2943 
2944 static int __init genpd_debug_init(void)
2945 {
2946 	struct dentry *d;
2947 	struct generic_pm_domain *genpd;
2948 
2949 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2950 
2951 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
2952 			    NULL, &summary_fops);
2953 
2954 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2955 		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
2956 
2957 		debugfs_create_file("current_state", 0444,
2958 				d, genpd, &status_fops);
2959 		debugfs_create_file("sub_domains", 0444,
2960 				d, genpd, &sub_domains_fops);
2961 		debugfs_create_file("idle_states", 0444,
2962 				d, genpd, &idle_states_fops);
2963 		debugfs_create_file("active_time", 0444,
2964 				d, genpd, &active_time_fops);
2965 		debugfs_create_file("total_idle_time", 0444,
2966 				d, genpd, &total_idle_time_fops);
2967 		debugfs_create_file("devices", 0444,
2968 				d, genpd, &devices_fops);
2969 		if (genpd->set_performance_state)
2970 			debugfs_create_file("perf_state", 0444,
2971 					    d, genpd, &perf_state_fops);
2972 	}
2973 
2974 	return 0;
2975 }
2976 late_initcall(genpd_debug_init);
2977 
2978 static void __exit genpd_debug_exit(void)
2979 {
2980 	debugfs_remove_recursive(genpd_debugfs_dir);
2981 }
2982 __exitcall(genpd_debug_exit);
2983 #endif /* CONFIG_DEBUG_FS */
2984