xref: /openbmc/linux/drivers/base/power/domain.c (revision b8d312aa)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 #include <linux/cpu.h>
24 
25 #include "power.h"
26 
27 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
28 
29 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
30 ({								\
31 	type (*__routine)(struct device *__d); 			\
32 	type __ret = (type)0;					\
33 								\
34 	__routine = genpd->dev_ops.callback; 			\
35 	if (__routine) {					\
36 		__ret = __routine(dev); 			\
37 	}							\
38 	__ret;							\
39 })
40 
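/*
 * Illustrative sketch (not part of the original file): the statement
 * expression above makes GENPD_DEV_CALLBACK(genpd, int, stop, dev) behave
 * like a call to the hypothetical helper below, i.e. it invokes an optional
 * per-domain callback and defaults to (int)0 when none is provided.
 */
static inline int genpd_dev_callback_stop_sketch(
		const struct generic_pm_domain *genpd, struct device *dev)
{
	int (*routine)(struct device *__d) = genpd->dev_ops.stop;

	return routine ? routine(dev) : 0;
}
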
41 static LIST_HEAD(gpd_list);
42 static DEFINE_MUTEX(gpd_list_lock);
43 
44 struct genpd_lock_ops {
45 	void (*lock)(struct generic_pm_domain *genpd);
46 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
47 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
48 	void (*unlock)(struct generic_pm_domain *genpd);
49 };
50 
51 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
52 {
53 	mutex_lock(&genpd->mlock);
54 }
55 
56 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
57 					int depth)
58 {
59 	mutex_lock_nested(&genpd->mlock, depth);
60 }
61 
62 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
63 {
64 	return mutex_lock_interruptible(&genpd->mlock);
65 }
66 
67 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
68 {
69 	return mutex_unlock(&genpd->mlock);
70 }
71 
72 static const struct genpd_lock_ops genpd_mtx_ops = {
73 	.lock = genpd_lock_mtx,
74 	.lock_nested = genpd_lock_nested_mtx,
75 	.lock_interruptible = genpd_lock_interruptible_mtx,
76 	.unlock = genpd_unlock_mtx,
77 };
78 
79 static void genpd_lock_spin(struct generic_pm_domain *genpd)
80 	__acquires(&genpd->slock)
81 {
82 	unsigned long flags;
83 
84 	spin_lock_irqsave(&genpd->slock, flags);
85 	genpd->lock_flags = flags;
86 }
87 
88 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
89 					int depth)
90 	__acquires(&genpd->slock)
91 {
92 	unsigned long flags;
93 
94 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
95 	genpd->lock_flags = flags;
96 }
97 
98 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
99 	__acquires(&genpd->slock)
100 {
101 	unsigned long flags;
102 
103 	spin_lock_irqsave(&genpd->slock, flags);
104 	genpd->lock_flags = flags;
105 	return 0;
106 }
107 
108 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
109 	__releases(&genpd->slock)
110 {
111 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
112 }
113 
114 static const struct genpd_lock_ops genpd_spin_ops = {
115 	.lock = genpd_lock_spin,
116 	.lock_nested = genpd_lock_nested_spin,
117 	.lock_interruptible = genpd_lock_interruptible_spin,
118 	.unlock = genpd_unlock_spin,
119 };
120 
121 #define genpd_lock(p)			p->lock_ops->lock(p)
122 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
123 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
124 #define genpd_unlock(p)			p->lock_ops->unlock(p)
125 
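/*
 * Illustrative sketch (not part of the original file): the lock_ops
 * indirection lets any caller take a domain's lock without knowing whether
 * the domain uses a mutex or an IRQ-safe spinlock. A hypothetical helper:
 */
static void genpd_locked_example(struct generic_pm_domain *genpd)
{
	genpd_lock(genpd);	/* mutex_lock() or spin_lock_irqsave() */
	/* ... inspect or modify genpd state under the domain lock ... */
	genpd_unlock(genpd);	/* matching unlock via the same lock_ops */
}
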
126 #define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
127 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
128 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
129 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
130 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
131 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
132 
133 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
134 		const struct generic_pm_domain *genpd)
135 {
136 	bool ret;
137 
138 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
139 
140 	/*
141 	 * Warn once if an IRQ safe device is attached to a no sleep domain,
142 	 * to indicate a suboptimal configuration for PM. For an always on
143 	 * domain this isn't the case, thus don't warn.
144 	 */
145 	if (ret && !genpd_is_always_on(genpd))
146 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
147 				genpd->name);
148 
149 	return ret;
150 }
151 
152 /*
153  * Get the generic PM domain for a particular struct device.
154  * This validates the struct device pointer, the PM domain pointer,
155  * and checks that the PM domain pointer is a real generic PM domain.
156  * Any failure results in NULL being returned.
157  */
158 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
159 {
160 	struct generic_pm_domain *genpd = NULL, *gpd;
161 
162 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
163 		return NULL;
164 
165 	mutex_lock(&gpd_list_lock);
166 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
167 		if (&gpd->domain == dev->pm_domain) {
168 			genpd = gpd;
169 			break;
170 		}
171 	}
172 	mutex_unlock(&gpd_list_lock);
173 
174 	return genpd;
175 }
176 
177 /*
178  * This should only be used where we are certain that the pm_domain
179  * attached to the device is a genpd domain.
180  */
181 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
182 {
183 	if (IS_ERR_OR_NULL(dev->pm_domain))
184 		return ERR_PTR(-EINVAL);
185 
186 	return pd_to_genpd(dev->pm_domain);
187 }
188 
189 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
190 			  struct device *dev)
191 {
192 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
193 }
194 
195 static int genpd_start_dev(const struct generic_pm_domain *genpd,
196 			   struct device *dev)
197 {
198 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
199 }
200 
201 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
202 {
203 	bool ret = false;
204 
205 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
206 		ret = !!atomic_dec_and_test(&genpd->sd_count);
207 
208 	return ret;
209 }
210 
211 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
212 {
213 	atomic_inc(&genpd->sd_count);
214 	smp_mb__after_atomic();
215 }
216 
217 #ifdef CONFIG_DEBUG_FS
218 static void genpd_update_accounting(struct generic_pm_domain *genpd)
219 {
220 	ktime_t delta, now;
221 
222 	now = ktime_get();
223 	delta = ktime_sub(now, genpd->accounting_time);
224 
225 	/*
226 	 * If genpd->status is active, the domain has just come out of the
227 	 * "off" state, so account the elapsed time as idle time; otherwise
228 	 * account it as on time.
229 	 */
230 	if (genpd->status == GPD_STATE_ACTIVE) {
231 		int state_idx = genpd->state_idx;
232 
233 		genpd->states[state_idx].idle_time =
234 			ktime_add(genpd->states[state_idx].idle_time, delta);
235 	} else {
236 		genpd->on_time = ktime_add(genpd->on_time, delta);
237 	}
238 
239 	genpd->accounting_time = now;
240 }
241 #else
242 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
243 #endif
244 
245 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
246 					   unsigned int state)
247 {
248 	struct generic_pm_domain_data *pd_data;
249 	struct pm_domain_data *pdd;
250 	struct gpd_link *link;
251 
252 	/* New requested state is the same as the max requested state */
253 	if (state == genpd->performance_state)
254 		return state;
255 
256 	/* New requested state is higher than the max requested state */
257 	if (state > genpd->performance_state)
258 		return state;
259 
260 	/* Traverse all devices within the domain */
261 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
262 		pd_data = to_gpd_data(pdd);
263 
264 		if (pd_data->performance_state > state)
265 			state = pd_data->performance_state;
266 	}
267 
268 	/*
269 	 * Traverse all sub-domains within the domain. This can be
270 	 * done without any additional locking as the link->performance_state
271 	 * field is protected by the master genpd->lock, which is already taken.
272 	 *
273 	 * Also note that link->performance_state (subdomain's performance state
274 	 * requirement to master domain) is different from
275 	 * link->slave->performance_state (current performance state requirement
276 	 * of the devices/sub-domains of the subdomain) and so can have a
277 	 * different value.
278 	 *
279 	 * Note that we also take vote from powered-off sub-domains into account
280 	 * as the same is done for devices right now.
281 	 */
282 	list_for_each_entry(link, &genpd->master_links, master_node) {
283 		if (link->performance_state > state)
284 			state = link->performance_state;
285 	}
286 
287 	return state;
288 }
289 
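/*
 * Illustrative sketch (not part of the original file): the re-evaluation
 * above reduces to a plain max() over three vote sources - the new request,
 * the per-device votes and the per-subdomain link votes. A standalone model
 * with hypothetical vote arrays:
 */
static unsigned int genpd_reeval_sketch(unsigned int request,
					const unsigned int *dev_votes,
					unsigned int ndev,
					const unsigned int *link_votes,
					unsigned int nlink)
{
	unsigned int state = request, i;

	for (i = 0; i < ndev; i++)
		if (dev_votes[i] > state)
			state = dev_votes[i];

	for (i = 0; i < nlink; i++)
		if (link_votes[i] > state)
			state = link_votes[i];

	return state;
}
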
290 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
291 					unsigned int state, int depth)
292 {
293 	struct generic_pm_domain *master;
294 	struct gpd_link *link;
295 	int master_state, ret;
296 
297 	if (state == genpd->performance_state)
298 		return 0;
299 
300 	/* Propagate to masters of genpd */
301 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
302 		master = link->master;
303 
304 		if (!master->set_performance_state)
305 			continue;
306 
307 		/* Find master's performance state */
308 		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
309 							 master->opp_table,
310 							 state);
311 		if (unlikely(ret < 0))
312 			goto err;
313 
314 		master_state = ret;
315 
316 		genpd_lock_nested(master, depth + 1);
317 
318 		link->prev_performance_state = link->performance_state;
319 		link->performance_state = master_state;
320 		master_state = _genpd_reeval_performance_state(master,
321 						master_state);
322 		ret = _genpd_set_performance_state(master, master_state, depth + 1);
323 		if (ret)
324 			link->performance_state = link->prev_performance_state;
325 
326 		genpd_unlock(master);
327 
328 		if (ret)
329 			goto err;
330 	}
331 
332 	ret = genpd->set_performance_state(genpd, state);
333 	if (ret)
334 		goto err;
335 
336 	genpd->performance_state = state;
337 	return 0;
338 
339 err:
340 	/* Encountered an error, let's roll back */
341 	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
342 					     slave_node) {
343 		master = link->master;
344 
345 		if (!master->set_performance_state)
346 			continue;
347 
348 		genpd_lock_nested(master, depth + 1);
349 
350 		master_state = link->prev_performance_state;
351 		link->performance_state = master_state;
352 
353 		master_state = _genpd_reeval_performance_state(master,
354 						master_state);
355 		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
356 			pr_err("%s: Failed to roll back to %d performance state\n",
357 			       master->name, master_state);
358 		}
359 
360 		genpd_unlock(master);
361 	}
362 
363 	return ret;
364 }
365 
366 /**
367  * dev_pm_genpd_set_performance_state - Set performance state of device's power
368  * domain.
369  *
370  * @dev: Device for which the performance state needs to be set.
371  * @state: Target performance state of the device. Pass 0 when the device no
372  *	   longer has any performance state constraints (the device then stops
373  *	   participating in the selection of the genpd's target performance
374  *	   state).
375  *
376  * It is assumed that the caller guarantees that the genpd won't be detached
377  * while this routine is running.
378  *
379  * Returns 0 on success and a negative error value on failure.
380  */
381 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
382 {
383 	struct generic_pm_domain *genpd;
384 	struct generic_pm_domain_data *gpd_data;
385 	unsigned int prev;
386 	int ret;
387 
388 	genpd = dev_to_genpd(dev);
389 	if (IS_ERR(genpd))
390 		return -ENODEV;
391 
392 	if (unlikely(!genpd->set_performance_state))
393 		return -EINVAL;
394 
395 	if (WARN_ON(!dev->power.subsys_data ||
396 		     !dev->power.subsys_data->domain_data))
397 		return -EINVAL;
398 
399 	genpd_lock(genpd);
400 
401 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
402 	prev = gpd_data->performance_state;
403 	gpd_data->performance_state = state;
404 
405 	state = _genpd_reeval_performance_state(genpd, state);
406 	ret = _genpd_set_performance_state(genpd, state, 0);
407 	if (ret)
408 		gpd_data->performance_state = prev;
409 
410 	genpd_unlock(genpd);
411 
412 	return ret;
413 }
414 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
415 
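/*
 * Illustrative sketch (not part of the original file): a consumer driver
 * votes for a performance state while it needs one and drops the vote by
 * passing 0. The state value 3 below is a made-up example; real drivers
 * typically derive the state from an OPP table.
 */
static int genpd_perf_vote_sketch(struct device *dev)
{
	int ret;

	ret = dev_pm_genpd_set_performance_state(dev, 3);
	if (ret)
		return ret;

	/* ... do the work that requires the higher state ... */

	return dev_pm_genpd_set_performance_state(dev, 0);
}
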
416 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
417 {
418 	unsigned int state_idx = genpd->state_idx;
419 	ktime_t time_start;
420 	s64 elapsed_ns;
421 	int ret;
422 
423 	if (!genpd->power_on)
424 		return 0;
425 
426 	if (!timed)
427 		return genpd->power_on(genpd);
428 
429 	time_start = ktime_get();
430 	ret = genpd->power_on(genpd);
431 	if (ret)
432 		return ret;
433 
434 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
435 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
436 		return ret;
437 
438 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
439 	genpd->max_off_time_changed = true;
440 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
441 		 genpd->name, "on", elapsed_ns);
442 
443 	return ret;
444 }
445 
446 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
447 {
448 	unsigned int state_idx = genpd->state_idx;
449 	ktime_t time_start;
450 	s64 elapsed_ns;
451 	int ret;
452 
453 	if (!genpd->power_off)
454 		return 0;
455 
456 	if (!timed)
457 		return genpd->power_off(genpd);
458 
459 	time_start = ktime_get();
460 	ret = genpd->power_off(genpd);
461 	if (ret)
462 		return ret;
463 
464 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
465 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
466 		return 0;
467 
468 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
469 	genpd->max_off_time_changed = true;
470 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
471 		 genpd->name, "off", elapsed_ns);
472 
473 	return 0;
474 }
475 
476 /**
477  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
478  * @genpd: PM domain to power off.
479  *
480  * Queue up the execution of genpd_power_off() unless it has already been
481  * queued up.
482  */
483 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
484 {
485 	queue_work(pm_wq, &genpd->power_off_work);
486 }
487 
488 /**
489  * genpd_power_off - Remove power from a given PM domain.
490  * @genpd: PM domain to power down.
491  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
492  * RPM status of the related device is in an intermediate state, not yet turned
493  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
494  * be RPM_SUSPENDED, while it tries to power off the PM domain.
495  *
496  * If all of the @genpd's devices have been suspended and all of its subdomains
497  * have been powered down, remove power from @genpd.
498  */
499 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
500 			   unsigned int depth)
501 {
502 	struct pm_domain_data *pdd;
503 	struct gpd_link *link;
504 	unsigned int not_suspended = 0;
505 
506 	/*
507 	 * Do not try to power off the domain in the following situations:
508 	 * (1) The domain is already in the "power off" state.
509 	 * (2) System suspend is in progress.
510 	 */
511 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
512 		return 0;
513 
514 	/*
515 	 * Abort power off for the PM domain in the following situations:
516 	 * (1) The domain is configured as always on.
517 	 * (2) The domain has a subdomain that is being powered on.
518 	 */
519 	if (genpd_is_always_on(genpd) ||
520 			genpd_is_rpm_always_on(genpd) ||
521 			atomic_read(&genpd->sd_count) > 0)
522 		return -EBUSY;
523 
524 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
525 		enum pm_qos_flags_status stat;
526 
527 		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
528 		if (stat > PM_QOS_FLAGS_NONE)
529 			return -EBUSY;
530 
531 		/*
532 		 * Do not allow the PM domain to be powered off when an IRQ
533 		 * safe device is part of a non-IRQ safe domain.
534 		 */
535 		if (!pm_runtime_suspended(pdd->dev) ||
536 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
537 			not_suspended++;
538 	}
539 
540 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
541 		return -EBUSY;
542 
543 	if (genpd->gov && genpd->gov->power_down_ok) {
544 		if (!genpd->gov->power_down_ok(&genpd->domain))
545 			return -EAGAIN;
546 	}
547 
548 	/* Default to shallowest state. */
549 	if (!genpd->gov)
550 		genpd->state_idx = 0;
551 
552 	if (genpd->power_off) {
553 		int ret;
554 
555 		if (atomic_read(&genpd->sd_count) > 0)
556 			return -EBUSY;
557 
558 		/*
559 		 * If sd_count > 0 at this point, one of the subdomains hasn't
560 		 * managed to call genpd_power_on() for the master yet after
561 		 * incrementing it.  In that case genpd_power_on() will wait
562 		 * for us to drop the lock, so we can call .power_off() and let
563 		 * the genpd_power_on() restore power for us (this shouldn't
564 		 * happen very often).
565 		 */
566 		ret = _genpd_power_off(genpd, true);
567 		if (ret)
568 			return ret;
569 	}
570 
571 	genpd->status = GPD_STATE_POWER_OFF;
572 	genpd_update_accounting(genpd);
573 
574 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
575 		genpd_sd_counter_dec(link->master);
576 		genpd_lock_nested(link->master, depth + 1);
577 		genpd_power_off(link->master, false, depth + 1);
578 		genpd_unlock(link->master);
579 	}
580 
581 	return 0;
582 }
583 
584 /**
585  * genpd_power_on - Restore power to a given PM domain and its masters.
586  * @genpd: PM domain to power up.
587  * @depth: nesting count for lockdep.
588  *
589  * Restore power to @genpd and all of its masters so that it is possible to
590  * resume a device belonging to it.
591  */
592 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
593 {
594 	struct gpd_link *link;
595 	int ret = 0;
596 
597 	if (genpd_status_on(genpd))
598 		return 0;
599 
600 	/*
601 	 * The list is guaranteed not to change while the loop below is being
602 	 * executed, unless one of the masters' .power_on() callbacks fiddles
603 	 * with it.
604 	 */
605 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
606 		struct generic_pm_domain *master = link->master;
607 
608 		genpd_sd_counter_inc(master);
609 
610 		genpd_lock_nested(master, depth + 1);
611 		ret = genpd_power_on(master, depth + 1);
612 		genpd_unlock(master);
613 
614 		if (ret) {
615 			genpd_sd_counter_dec(master);
616 			goto err;
617 		}
618 	}
619 
620 	ret = _genpd_power_on(genpd, true);
621 	if (ret)
622 		goto err;
623 
624 	genpd->status = GPD_STATE_ACTIVE;
625 	genpd_update_accounting(genpd);
626 
627 	return 0;
628 
629  err:
630 	list_for_each_entry_continue_reverse(link,
631 					&genpd->slave_links,
632 					slave_node) {
633 		genpd_sd_counter_dec(link->master);
634 		genpd_lock_nested(link->master, depth + 1);
635 		genpd_power_off(link->master, false, depth + 1);
636 		genpd_unlock(link->master);
637 	}
638 
639 	return ret;
640 }
641 
642 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
643 				     unsigned long val, void *ptr)
644 {
645 	struct generic_pm_domain_data *gpd_data;
646 	struct device *dev;
647 
648 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
649 	dev = gpd_data->base.dev;
650 
651 	for (;;) {
652 		struct generic_pm_domain *genpd;
653 		struct pm_domain_data *pdd;
654 
655 		spin_lock_irq(&dev->power.lock);
656 
657 		pdd = dev->power.subsys_data ?
658 				dev->power.subsys_data->domain_data : NULL;
659 		if (pdd) {
660 			to_gpd_data(pdd)->td.constraint_changed = true;
661 			genpd = dev_to_genpd(dev);
662 		} else {
663 			genpd = ERR_PTR(-ENODATA);
664 		}
665 
666 		spin_unlock_irq(&dev->power.lock);
667 
668 		if (!IS_ERR(genpd)) {
669 			genpd_lock(genpd);
670 			genpd->max_off_time_changed = true;
671 			genpd_unlock(genpd);
672 		}
673 
674 		dev = dev->parent;
675 		if (!dev || dev->power.ignore_children)
676 			break;
677 	}
678 
679 	return NOTIFY_DONE;
680 }
681 
682 /**
683  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
684  * @work: Work structure used for scheduling the execution of this function.
685  */
686 static void genpd_power_off_work_fn(struct work_struct *work)
687 {
688 	struct generic_pm_domain *genpd;
689 
690 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
691 
692 	genpd_lock(genpd);
693 	genpd_power_off(genpd, false, 0);
694 	genpd_unlock(genpd);
695 }
696 
697 /**
698  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
699  * @dev: Device to handle.
700  */
701 static int __genpd_runtime_suspend(struct device *dev)
702 {
703 	int (*cb)(struct device *__dev);
704 
705 	if (dev->type && dev->type->pm)
706 		cb = dev->type->pm->runtime_suspend;
707 	else if (dev->class && dev->class->pm)
708 		cb = dev->class->pm->runtime_suspend;
709 	else if (dev->bus && dev->bus->pm)
710 		cb = dev->bus->pm->runtime_suspend;
711 	else
712 		cb = NULL;
713 
714 	if (!cb && dev->driver && dev->driver->pm)
715 		cb = dev->driver->pm->runtime_suspend;
716 
717 	return cb ? cb(dev) : 0;
718 }
719 
720 /**
721  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
722  * @dev: Device to handle.
723  */
724 static int __genpd_runtime_resume(struct device *dev)
725 {
726 	int (*cb)(struct device *__dev);
727 
728 	if (dev->type && dev->type->pm)
729 		cb = dev->type->pm->runtime_resume;
730 	else if (dev->class && dev->class->pm)
731 		cb = dev->class->pm->runtime_resume;
732 	else if (dev->bus && dev->bus->pm)
733 		cb = dev->bus->pm->runtime_resume;
734 	else
735 		cb = NULL;
736 
737 	if (!cb && dev->driver && dev->driver->pm)
738 		cb = dev->driver->pm->runtime_resume;
739 
740 	return cb ? cb(dev) : 0;
741 }
742 
743 /**
744  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
745  * @dev: Device to suspend.
746  *
747  * Carry out a runtime suspend of a device under the assumption that its
748  * pm_domain field points to the domain member of an object of type
749  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
750  */
751 static int genpd_runtime_suspend(struct device *dev)
752 {
753 	struct generic_pm_domain *genpd;
754 	bool (*suspend_ok)(struct device *__dev);
755 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
756 	bool runtime_pm = pm_runtime_enabled(dev);
757 	ktime_t time_start;
758 	s64 elapsed_ns;
759 	int ret;
760 
761 	dev_dbg(dev, "%s()\n", __func__);
762 
763 	genpd = dev_to_genpd(dev);
764 	if (IS_ERR(genpd))
765 		return -EINVAL;
766 
767 	/*
768 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
769 	 * callbacks for other purposes than runtime PM. In those scenarios
770 	 * runtime PM is disabled. Under these circumstances, we shall skip
771 	 * validating/measuring the PM QoS latency.
772 	 */
773 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
774 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
775 		return -EBUSY;
776 
777 	/* Measure suspend latency. */
778 	time_start = 0;
779 	if (runtime_pm)
780 		time_start = ktime_get();
781 
782 	ret = __genpd_runtime_suspend(dev);
783 	if (ret)
784 		return ret;
785 
786 	ret = genpd_stop_dev(genpd, dev);
787 	if (ret) {
788 		__genpd_runtime_resume(dev);
789 		return ret;
790 	}
791 
792 	/* Update suspend latency value if the measured time exceeds it. */
793 	if (runtime_pm) {
794 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
795 		if (elapsed_ns > td->suspend_latency_ns) {
796 			td->suspend_latency_ns = elapsed_ns;
797 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
798 				elapsed_ns);
799 			genpd->max_off_time_changed = true;
800 			td->constraint_changed = true;
801 		}
802 	}
803 
804 	/*
805 	 * If power.irq_safe is set, this routine may be run with
806 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
807 	 */
808 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
809 		return 0;
810 
811 	genpd_lock(genpd);
812 	genpd_power_off(genpd, true, 0);
813 	genpd_unlock(genpd);
814 
815 	return 0;
816 }
817 
818 /**
819  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
820  * @dev: Device to resume.
821  *
822  * Carry out a runtime resume of a device under the assumption that its
823  * pm_domain field points to the domain member of an object of type
824  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
825  */
826 static int genpd_runtime_resume(struct device *dev)
827 {
828 	struct generic_pm_domain *genpd;
829 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
830 	bool runtime_pm = pm_runtime_enabled(dev);
831 	ktime_t time_start;
832 	s64 elapsed_ns;
833 	int ret;
834 	bool timed = true;
835 
836 	dev_dbg(dev, "%s()\n", __func__);
837 
838 	genpd = dev_to_genpd(dev);
839 	if (IS_ERR(genpd))
840 		return -EINVAL;
841 
842 	/*
843 	 * Since we never power off a non-IRQ-safe domain that holds
844 	 * an IRQ-safe device, there is no need to restore power to it.
845 	 */
846 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
847 		timed = false;
848 		goto out;
849 	}
850 
851 	genpd_lock(genpd);
852 	ret = genpd_power_on(genpd, 0);
853 	genpd_unlock(genpd);
854 
855 	if (ret)
856 		return ret;
857 
858  out:
859 	/* Measure resume latency. */
860 	time_start = 0;
861 	if (timed && runtime_pm)
862 		time_start = ktime_get();
863 
864 	ret = genpd_start_dev(genpd, dev);
865 	if (ret)
866 		goto err_poweroff;
867 
868 	ret = __genpd_runtime_resume(dev);
869 	if (ret)
870 		goto err_stop;
871 
872 	/* Update resume latency value if the measured time exceeds it. */
873 	if (timed && runtime_pm) {
874 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
875 		if (elapsed_ns > td->resume_latency_ns) {
876 			td->resume_latency_ns = elapsed_ns;
877 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
878 				elapsed_ns);
879 			genpd->max_off_time_changed = true;
880 			td->constraint_changed = true;
881 		}
882 	}
883 
884 	return 0;
885 
886 err_stop:
887 	genpd_stop_dev(genpd, dev);
888 err_poweroff:
889 	if (!pm_runtime_is_irq_safe(dev) ||
890 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
891 		genpd_lock(genpd);
892 		genpd_power_off(genpd, true, 0);
893 		genpd_unlock(genpd);
894 	}
895 
896 	return ret;
897 }
898 
899 static bool pd_ignore_unused;
900 static int __init pd_ignore_unused_setup(char *__unused)
901 {
902 	pd_ignore_unused = true;
903 	return 1;
904 }
905 __setup("pd_ignore_unused", pd_ignore_unused_setup);
906 
907 /**
908  * genpd_power_off_unused - Power off all PM domains with no devices in use.
909  */
910 static int __init genpd_power_off_unused(void)
911 {
912 	struct generic_pm_domain *genpd;
913 
914 	if (pd_ignore_unused) {
915 		pr_warn("genpd: Not disabling unused power domains\n");
916 		return 0;
917 	}
918 
919 	mutex_lock(&gpd_list_lock);
920 
921 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
922 		genpd_queue_power_off_work(genpd);
923 
924 	mutex_unlock(&gpd_list_lock);
925 
926 	return 0;
927 }
928 late_initcall(genpd_power_off_unused);
929 
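/*
 * Editorial note: booting with "pd_ignore_unused" on the kernel command
 * line sets pd_ignore_unused above and keeps otherwise-unused domains
 * powered, which can help when bringing up a driver that does not yet
 * attach its devices to their domain.
 */
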
930 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
931 
932 static bool genpd_present(const struct generic_pm_domain *genpd)
933 {
934 	const struct generic_pm_domain *gpd;
935 
936 	if (IS_ERR_OR_NULL(genpd))
937 		return false;
938 
939 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
940 		if (gpd == genpd)
941 			return true;
942 
943 	return false;
944 }
945 
946 #endif
947 
948 #ifdef CONFIG_PM_SLEEP
949 
950 /**
951  * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
952  * @genpd: PM domain to power off, if possible.
953  * @use_lock: whether to take the domain locks while walking the masters.
954  * @depth: nesting count for lockdep.
955  *
956  * Check if the given PM domain can be powered off (during system suspend or
957  * hibernation) and do that if so.  Also, in that case propagate to its masters.
958  *
959  * This function is only called in "noirq" and "syscore" stages of system power
960  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
961  * these cases the lock must be held.
962  */
963 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
964 				 unsigned int depth)
965 {
966 	struct gpd_link *link;
967 
968 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
969 		return;
970 
971 	if (genpd->suspended_count != genpd->device_count
972 	    || atomic_read(&genpd->sd_count) > 0)
973 		return;
974 
975 	/* Choose the deepest state when suspending */
976 	genpd->state_idx = genpd->state_count - 1;
977 	if (_genpd_power_off(genpd, false))
978 		return;
979 
980 	genpd->status = GPD_STATE_POWER_OFF;
981 
982 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
983 		genpd_sd_counter_dec(link->master);
984 
985 		if (use_lock)
986 			genpd_lock_nested(link->master, depth + 1);
987 
988 		genpd_sync_power_off(link->master, use_lock, depth + 1);
989 
990 		if (use_lock)
991 			genpd_unlock(link->master);
992 	}
993 }
994 
995 /**
996  * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
997  * @genpd: PM domain to power on.
998  * @use_lock: use the lock.
999  * @use_lock: whether to take the domain locks while walking the masters.
1000  *
1001  * This function is only called in "noirq" and "syscore" stages of system power
1002  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1003  * these cases the lock must be held.
1004  */
1005 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1006 				unsigned int depth)
1007 {
1008 	struct gpd_link *link;
1009 
1010 	if (genpd_status_on(genpd))
1011 		return;
1012 
1013 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
1014 		genpd_sd_counter_inc(link->master);
1015 
1016 		if (use_lock)
1017 			genpd_lock_nested(link->master, depth + 1);
1018 
1019 		genpd_sync_power_on(link->master, use_lock, depth + 1);
1020 
1021 		if (use_lock)
1022 			genpd_unlock(link->master);
1023 	}
1024 
1025 	_genpd_power_on(genpd, false);
1026 
1027 	genpd->status = GPD_STATE_ACTIVE;
1028 }
1029 
1030 /**
1031  * resume_needed - Check whether to resume a device before system suspend.
1032  * @dev: Device to check.
1033  * @genpd: PM domain the device belongs to.
1034  *
1035  * There are two cases in which a device that can wake up the system from sleep
1036  * states should be resumed by genpd_prepare(): (1) if the device is enabled
1037  * to wake up the system and it has to remain active for this purpose while the
1038  * system is in the sleep state and (2) if the device is not enabled to wake up
1039  * the system from sleep states and it generally doesn't generate wakeup signals
1040  * by itself (those signals are generated on its behalf by other parts of the
1041  * system).  In the latter case it may be necessary to reconfigure the device's
1042  * wakeup settings during system suspend, because it may have been set up to
1043  * signal remote wakeup from the system's working state as needed by runtime PM.
1044  * Return 'true' in either of the above cases.
1045  */
1046 static bool resume_needed(struct device *dev,
1047 			  const struct generic_pm_domain *genpd)
1048 {
1049 	bool active_wakeup;
1050 
1051 	if (!device_can_wakeup(dev))
1052 		return false;
1053 
1054 	active_wakeup = genpd_is_active_wakeup(genpd);
1055 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1056 }
1057 
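/*
 * Illustrative sketch (not part of the original file): the decision above
 * as a truth table (a resume is needed when the device's wakeup setting may
 * have to be reconfigured for the system sleep state):
 *
 *	device_may_wakeup | genpd_is_active_wakeup | resume_needed
 *	------------------+------------------------+--------------
 *	       true       |          true          |     true
 *	       true       |          false         |     false
 *	       false      |          true          |     false
 *	       false      |          false         |     true
 *
 * (and always false when !device_can_wakeup()).
 */
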
1058 /**
1059  * genpd_prepare - Start power transition of a device in a PM domain.
1060  * @dev: Device to start the transition of.
1061  *
1062  * Start a power transition of a device (during a system-wide power transition)
1063  * under the assumption that its pm_domain field points to the domain member of
1064  * an object of type struct generic_pm_domain representing a PM domain
1065  * consisting of I/O devices.
1066  */
1067 static int genpd_prepare(struct device *dev)
1068 {
1069 	struct generic_pm_domain *genpd;
1070 	int ret;
1071 
1072 	dev_dbg(dev, "%s()\n", __func__);
1073 
1074 	genpd = dev_to_genpd(dev);
1075 	if (IS_ERR(genpd))
1076 		return -EINVAL;
1077 
1078 	/*
1079 	 * If a wakeup request is pending for the device, it should be woken up
1080 	 * at this point and a system wakeup event should be reported if it's
1081 	 * set up to wake up the system from sleep states.
1082 	 */
1083 	if (resume_needed(dev, genpd))
1084 		pm_runtime_resume(dev);
1085 
1086 	genpd_lock(genpd);
1087 
1088 	if (genpd->prepared_count++ == 0)
1089 		genpd->suspended_count = 0;
1090 
1091 	genpd_unlock(genpd);
1092 
1093 	ret = pm_generic_prepare(dev);
1094 	if (ret < 0) {
1095 		genpd_lock(genpd);
1096 
1097 		genpd->prepared_count--;
1098 
1099 		genpd_unlock(genpd);
1100 	}
1101 
1102 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1103 	return ret >= 0 ? 0 : ret;
1104 }
1105 
1106 /**
1107  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1108  *   I/O PM domain.
1109  * @dev: Device to suspend.
1110  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1111  *
1112  * Stop the device and remove power from the domain if all devices in it have
1113  * been stopped.
1114  */
1115 static int genpd_finish_suspend(struct device *dev, bool poweroff)
1116 {
1117 	struct generic_pm_domain *genpd;
1118 	int ret = 0;
1119 
1120 	genpd = dev_to_genpd(dev);
1121 	if (IS_ERR(genpd))
1122 		return -EINVAL;
1123 
1124 	if (poweroff)
1125 		ret = pm_generic_poweroff_noirq(dev);
1126 	else
1127 		ret = pm_generic_suspend_noirq(dev);
1128 	if (ret)
1129 		return ret;
1130 
1131 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1132 		return 0;
1133 
1134 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1135 	    !pm_runtime_status_suspended(dev)) {
1136 		ret = genpd_stop_dev(genpd, dev);
1137 		if (ret) {
1138 			if (poweroff)
1139 				pm_generic_restore_noirq(dev);
1140 			else
1141 				pm_generic_resume_noirq(dev);
1142 			return ret;
1143 		}
1144 	}
1145 
1146 	genpd_lock(genpd);
1147 	genpd->suspended_count++;
1148 	genpd_sync_power_off(genpd, true, 0);
1149 	genpd_unlock(genpd);
1150 
1151 	return 0;
1152 }
1153 
1154 /**
1155  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1156  * @dev: Device to suspend.
1157  *
1158  * Stop the device and remove power from the domain if all devices in it have
1159  * been stopped.
1160  */
1161 static int genpd_suspend_noirq(struct device *dev)
1162 {
1163 	dev_dbg(dev, "%s()\n", __func__);
1164 
1165 	return genpd_finish_suspend(dev, false);
1166 }
1167 
1168 /**
1169  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1170  * @dev: Device to resume.
1171  *
1172  * Restore power to the device's PM domain, if necessary, and start the device.
1173  */
1174 static int genpd_resume_noirq(struct device *dev)
1175 {
1176 	struct generic_pm_domain *genpd;
1177 	int ret;
1178 
1179 	dev_dbg(dev, "%s()\n", __func__);
1180 
1181 	genpd = dev_to_genpd(dev);
1182 	if (IS_ERR(genpd))
1183 		return -EINVAL;
1184 
1185 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1186 		return pm_generic_resume_noirq(dev);
1187 
1188 	genpd_lock(genpd);
1189 	genpd_sync_power_on(genpd, true, 0);
1190 	genpd->suspended_count--;
1191 	genpd_unlock(genpd);
1192 
1193 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1194 	    !pm_runtime_status_suspended(dev)) {
1195 		ret = genpd_start_dev(genpd, dev);
1196 		if (ret)
1197 			return ret;
1198 	}
1199 
1200 	return pm_generic_resume_noirq(dev);
1201 }
1202 
1203 /**
1204  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1205  * @dev: Device to freeze.
1206  *
1207  * Carry out a late freeze of a device under the assumption that its
1208  * pm_domain field points to the domain member of an object of type
1209  * struct generic_pm_domain representing a power domain consisting of I/O
1210  * devices.
1211  */
1212 static int genpd_freeze_noirq(struct device *dev)
1213 {
1214 	const struct generic_pm_domain *genpd;
1215 	int ret = 0;
1216 
1217 	dev_dbg(dev, "%s()\n", __func__);
1218 
1219 	genpd = dev_to_genpd(dev);
1220 	if (IS_ERR(genpd))
1221 		return -EINVAL;
1222 
1223 	ret = pm_generic_freeze_noirq(dev);
1224 	if (ret)
1225 		return ret;
1226 
1227 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1228 	    !pm_runtime_status_suspended(dev))
1229 		ret = genpd_stop_dev(genpd, dev);
1230 
1231 	return ret;
1232 }
1233 
1234 /**
1235  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1236  * @dev: Device to thaw.
1237  *
1238  * Start the device, unless power has been removed from the domain already
1239  * before the system transition.
1240  */
1241 static int genpd_thaw_noirq(struct device *dev)
1242 {
1243 	const struct generic_pm_domain *genpd;
1244 	int ret = 0;
1245 
1246 	dev_dbg(dev, "%s()\n", __func__);
1247 
1248 	genpd = dev_to_genpd(dev);
1249 	if (IS_ERR(genpd))
1250 		return -EINVAL;
1251 
1252 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1253 	    !pm_runtime_status_suspended(dev)) {
1254 		ret = genpd_start_dev(genpd, dev);
1255 		if (ret)
1256 			return ret;
1257 	}
1258 
1259 	return pm_generic_thaw_noirq(dev);
1260 }
1261 
1262 /**
1263  * genpd_poweroff_noirq - Completion of hibernation of device in an
1264  *   I/O PM domain.
1265  * @dev: Device to poweroff.
1266  *
1267  * Stop the device and remove power from the domain if all devices in it have
1268  * been stopped.
1269  */
1270 static int genpd_poweroff_noirq(struct device *dev)
1271 {
1272 	dev_dbg(dev, "%s()\n", __func__);
1273 
1274 	return genpd_finish_suspend(dev, true);
1275 }
1276 
1277 /**
1278  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1279  * @dev: Device to resume.
1280  *
1281  * Make sure the domain will be in the same power state as before the
1282  * hibernation the system is resuming from and start the device if necessary.
1283  */
1284 static int genpd_restore_noirq(struct device *dev)
1285 {
1286 	struct generic_pm_domain *genpd;
1287 	int ret = 0;
1288 
1289 	dev_dbg(dev, "%s()\n", __func__);
1290 
1291 	genpd = dev_to_genpd(dev);
1292 	if (IS_ERR(genpd))
1293 		return -EINVAL;
1294 
1295 	/*
1296 	 * At this point suspended_count == 0 means we are being run for the
1297 	 * first time for the given domain in the present cycle.
1298 	 */
1299 	genpd_lock(genpd);
1300 	if (genpd->suspended_count++ == 0)
1301 		/*
1302 		 * The boot kernel might put the domain into an arbitrary state,
1303 		 * so make it appear as powered off to genpd_sync_power_on(),
1304 		 * which then tries to power it on in case it was really off.
1305 		 */
1306 		genpd->status = GPD_STATE_POWER_OFF;
1307 
1308 	genpd_sync_power_on(genpd, true, 0);
1309 	genpd_unlock(genpd);
1310 
1311 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1312 	    !pm_runtime_status_suspended(dev)) {
1313 		ret = genpd_start_dev(genpd, dev);
1314 		if (ret)
1315 			return ret;
1316 	}
1317 
1318 	return pm_generic_restore_noirq(dev);
1319 }
1320 
1321 /**
1322  * genpd_complete - Complete power transition of a device in a power domain.
1323  * @dev: Device to complete the transition of.
1324  *
1325  * Complete a power transition of a device (during a system-wide power
1326  * transition) under the assumption that its pm_domain field points to the
1327  * domain member of an object of type struct generic_pm_domain representing
1328  * a power domain consisting of I/O devices.
1329  */
1330 static void genpd_complete(struct device *dev)
1331 {
1332 	struct generic_pm_domain *genpd;
1333 
1334 	dev_dbg(dev, "%s()\n", __func__);
1335 
1336 	genpd = dev_to_genpd(dev);
1337 	if (IS_ERR(genpd))
1338 		return;
1339 
1340 	pm_generic_complete(dev);
1341 
1342 	genpd_lock(genpd);
1343 
1344 	genpd->prepared_count--;
1345 	if (!genpd->prepared_count)
1346 		genpd_queue_power_off_work(genpd);
1347 
1348 	genpd_unlock(genpd);
1349 }
1350 
1351 /**
1352  * genpd_syscore_switch - Switch power during system core suspend or resume.
1353  * @dev: Device that normally is marked as "always on" to switch power for.
1354  *
1355  * This routine may only be called during the system core (syscore) suspend or
1356  * resume phase for devices whose "always on" flags are set.
1357  */
1358 static void genpd_syscore_switch(struct device *dev, bool suspend)
1359 {
1360 	struct generic_pm_domain *genpd;
1361 
1362 	genpd = dev_to_genpd(dev);
1363 	if (!genpd_present(genpd))
1364 		return;
1365 
1366 	if (suspend) {
1367 		genpd->suspended_count++;
1368 		genpd_sync_power_off(genpd, false, 0);
1369 	} else {
1370 		genpd_sync_power_on(genpd, false, 0);
1371 		genpd->suspended_count--;
1372 	}
1373 }
1374 
1375 void pm_genpd_syscore_poweroff(struct device *dev)
1376 {
1377 	genpd_syscore_switch(dev, true);
1378 }
1379 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1380 
1381 void pm_genpd_syscore_poweron(struct device *dev)
1382 {
1383 	genpd_syscore_switch(dev, false);
1384 }
1385 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1386 
1387 #else /* !CONFIG_PM_SLEEP */
1388 
1389 #define genpd_prepare		NULL
1390 #define genpd_suspend_noirq	NULL
1391 #define genpd_resume_noirq	NULL
1392 #define genpd_freeze_noirq	NULL
1393 #define genpd_thaw_noirq	NULL
1394 #define genpd_poweroff_noirq	NULL
1395 #define genpd_restore_noirq	NULL
1396 #define genpd_complete		NULL
1397 
1398 #endif /* CONFIG_PM_SLEEP */
1399 
1400 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
1401 {
1402 	struct generic_pm_domain_data *gpd_data;
1403 	int ret;
1404 
1405 	ret = dev_pm_get_subsys_data(dev);
1406 	if (ret)
1407 		return ERR_PTR(ret);
1408 
1409 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1410 	if (!gpd_data) {
1411 		ret = -ENOMEM;
1412 		goto err_put;
1413 	}
1414 
1415 	gpd_data->base.dev = dev;
1416 	gpd_data->td.constraint_changed = true;
1417 	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1418 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1419 
1420 	spin_lock_irq(&dev->power.lock);
1421 
1422 	if (dev->power.subsys_data->domain_data) {
1423 		ret = -EINVAL;
1424 		goto err_free;
1425 	}
1426 
1427 	dev->power.subsys_data->domain_data = &gpd_data->base;
1428 
1429 	spin_unlock_irq(&dev->power.lock);
1430 
1431 	return gpd_data;
1432 
1433  err_free:
1434 	spin_unlock_irq(&dev->power.lock);
1435 	kfree(gpd_data);
1436  err_put:
1437 	dev_pm_put_subsys_data(dev);
1438 	return ERR_PTR(ret);
1439 }
1440 
1441 static void genpd_free_dev_data(struct device *dev,
1442 				struct generic_pm_domain_data *gpd_data)
1443 {
1444 	spin_lock_irq(&dev->power.lock);
1445 
1446 	dev->power.subsys_data->domain_data = NULL;
1447 
1448 	spin_unlock_irq(&dev->power.lock);
1449 
1450 	kfree(gpd_data);
1451 	dev_pm_put_subsys_data(dev);
1452 }
1453 
1454 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1455 				 int cpu, bool set, unsigned int depth)
1456 {
1457 	struct gpd_link *link;
1458 
1459 	if (!genpd_is_cpu_domain(genpd))
1460 		return;
1461 
1462 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
1463 		struct generic_pm_domain *master = link->master;
1464 
1465 		genpd_lock_nested(master, depth + 1);
1466 		genpd_update_cpumask(master, cpu, set, depth + 1);
1467 		genpd_unlock(master);
1468 	}
1469 
1470 	if (set)
1471 		cpumask_set_cpu(cpu, genpd->cpus);
1472 	else
1473 		cpumask_clear_cpu(cpu, genpd->cpus);
1474 }
1475 
1476 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1477 {
1478 	if (cpu >= 0)
1479 		genpd_update_cpumask(genpd, cpu, true, 0);
1480 }
1481 
1482 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1483 {
1484 	if (cpu >= 0)
1485 		genpd_update_cpumask(genpd, cpu, false, 0);
1486 }
1487 
1488 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1489 {
1490 	int cpu;
1491 
1492 	if (!genpd_is_cpu_domain(genpd))
1493 		return -1;
1494 
1495 	for_each_possible_cpu(cpu) {
1496 		if (get_cpu_device(cpu) == dev)
1497 			return cpu;
1498 	}
1499 
1500 	return -1;
1501 }
1502 
1503 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1504 			    struct device *base_dev)
1505 {
1506 	struct generic_pm_domain_data *gpd_data;
1507 	int ret;
1508 
1509 	dev_dbg(dev, "%s()\n", __func__);
1510 
1511 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1512 		return -EINVAL;
1513 
1514 	gpd_data = genpd_alloc_dev_data(dev);
1515 	if (IS_ERR(gpd_data))
1516 		return PTR_ERR(gpd_data);
1517 
1518 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1519 
1520 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1521 	if (ret)
1522 		goto out;
1523 
1524 	genpd_lock(genpd);
1525 
1526 	genpd_set_cpumask(genpd, gpd_data->cpu);
1527 	dev_pm_domain_set(dev, &genpd->domain);
1528 
1529 	genpd->device_count++;
1530 	genpd->max_off_time_changed = true;
1531 
1532 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1533 
1534 	genpd_unlock(genpd);
1535  out:
1536 	if (ret)
1537 		genpd_free_dev_data(dev, gpd_data);
1538 	else
1539 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1540 					DEV_PM_QOS_RESUME_LATENCY);
1541 
1542 	return ret;
1543 }
1544 
1545 /**
1546  * pm_genpd_add_device - Add a device to an I/O PM domain.
1547  * @genpd: PM domain to add the device to.
1548  * @dev: Device to be added.
1549  */
1550 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1551 {
1552 	int ret;
1553 
1554 	mutex_lock(&gpd_list_lock);
1555 	ret = genpd_add_device(genpd, dev, dev);
1556 	mutex_unlock(&gpd_list_lock);
1557 
1558 	return ret;
1559 }
1560 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1561 
1562 static int genpd_remove_device(struct generic_pm_domain *genpd,
1563 			       struct device *dev)
1564 {
1565 	struct generic_pm_domain_data *gpd_data;
1566 	struct pm_domain_data *pdd;
1567 	int ret = 0;
1568 
1569 	dev_dbg(dev, "%s()\n", __func__);
1570 
1571 	pdd = dev->power.subsys_data->domain_data;
1572 	gpd_data = to_gpd_data(pdd);
1573 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1574 				   DEV_PM_QOS_RESUME_LATENCY);
1575 
1576 	genpd_lock(genpd);
1577 
1578 	if (genpd->prepared_count > 0) {
1579 		ret = -EAGAIN;
1580 		goto out;
1581 	}
1582 
1583 	genpd->device_count--;
1584 	genpd->max_off_time_changed = true;
1585 
1586 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1587 	dev_pm_domain_set(dev, NULL);
1588 
1589 	list_del_init(&pdd->list_node);
1590 
1591 	genpd_unlock(genpd);
1592 
1593 	if (genpd->detach_dev)
1594 		genpd->detach_dev(genpd, dev);
1595 
1596 	genpd_free_dev_data(dev, gpd_data);
1597 
1598 	return 0;
1599 
1600  out:
1601 	genpd_unlock(genpd);
1602 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1603 
1604 	return ret;
1605 }
1606 
1607 /**
1608  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1609  * @dev: Device to be removed.
1610  */
1611 int pm_genpd_remove_device(struct device *dev)
1612 {
1613 	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
1614 
1615 	if (!genpd)
1616 		return -EINVAL;
1617 
1618 	return genpd_remove_device(genpd, dev);
1619 }
1620 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1621 
1622 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1623 			       struct generic_pm_domain *subdomain)
1624 {
1625 	struct gpd_link *link, *itr;
1626 	int ret = 0;
1627 
1628 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1629 	    || genpd == subdomain)
1630 		return -EINVAL;
1631 
1632 	/*
1633 	 * If the domain can be powered on/off in an IRQ safe
1634 	 * context, ensure that the subdomain can also be
1635 	 * powered on/off in that context.
1636 	 */
1637 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1638 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1639 				genpd->name, subdomain->name);
1640 		return -EINVAL;
1641 	}
1642 
1643 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1644 	if (!link)
1645 		return -ENOMEM;
1646 
1647 	genpd_lock(subdomain);
1648 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1649 
1650 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1651 		ret = -EINVAL;
1652 		goto out;
1653 	}
1654 
1655 	list_for_each_entry(itr, &genpd->master_links, master_node) {
1656 		if (itr->slave == subdomain && itr->master == genpd) {
1657 			ret = -EINVAL;
1658 			goto out;
1659 		}
1660 	}
1661 
1662 	link->master = genpd;
1663 	list_add_tail(&link->master_node, &genpd->master_links);
1664 	link->slave = subdomain;
1665 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1666 	if (genpd_status_on(subdomain))
1667 		genpd_sd_counter_inc(genpd);
1668 
1669  out:
1670 	genpd_unlock(genpd);
1671 	genpd_unlock(subdomain);
1672 	if (ret)
1673 		kfree(link);
1674 	return ret;
1675 }
1676 
1677 /**
1678  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1679  * @genpd: Master PM domain to add the subdomain to.
1680  * @subdomain: Subdomain to be added.
1681  */
1682 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1683 			   struct generic_pm_domain *subdomain)
1684 {
1685 	int ret;
1686 
1687 	mutex_lock(&gpd_list_lock);
1688 	ret = genpd_add_subdomain(genpd, subdomain);
1689 	mutex_unlock(&gpd_list_lock);
1690 
1691 	return ret;
1692 }
1693 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
1694 
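/*
 * Illustrative sketch (not part of the original file): a hypothetical SoC
 * driver nests two already-initialized domains so that powering on the
 * child forces its parent on first, e.g.:
 *
 *	err = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 */
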
1695 /**
1696  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1697  * @genpd: Master PM domain to remove the subdomain from.
1698  * @subdomain: Subdomain to be removed.
1699  */
1700 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1701 			      struct generic_pm_domain *subdomain)
1702 {
1703 	struct gpd_link *l, *link;
1704 	int ret = -EINVAL;
1705 
1706 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1707 		return -EINVAL;
1708 
1709 	genpd_lock(subdomain);
1710 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1711 
1712 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1713 		pr_warn("%s: unable to remove subdomain %s\n",
1714 			genpd->name, subdomain->name);
1715 		ret = -EBUSY;
1716 		goto out;
1717 	}
1718 
1719 	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1720 		if (link->slave != subdomain)
1721 			continue;
1722 
1723 		list_del(&link->master_node);
1724 		list_del(&link->slave_node);
1725 		kfree(link);
1726 		if (genpd_status_on(subdomain))
1727 			genpd_sd_counter_dec(genpd);
1728 
1729 		ret = 0;
1730 		break;
1731 	}
1732 
1733 out:
1734 	genpd_unlock(genpd);
1735 	genpd_unlock(subdomain);
1736 
1737 	return ret;
1738 }
1739 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1740 
1741 static void genpd_free_default_power_state(struct genpd_power_state *states,
1742 					   unsigned int state_count)
1743 {
1744 	kfree(states);
1745 }
1746 
1747 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1748 {
1749 	struct genpd_power_state *state;
1750 
1751 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1752 	if (!state)
1753 		return -ENOMEM;
1754 
1755 	genpd->states = state;
1756 	genpd->state_count = 1;
1757 	genpd->free_states = genpd_free_default_power_state;
1758 
1759 	return 0;
1760 }
1761 
1762 static void genpd_lock_init(struct generic_pm_domain *genpd)
1763 {
1764 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1765 		spin_lock_init(&genpd->slock);
1766 		genpd->lock_ops = &genpd_spin_ops;
1767 	} else {
1768 		mutex_init(&genpd->mlock);
1769 		genpd->lock_ops = &genpd_mtx_ops;
1770 	}
1771 }
1772 
1773 /**
1774  * pm_genpd_init - Initialize a generic I/O PM domain object.
1775  * @genpd: PM domain object to initialize.
1776  * @gov: PM domain governor to associate with the domain (may be NULL).
1777  * @is_off: Whether the domain should start in the powered-off state.
1778  *
1779  * Returns 0 on successful initialization, else a negative error code.
1780  */
1781 int pm_genpd_init(struct generic_pm_domain *genpd,
1782 		  struct dev_power_governor *gov, bool is_off)
1783 {
1784 	int ret;
1785 
1786 	if (IS_ERR_OR_NULL(genpd))
1787 		return -EINVAL;
1788 
1789 	INIT_LIST_HEAD(&genpd->master_links);
1790 	INIT_LIST_HEAD(&genpd->slave_links);
1791 	INIT_LIST_HEAD(&genpd->dev_list);
1792 	genpd_lock_init(genpd);
1793 	genpd->gov = gov;
1794 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1795 	atomic_set(&genpd->sd_count, 0);
1796 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1797 	genpd->device_count = 0;
1798 	genpd->max_off_time_ns = -1;
1799 	genpd->max_off_time_changed = true;
1800 	genpd->provider = NULL;
1801 	genpd->has_provider = false;
1802 	genpd->accounting_time = ktime_get();
1803 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1804 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1805 	genpd->domain.ops.prepare = genpd_prepare;
1806 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1807 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1808 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1809 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1810 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1811 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1812 	genpd->domain.ops.complete = genpd_complete;
1813 
1814 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1815 		genpd->dev_ops.stop = pm_clk_suspend;
1816 		genpd->dev_ops.start = pm_clk_resume;
1817 	}
1818 
1819 	/* Always-on domains must be powered on at initialization. */
1820 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
1821 			!genpd_status_on(genpd))
1822 		return -EINVAL;
1823 
1824 	if (genpd_is_cpu_domain(genpd) &&
1825 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1826 		return -ENOMEM;
1827 
1828 	/* Use only one "off" state if there were no states declared */
1829 	if (genpd->state_count == 0) {
1830 		ret = genpd_set_default_power_state(genpd);
1831 		if (ret) {
1832 			if (genpd_is_cpu_domain(genpd))
1833 				free_cpumask_var(genpd->cpus);
1834 			return ret;
1835 		}
1836 	} else if (!gov && genpd->state_count > 1) {
1837 		pr_warn("%s: no governor for states\n", genpd->name);
1838 	}
1839 
1840 	device_initialize(&genpd->dev);
1841 	dev_set_name(&genpd->dev, "%s", genpd->name);
1842 
1843 	mutex_lock(&gpd_list_lock);
1844 	list_add(&genpd->gpd_list_node, &gpd_list);
1845 	mutex_unlock(&gpd_list_lock);
1846 
1847 	return 0;
1848 }
1849 EXPORT_SYMBOL_GPL(pm_genpd_init);
1850 
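/*
 * Illustrative sketch (not part of the original file): a minimal provider,
 * with hypothetical my_pd_power_on()/my_pd_power_off() callbacks standing
 * in for whatever switches the actual power rail. The domain is registered
 * initially off and a device is then attached with pm_genpd_add_device().
 */
static int my_pd_power_on(struct generic_pm_domain *pd)
{
	return 0;	/* ungate clocks / enable the rail here */
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	return 0;	/* gate clocks / disable the rail here */
}

static struct generic_pm_domain my_pd = {
	.name = "my_pd",
	.power_on = my_pd_power_on,
	.power_off = my_pd_power_off,
};

static int my_pd_register(struct device *dev)
{
	int ret = pm_genpd_init(&my_pd, NULL, true);

	if (ret)
		return ret;

	return pm_genpd_add_device(&my_pd, dev);
}
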
1851 static int genpd_remove(struct generic_pm_domain *genpd)
1852 {
1853 	struct gpd_link *l, *link;
1854 
1855 	if (IS_ERR_OR_NULL(genpd))
1856 		return -EINVAL;
1857 
1858 	genpd_lock(genpd);
1859 
1860 	if (genpd->has_provider) {
1861 		genpd_unlock(genpd);
1862 		pr_err("Provider present, unable to remove %s\n", genpd->name);
1863 		return -EBUSY;
1864 	}
1865 
1866 	if (!list_empty(&genpd->master_links) || genpd->device_count) {
1867 		genpd_unlock(genpd);
1868 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1869 		return -EBUSY;
1870 	}
1871 
1872 	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1873 		list_del(&link->master_node);
1874 		list_del(&link->slave_node);
1875 		kfree(link);
1876 	}
1877 
1878 	list_del(&genpd->gpd_list_node);
1879 	genpd_unlock(genpd);
1880 	cancel_work_sync(&genpd->power_off_work);
1881 	if (genpd_is_cpu_domain(genpd))
1882 		free_cpumask_var(genpd->cpus);
1883 	if (genpd->free_states)
1884 		genpd->free_states(genpd->states, genpd->state_count);
1885 
1886 	pr_debug("%s: removed %s\n", __func__, genpd->name);
1887 
1888 	return 0;
1889 }
1890 
1891 /**
1892  * pm_genpd_remove - Remove a generic I/O PM domain
1893  * @genpd: Pointer to PM domain that is to be removed.
1894  *
1895  * To remove the PM domain, this function:
1896  *  - Removes the PM domain as a subdomain to any parent domains,
1897  *    if it was added.
1898  *  - Removes the PM domain from the list of registered PM domains.
1899  *
1900  * The PM domain will only be removed if the associated provider has
1901  * been removed, it is not a parent to any other PM domain, and it has
1902  * no devices associated with it.
1903  */
1904 int pm_genpd_remove(struct generic_pm_domain *genpd)
1905 {
1906 	int ret;
1907 
1908 	mutex_lock(&gpd_list_lock);
1909 	ret = genpd_remove(genpd);
1910 	mutex_unlock(&gpd_list_lock);
1911 
1912 	return ret;
1913 }
1914 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1915 
1916 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1917 
1918 /*
1919  * Device Tree based PM domain providers.
1920  *
1921  * The code below implements generic device tree based PM domain providers that
1922  * bind device tree nodes with generic PM domains registered in the system.
1923  *
1924  * Any driver that registers generic PM domains and needs to support binding of
1925  * devices to these domains is supposed to register a PM domain provider, which
1926  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1927  *
1928  * Two simple mapping functions have been provided for convenience:
1929  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1930  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1931  *    index.
1932  */
1933 
1934 /**
1935  * struct of_genpd_provider - PM domain provider registration structure
1936  * @link: Entry in global list of PM domain providers
1937  * @node: Pointer to device tree node of PM domain provider
1938  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1939  *         into a PM domain.
1940  * @data: context pointer to be passed into @xlate callback
1941  */
1942 struct of_genpd_provider {
1943 	struct list_head link;
1944 	struct device_node *node;
1945 	genpd_xlate_t xlate;
1946 	void *data;
1947 };
1948 
1949 /* List of registered PM domain providers. */
1950 static LIST_HEAD(of_genpd_providers);
1951 /* Mutex to protect the list above. */
1952 static DEFINE_MUTEX(of_genpd_mutex);
1953 
1954 /**
1955  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1956  * @genpdspec: OF phandle args to map into a PM domain
1957  * @data: xlate function private data - pointer to struct generic_pm_domain
1958  *
1959  * This is a generic xlate function that can be used to model PM domains that
1960  * have their own device tree nodes. The private data of the xlate function
1961  * needs to be a valid pointer to struct generic_pm_domain.
1962  */
1963 static struct generic_pm_domain *genpd_xlate_simple(
1964 					struct of_phandle_args *genpdspec,
1965 					void *data)
1966 {
1967 	return data;
1968 }
1969 
1970 /**
1971  * genpd_xlate_onecell() - Xlate function using a single index.
1972  * @genpdspec: OF phandle args to map into a PM domain
1973  * @data: xlate function private data - pointer to struct genpd_onecell_data
1974  *
1975  * This is a generic xlate function that can be used to model simple PM domain
1976  * controllers that have one device tree node and provide multiple PM domains.
1977  * A single cell is used as an index into an array of PM domains specified in
1978  * the genpd_onecell_data struct when registering the provider.
1979  */
1980 static struct generic_pm_domain *genpd_xlate_onecell(
1981 					struct of_phandle_args *genpdspec,
1982 					void *data)
1983 {
1984 	struct genpd_onecell_data *genpd_data = data;
1985 	unsigned int idx = genpdspec->args[0];
1986 
1987 	if (genpdspec->args_count != 1)
1988 		return ERR_PTR(-EINVAL);
1989 
1990 	if (idx >= genpd_data->num_domains) {
1991 		pr_err("%s: invalid domain index %u\n", __func__, idx);
1992 		return ERR_PTR(-EINVAL);
1993 	}
1994 
1995 	if (!genpd_data->domains[idx])
1996 		return ERR_PTR(-ENOENT);
1997 
1998 	return genpd_data->domains[idx];
1999 }
2000 
2001 /**
2002  * genpd_add_provider() - Register a PM domain provider for a node
2003  * @np: Device node pointer associated with the PM domain provider.
2004  * @xlate: Callback for decoding PM domain from phandle arguments.
2005  * @data: Context pointer for @xlate callback.
2006  */
2007 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2008 			      void *data)
2009 {
2010 	struct of_genpd_provider *cp;
2011 
2012 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2013 	if (!cp)
2014 		return -ENOMEM;
2015 
2016 	cp->node = of_node_get(np);
2017 	cp->data = data;
2018 	cp->xlate = xlate;
2019 
2020 	mutex_lock(&of_genpd_mutex);
2021 	list_add(&cp->link, &of_genpd_providers);
2022 	mutex_unlock(&of_genpd_mutex);
2023 	pr_debug("Added domain provider from %pOF\n", np);
2024 
2025 	return 0;
2026 }
2027 
2028 /**
2029  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2030  * @np: Device node pointer associated with the PM domain provider.
2031  * @genpd: Pointer to PM domain associated with the PM domain provider.
2032  */
2033 int of_genpd_add_provider_simple(struct device_node *np,
2034 				 struct generic_pm_domain *genpd)
2035 {
2036 	int ret = -EINVAL;
2037 
2038 	if (!np || !genpd)
2039 		return -EINVAL;
2040 
2041 	mutex_lock(&gpd_list_lock);
2042 
2043 	if (!genpd_present(genpd))
2044 		goto unlock;
2045 
2046 	genpd->dev.of_node = np;
2047 
2048 	/* Parse genpd OPP table */
2049 	if (genpd->set_performance_state) {
2050 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2051 		if (ret) {
2052 			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2053 				ret);
2054 			goto unlock;
2055 		}
2056 
2057 		/*
2058 		 * Save table for faster processing while setting performance
2059 		 * state.
2060 		 */
2061 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2062 		WARN_ON(!genpd->opp_table);
2063 	}
2064 
2065 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2066 	if (ret) {
2067 		if (genpd->set_performance_state) {
2068 			dev_pm_opp_put_opp_table(genpd->opp_table);
2069 			dev_pm_opp_of_remove_table(&genpd->dev);
2070 		}
2071 
2072 		goto unlock;
2073 	}
2074 
2075 	genpd->provider = &np->fwnode;
2076 	genpd->has_provider = true;
2077 
2078 unlock:
2079 	mutex_unlock(&gpd_list_lock);
2080 
2081 	return ret;
2082 }
2083 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
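
/*
 * Example (editorial sketch): a 1:1 provider, assuming a hypothetical DT
 * node with #power-domain-cells = <0> and the "foo_pd" domain sketched above:
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	// In the provider driver's probe(), after pm_genpd_init():
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 */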
2084 
2085 /**
2086  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2087  * @np: Device node pointer associated with the PM domain provider.
2088  * @data: Pointer to the data associated with the PM domain provider.
2089  */
2090 int of_genpd_add_provider_onecell(struct device_node *np,
2091 				  struct genpd_onecell_data *data)
2092 {
2093 	struct generic_pm_domain *genpd;
2094 	unsigned int i;
2095 	int ret = -EINVAL;
2096 
2097 	if (!np || !data)
2098 		return -EINVAL;
2099 
2100 	mutex_lock(&gpd_list_lock);
2101 
2102 	if (!data->xlate)
2103 		data->xlate = genpd_xlate_onecell;
2104 
2105 	for (i = 0; i < data->num_domains; i++) {
2106 		genpd = data->domains[i];
2107 
2108 		if (!genpd)
2109 			continue;
2110 		if (!genpd_present(genpd))
2111 			goto error;
2112 
2113 		genpd->dev.of_node = np;
2114 
2115 		/* Parse genpd OPP table */
2116 		if (genpd->set_performance_state) {
2117 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2118 			if (ret) {
2119 				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2120 					i, ret);
2121 				goto error;
2122 			}
2123 
2124 			/*
2125 			 * Save table for faster processing while setting
2126 			 * performance state.
2127 			 */
2128 			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2129 			WARN_ON(!genpd->opp_table);
2130 		}
2131 
2132 		genpd->provider = &np->fwnode;
2133 		genpd->has_provider = true;
2134 	}
2135 
2136 	ret = genpd_add_provider(np, data->xlate, data);
2137 	if (ret < 0)
2138 		goto error;
2139 
2140 	mutex_unlock(&gpd_list_lock);
2141 
2142 	return 0;
2143 
2144 error:
2145 	while (i--) {
2146 		genpd = data->domains[i];
2147 
2148 		if (!genpd)
2149 			continue;
2150 
2151 		genpd->provider = NULL;
2152 		genpd->has_provider = false;
2153 
2154 		if (genpd->set_performance_state) {
2155 			dev_pm_opp_put_opp_table(genpd->opp_table);
2156 			dev_pm_opp_of_remove_table(&genpd->dev);
2157 		}
2158 	}
2159 
2160 	mutex_unlock(&gpd_list_lock);
2161 
2162 	return ret;
2163 }
2164 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
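
/*
 * Example (editorial sketch): one node providing several domains, selected
 * by a single index cell. The "foo_*" names are hypothetical:
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		[0] = &foo_pd_cpu,
 *		[1] = &foo_pd_gpu,
 *	};
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains	= foo_domains,
 *		.num_domains	= ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 *
 * A consumer then selects a domain by index, e.g.:
 * power-domains = <&power 1>;
 */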
2165 
2166 /**
2167  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2168  * @np: Device node pointer associated with the PM domain provider
2169  */
2170 void of_genpd_del_provider(struct device_node *np)
2171 {
2172 	struct of_genpd_provider *cp, *tmp;
2173 	struct generic_pm_domain *gpd;
2174 
2175 	mutex_lock(&gpd_list_lock);
2176 	mutex_lock(&of_genpd_mutex);
2177 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2178 		if (cp->node == np) {
2179 			/*
2180 			 * For each PM domain associated with the
2181 			 * provider, set 'has_provider' to false
2182 			 * so that the PM domain can be safely removed.
2183 			 */
2184 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2185 				if (gpd->provider == &np->fwnode) {
2186 					gpd->has_provider = false;
2187 
2188 					if (!gpd->set_performance_state)
2189 						continue;
2190 
2191 					dev_pm_opp_put_opp_table(gpd->opp_table);
2192 					dev_pm_opp_of_remove_table(&gpd->dev);
2193 				}
2194 			}
2195 
2196 			list_del(&cp->link);
2197 			of_node_put(cp->node);
2198 			kfree(cp);
2199 			break;
2200 		}
2201 	}
2202 	mutex_unlock(&of_genpd_mutex);
2203 	mutex_unlock(&gpd_list_lock);
2204 }
2205 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
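
/*
 * Example (editorial sketch): provider teardown ordering. The provider must
 * be deleted first, since genpd_remove() refuses to remove a domain that
 * still has a provider:
 *
 *	of_genpd_del_provider(pdev->dev.of_node);
 *	ret = pm_genpd_remove(&foo_pd);	// -EBUSY if devices/subdomains remain
 */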
2206 
2207 /**
2208  * genpd_get_from_provider() - Look-up PM domain
2209  * @genpdspec: OF phandle args to use for look-up
2210  *
2211  * Looks for a PM domain provider under the node specified by @genpdspec and if
2212  * found, uses the provider's xlate function to map phandle args to a PM
2213  * domain.
2214  *
2215  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2216  * on failure.
2217  */
2218 static struct generic_pm_domain *genpd_get_from_provider(
2219 					struct of_phandle_args *genpdspec)
2220 {
2221 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2222 	struct of_genpd_provider *provider;
2223 
2224 	if (!genpdspec)
2225 		return ERR_PTR(-EINVAL);
2226 
2227 	mutex_lock(&of_genpd_mutex);
2228 
2229 	/* Check if we have such a provider in our list */
2230 	list_for_each_entry(provider, &of_genpd_providers, link) {
2231 		if (provider->node == genpdspec->np)
2232 			genpd = provider->xlate(genpdspec, provider->data);
2233 		if (!IS_ERR(genpd))
2234 			break;
2235 	}
2236 
2237 	mutex_unlock(&of_genpd_mutex);
2238 
2239 	return genpd;
2240 }
2241 
2242 /**
2243  * of_genpd_add_device() - Add a device to an I/O PM domain
2244  * @genpdspec: OF phandle args to use for look-up PM domain
2245  * @dev: Device to be added.
2246  *
2247  * Looks up an I/O PM domain based upon the phandle args provided and adds
2248  * the device to the PM domain. Returns a negative error code on failure.
2249  */
2250 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2251 {
2252 	struct generic_pm_domain *genpd;
2253 	int ret;
2254 
2255 	mutex_lock(&gpd_list_lock);
2256 
2257 	genpd = genpd_get_from_provider(genpdspec);
2258 	if (IS_ERR(genpd)) {
2259 		ret = PTR_ERR(genpd);
2260 		goto out;
2261 	}
2262 
2263 	ret = genpd_add_device(genpd, dev, dev);
2264 
2265 out:
2266 	mutex_unlock(&gpd_list_lock);
2267 
2268 	return ret;
2269 }
2270 EXPORT_SYMBOL_GPL(of_genpd_add_device);
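
/*
 * Example (editorial sketch): adding a device from a hand-parsed specifier,
 * e.g. in platform setup code outside the normal probe path:
 *
 *	struct of_phandle_args pd_args;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}
 */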
2271 
2272 /**
2273  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2274  * @parent_spec: OF phandle args to use for parent PM domain look-up
2275  * @subdomain_spec: OF phandle args to use for subdomain look-up
2276  *
2277  * Looks up a parent PM domain and a subdomain based upon the phandle args
2278  * provided and adds the subdomain to the parent PM domain. Returns a
2279  * negative error code on failure.
2280  */
2281 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2282 			   struct of_phandle_args *subdomain_spec)
2283 {
2284 	struct generic_pm_domain *parent, *subdomain;
2285 	int ret;
2286 
2287 	mutex_lock(&gpd_list_lock);
2288 
2289 	parent = genpd_get_from_provider(parent_spec);
2290 	if (IS_ERR(parent)) {
2291 		ret = PTR_ERR(parent);
2292 		goto out;
2293 	}
2294 
2295 	subdomain = genpd_get_from_provider(subdomain_spec);
2296 	if (IS_ERR(subdomain)) {
2297 		ret = PTR_ERR(subdomain);
2298 		goto out;
2299 	}
2300 
2301 	ret = genpd_add_subdomain(parent, subdomain);
2302 
2303 out:
2304 	mutex_unlock(&gpd_list_lock);
2305 
2306 	return ret;
2307 }
2308 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
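
/*
 * Example (editorial sketch): linking two domains parsed from hypothetical
 * parent/child specifiers; the parent then stays powered for as long as the
 * subdomain is on:
 *
 *	ret = of_genpd_add_subdomain(&parent_args, &child_args);
 */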
2309 
2310 /**
2311  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2312  * @np: Pointer to the device node associated with the provider
2313  *
2314  * Find the last PM domain that was added by a particular provider and
2315  * remove this PM domain from the list of PM domains. The provider is
2316  * identified by the device node that is passed. The PM domain will only
2317  * be removed if the provider associated with the domain has been
2318  * removed.
2319  *
2320  * Returns a valid pointer to struct generic_pm_domain on success or
2321  * ERR_PTR() on failure.
2322  */
2323 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2324 {
2325 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2326 	int ret;
2327 
2328 	if (IS_ERR_OR_NULL(np))
2329 		return ERR_PTR(-EINVAL);
2330 
2331 	mutex_lock(&gpd_list_lock);
2332 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2333 		if (gpd->provider == &np->fwnode) {
2334 			ret = genpd_remove(gpd);
2335 			genpd = ret ? ERR_PTR(ret) : gpd;
2336 			break;
2337 		}
2338 	}
2339 	mutex_unlock(&gpd_list_lock);
2340 
2341 	return genpd;
2342 }
2343 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2344 
2345 static void genpd_release_dev(struct device *dev)
2346 {
2347 	of_node_put(dev->of_node);
2348 	kfree(dev);
2349 }
2350 
2351 static struct bus_type genpd_bus_type = {
2352 	.name		= "genpd",
2353 };
2354 
2355 /**
2356  * genpd_dev_pm_detach - Detach a device from its PM domain.
2357  * @dev: Device to detach.
2358  * @power_off: Currently not used
2359  *
2360  * Try to locate the generic PM domain that the device was previously
2361  * attached to. If one is found, the device is detached from it.
2362  */
2363 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2364 {
2365 	struct generic_pm_domain *pd;
2366 	unsigned int i;
2367 	int ret = 0;
2368 
2369 	pd = dev_to_genpd(dev);
2370 	if (IS_ERR(pd))
2371 		return;
2372 
2373 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2374 
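	/* Retry removal with exponential backoff, bounded by GENPD_RETRY_MAX_MS. */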
2375 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2376 		ret = genpd_remove_device(pd, dev);
2377 		if (ret != -EAGAIN)
2378 			break;
2379 
2380 		mdelay(i);
2381 		cond_resched();
2382 	}
2383 
2384 	if (ret < 0) {
2385 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2386 			pd->name, ret);
2387 		return;
2388 	}
2389 
2390 	/* Check if PM domain can be powered off after removing this device. */
2391 	genpd_queue_power_off_work(pd);
2392 
2393 	/* Unregister the device if it was created by genpd. */
2394 	if (dev->bus == &genpd_bus_type)
2395 		device_unregister(dev);
2396 }
2397 
2398 static void genpd_dev_pm_sync(struct device *dev)
2399 {
2400 	struct generic_pm_domain *pd;
2401 
2402 	pd = dev_to_genpd(dev);
2403 	if (IS_ERR(pd))
2404 		return;
2405 
2406 	genpd_queue_power_off_work(pd);
2407 }
2408 
2409 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2410 				 unsigned int index, bool power_on)
2411 {
2412 	struct of_phandle_args pd_args;
2413 	struct generic_pm_domain *pd;
2414 	int ret;
2415 
2416 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2417 				"#power-domain-cells", index, &pd_args);
2418 	if (ret < 0)
2419 		return ret;
2420 
2421 	mutex_lock(&gpd_list_lock);
2422 	pd = genpd_get_from_provider(&pd_args);
2423 	of_node_put(pd_args.np);
2424 	if (IS_ERR(pd)) {
2425 		mutex_unlock(&gpd_list_lock);
2426 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2427 			__func__, PTR_ERR(pd));
2428 		return driver_deferred_probe_check_state(base_dev);
2429 	}
2430 
2431 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2432 
2433 	ret = genpd_add_device(pd, dev, base_dev);
2434 	mutex_unlock(&gpd_list_lock);
2435 
2436 	if (ret < 0) {
2437 		if (ret != -EPROBE_DEFER)
2438 			dev_err(dev, "failed to add to PM domain %s: %d\n",
2439 				pd->name, ret);
2440 		return ret;
2441 	}
2442 
2443 	dev->pm_domain->detach = genpd_dev_pm_detach;
2444 	dev->pm_domain->sync = genpd_dev_pm_sync;
2445 
2446 	if (power_on) {
2447 		genpd_lock(pd);
2448 		ret = genpd_power_on(pd, 0);
2449 		genpd_unlock(pd);
2450 	}
2451 
2452 	if (ret)
2453 		genpd_remove_device(pd, dev);
2454 
2455 	return ret ? -EPROBE_DEFER : 1;
2456 }
2457 
2458 /**
2459  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2460  * @dev: Device to attach.
2461  *
2462  * Parse the device's OF node to find a PM domain specifier. If one is
2463  * found, attach the device to the retrieved pm_domain ops.
2464  *
2465  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2466  * need a PM domain or when multiple power-domains exist for it, else a
2467  * negative error code. Note that if a power-domain exists for the device but
2468  * it cannot be found or turned on, -EPROBE_DEFER is returned to ensure that
2469  * the device is not probed and to retry later.
2470  */
2471 int genpd_dev_pm_attach(struct device *dev)
2472 {
2473 	if (!dev->of_node)
2474 		return 0;
2475 
2476 	/*
2477 	 * Devices with multiple PM domains must be attached separately, as we
2478 	 * can only attach one PM domain per device.
2479 	 */
2480 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2481 				       "#power-domain-cells") != 1)
2482 		return 0;
2483 
2484 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2485 }
2486 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
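
/*
 * Example (editorial sketch): the single-domain consumer case handled above,
 * with a hypothetical device node:
 *
 *	uart0: serial@fe001000 {
 *		compatible = "foo,uart";
 *		reg = <0xfe001000 0x100>;
 *		power-domains = <&power 0>;
 *	};
 *
 * Drivers normally do not call genpd_dev_pm_attach() themselves; the driver
 * core reaches it via dev_pm_domain_attach() during probe.
 */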
2487 
2488 /**
2489  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2490  * @dev: The device used to lookup the PM domain.
2491  * @index: The index of the PM domain.
2492  *
2493  * Parse the device's OF node to find a PM domain specifier at the provided
2494  * @index. If one is found, create a virtual device and attach it to the
2495  * retrieved pm_domain ops. To deal with detaching of the virtual device, the
2496  * ->detach() callback in the struct dev_pm_domain is assigned to
2497  * genpd_dev_pm_detach().
2498  *
2499  * Returns the created virtual device if a PM domain is successfully attached,
2500  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2501  * failures. If a power-domain exists for the device but cannot be found or
2502  * turned on, ERR_PTR(-EPROBE_DEFER) is returned so the device is retried later.
2503  */
2504 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2505 					 unsigned int index)
2506 {
2507 	struct device *virt_dev;
2508 	int num_domains;
2509 	int ret;
2510 
2511 	if (!dev->of_node)
2512 		return NULL;
2513 
2514 	/* Verify that the index is within a valid range. */
2515 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2516 						 "#power-domain-cells");
2517 	if (num_domains < 0 || index >= num_domains)
2518 		return NULL;
2519 
2520 	/* Allocate and register device on the genpd bus. */
2521 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2522 	if (!virt_dev)
2523 		return ERR_PTR(-ENOMEM);
2524 
2525 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2526 	virt_dev->bus = &genpd_bus_type;
2527 	virt_dev->release = genpd_release_dev;
2528 	virt_dev->of_node = of_node_get(dev->of_node);
2529 
2530 	ret = device_register(virt_dev);
2531 	if (ret) {
2532 		put_device(virt_dev);
2533 		return ERR_PTR(ret);
2534 	}
2535 
2536 	/* Try to attach the device to the PM domain at the specified index. */
2537 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2538 	if (ret < 1) {
2539 		device_unregister(virt_dev);
2540 		return ret ? ERR_PTR(ret) : NULL;
2541 	}
2542 
2543 	pm_runtime_enable(virt_dev);
2544 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2545 
2546 	return virt_dev;
2547 }
2548 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
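
/*
 * Example (editorial sketch): a consumer of multiple domains attaching to
 * one by index and tying its runtime PM to the physical device through a
 * device link (a common pattern; names are hypothetical):
 *
 *	struct device *virt_dev;
 *
 *	virt_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR(virt_dev))
 *		return PTR_ERR(virt_dev);
 *
 *	device_link_add(dev, virt_dev,
 *			DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 */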
2549 
2550 /**
2551  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2552  * @dev: The device used to lookup the PM domain.
2553  * @name: The name of the PM domain.
2554  *
2555  * Parse the device's OF node to find a PM domain specifier using the
2556  * power-domain-names DT property. For further description see
2557  * genpd_dev_pm_attach_by_id().
2558  */
2559 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2560 {
2561 	int index;
2562 
2563 	if (!dev->of_node)
2564 		return NULL;
2565 
2566 	index = of_property_match_string(dev->of_node, "power-domain-names",
2567 					 name);
2568 	if (index < 0)
2569 		return NULL;
2570 
2571 	return genpd_dev_pm_attach_by_id(dev, index);
2572 }
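
/*
 * Example (editorial sketch): name-based lookup, assuming a hypothetical
 * consumer node:
 *
 *	codec: codec@fe002000 {
 *		power-domains = <&power 0>, <&power 1>;
 *		power-domain-names = "core", "mem";
 *	};
 *
 *	virt_dev = genpd_dev_pm_attach_by_name(dev, "mem");
 */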
2573 
2574 static const struct of_device_id idle_state_match[] = {
2575 	{ .compatible = "domain-idle-state", },
2576 	{ }
2577 };
2578 
2579 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2580 				    struct device_node *state_node)
2581 {
2582 	int err;
2583 	u32 residency;
2584 	u32 entry_latency, exit_latency;
2585 
2586 	err = of_property_read_u32(state_node, "entry-latency-us",
2587 						&entry_latency);
2588 	if (err) {
2589 		pr_debug(" * %pOF missing entry-latency-us property\n",
2590 			 state_node);
2591 		return -EINVAL;
2592 	}
2593 
2594 	err = of_property_read_u32(state_node, "exit-latency-us",
2595 						&exit_latency);
2596 	if (err) {
2597 		pr_debug(" * %pOF missing exit-latency-us property\n",
2598 			 state_node);
2599 		return -EINVAL;
2600 	}
2601 
2602 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2603 	if (!err)
2604 		genpd_state->residency_ns = 1000 * residency;
2605 
2606 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2607 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2608 	genpd_state->fwnode = &state_node->fwnode;
2609 
2610 	return 0;
2611 }
2612 
2613 static int genpd_iterate_idle_states(struct device_node *dn,
2614 				     struct genpd_power_state *states)
2615 {
2616 	int ret;
2617 	struct of_phandle_iterator it;
2618 	struct device_node *np;
2619 	int i = 0;
2620 
2621 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2622 	if (ret <= 0)
2623 		return ret;
2624 
2625 	/* Loop over the phandles until all the requested entries are found */
2626 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2627 		np = it.node;
2628 		if (!of_match_node(idle_state_match, np))
2629 			continue;
2630 		if (states) {
2631 			ret = genpd_parse_state(&states[i], np);
2632 			if (ret) {
2633 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2634 				       np, ret);
2635 				of_node_put(np);
2636 				return ret;
2637 			}
2638 		}
2639 		i++;
2640 	}
2641 
2642 	return i;
2643 }
2644 
2645 /**
2646  * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
2647  *
2648  * @dn: The genpd device node
2649  * @states: The pointer to which the state array will be saved.
2650  * @n: The count of elements in the array returned from this function.
2651  *
2652  * Returns the device states parsed from the OF node. The memory for the states
2653  * is allocated by this function and it is the responsibility of the caller to
2654  * free the memory after use. If zero or more compatible domain idle states are
2655  * found, it returns 0; in case of errors, a negative error code is returned.
2656  */
2657 int of_genpd_parse_idle_states(struct device_node *dn,
2658 			struct genpd_power_state **states, int *n)
2659 {
2660 	struct genpd_power_state *st;
2661 	int ret;
2662 
2663 	ret = genpd_iterate_idle_states(dn, NULL);
2664 	if (ret < 0)
2665 		return ret;
2666 
2667 	if (!ret) {
2668 		*states = NULL;
2669 		*n = 0;
2670 		return 0;
2671 	}
2672 
2673 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2674 	if (!st)
2675 		return -ENOMEM;
2676 
2677 	ret = genpd_iterate_idle_states(dn, st);
2678 	if (ret <= 0) {
2679 		kfree(st);
2680 		return ret < 0 ? ret : -EINVAL;
2681 	}
2682 
2683 	*states = st;
2684 	*n = ret;
2685 
2686 	return 0;
2687 }
2688 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
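
/*
 * Example (editorial sketch): describing idle states in DT and handing them
 * to a domain; node names and labels are hypothetical:
 *
 *	PD_RET: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <100>;
 *		exit-latency-us = <150>;
 *		min-residency-us = <1000>;
 *	};
 *
 *	power: power-controller@12340000 {
 *		#power-domain-cells = <0>;
 *		domain-idle-states = <&PD_RET>;
 *	};
 *
 *	// In the provider driver, before pm_genpd_init():
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!ret && nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 */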
2689 
2690 /**
2691  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2692  *
2693  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2694  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2695  *	state.
2696  *
2697  * Returns the performance state encoded in the OPP of the genpd. This calls
2698  * the platform-specific genpd->opp_to_performance_state() callback to
2699  * translate the power domain OPP to a performance state.
2700  *
2701  * Returns performance state on success and 0 on failure.
2702  */
2703 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2704 					       struct dev_pm_opp *opp)
2705 {
2706 	struct generic_pm_domain *genpd;
2707 	int state;
2708 
2709 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2710 
2711 	if (unlikely(!genpd->opp_to_performance_state))
2712 		return 0;
2713 
2714 	genpd_lock(genpd);
2715 	state = genpd->opp_to_performance_state(genpd, opp);
2716 	genpd_unlock(genpd);
2717 
2718 	return state;
2719 }
2720 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
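
/*
 * Example (editorial sketch): a provider's translation callback might simply
 * return the level encoded in the OPP, assuming dev_pm_opp_get_level() is
 * available; the callback name is hypothetical:
 *
 *	static unsigned int foo_pd_opp_to_performance_state(
 *				struct generic_pm_domain *genpd,
 *				struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	foo_pd.opp_to_performance_state = foo_pd_opp_to_performance_state;
 */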
2721 
2722 static int __init genpd_bus_init(void)
2723 {
2724 	return bus_register(&genpd_bus_type);
2725 }
2726 core_initcall(genpd_bus_init);
2727 
2728 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2729 
2731 /***        debugfs support        ***/
2732 
2733 #ifdef CONFIG_DEBUG_FS
2734 #include <linux/pm.h>
2735 #include <linux/device.h>
2736 #include <linux/debugfs.h>
2737 #include <linux/seq_file.h>
2738 #include <linux/init.h>
2739 #include <linux/kobject.h>
2740 static struct dentry *genpd_debugfs_dir;
2741 
2742 /*
2743  * TODO: This function is a slightly modified version of rtpm_status_show
2744  * from sysfs.c, so generalize it.
2745  */
2746 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2747 {
2748 	static const char * const status_lookup[] = {
2749 		[RPM_ACTIVE] = "active",
2750 		[RPM_RESUMING] = "resuming",
2751 		[RPM_SUSPENDED] = "suspended",
2752 		[RPM_SUSPENDING] = "suspending"
2753 	};
2754 	const char *p = "";
2755 
2756 	if (dev->power.runtime_error)
2757 		p = "error";
2758 	else if (dev->power.disable_depth)
2759 		p = "unsupported";
2760 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2761 		p = status_lookup[dev->power.runtime_status];
2762 	else
2763 		WARN_ON(1);
2764 
2765 	seq_puts(s, p);
2766 }
2767 
2768 static int genpd_summary_one(struct seq_file *s,
2769 			struct generic_pm_domain *genpd)
2770 {
2771 	static const char * const status_lookup[] = {
2772 		[GPD_STATE_ACTIVE] = "on",
2773 		[GPD_STATE_POWER_OFF] = "off"
2774 	};
2775 	struct pm_domain_data *pm_data;
2776 	const char *kobj_path;
2777 	struct gpd_link *link;
2778 	char state[16];
2779 	int ret;
2780 
2781 	ret = genpd_lock_interruptible(genpd);
2782 	if (ret)
2783 		return -ERESTARTSYS;
2784 
2785 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2786 		goto exit;
2787 	if (!genpd_status_on(genpd))
2788 		snprintf(state, sizeof(state), "%s-%u",
2789 			 status_lookup[genpd->status], genpd->state_idx);
2790 	else
2791 		snprintf(state, sizeof(state), "%s",
2792 			 status_lookup[genpd->status]);
2793 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2794 
2795 	/*
2796 	 * Modifications on the list require holding locks on both
2797 	 * master and slave, so we are safe.
2798 	 * Also genpd->name is immutable.
2799 	 */
2800 	list_for_each_entry(link, &genpd->master_links, master_node) {
2801 		seq_printf(s, "%s", link->slave->name);
2802 		if (!list_is_last(&link->master_node, &genpd->master_links))
2803 			seq_puts(s, ", ");
2804 	}
2805 
2806 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2807 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2808 				genpd_is_irq_safe(genpd) ?
2809 				GFP_ATOMIC : GFP_KERNEL);
2810 		if (kobj_path == NULL)
2811 			continue;
2812 
2813 		seq_printf(s, "\n    %-50s  ", kobj_path);
2814 		rtpm_status_str(s, pm_data->dev);
2815 		kfree(kobj_path);
2816 	}
2817 
2818 	seq_puts(s, "\n");
2819 exit:
2820 	genpd_unlock(genpd);
2821 
2822 	return 0;
2823 }
2824 
2825 static int summary_show(struct seq_file *s, void *data)
2826 {
2827 	struct generic_pm_domain *genpd;
2828 	int ret = 0;
2829 
2830 	seq_puts(s, "domain                          status          slaves\n");
2831 	seq_puts(s, "    /device                                             runtime status\n");
2832 	seq_puts(s, "----------------------------------------------------------------------\n");
2833 
2834 	ret = mutex_lock_interruptible(&gpd_list_lock);
2835 	if (ret)
2836 		return -ERESTARTSYS;
2837 
2838 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2839 		ret = genpd_summary_one(s, genpd);
2840 		if (ret)
2841 			break;
2842 	}
2843 	mutex_unlock(&gpd_list_lock);
2844 
2845 	return ret;
2846 }
2847 
2848 static int status_show(struct seq_file *s, void *data)
2849 {
2850 	static const char * const status_lookup[] = {
2851 		[GPD_STATE_ACTIVE] = "on",
2852 		[GPD_STATE_POWER_OFF] = "off"
2853 	};
2854 
2855 	struct generic_pm_domain *genpd = s->private;
2856 	int ret = 0;
2857 
2858 	ret = genpd_lock_interruptible(genpd);
2859 	if (ret)
2860 		return -ERESTARTSYS;
2861 
2862 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2863 		goto exit;
2864 
2865 	if (genpd->status == GPD_STATE_POWER_OFF)
2866 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2867 			genpd->state_idx);
2868 	else
2869 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
2870 exit:
2871 	genpd_unlock(genpd);
2872 	return ret;
2873 }
2874 
2875 static int sub_domains_show(struct seq_file *s, void *data)
2876 {
2877 	struct generic_pm_domain *genpd = s->private;
2878 	struct gpd_link *link;
2879 	int ret = 0;
2880 
2881 	ret = genpd_lock_interruptible(genpd);
2882 	if (ret)
2883 		return -ERESTARTSYS;
2884 
2885 	list_for_each_entry(link, &genpd->master_links, master_node)
2886 		seq_printf(s, "%s\n", link->slave->name);
2887 
2888 	genpd_unlock(genpd);
2889 	return ret;
2890 }
2891 
2892 static int idle_states_show(struct seq_file *s, void *data)
2893 {
2894 	struct generic_pm_domain *genpd = s->private;
2895 	unsigned int i;
2896 	int ret = 0;
2897 
2898 	ret = genpd_lock_interruptible(genpd);
2899 	if (ret)
2900 		return -ERESTARTSYS;
2901 
2902 	seq_puts(s, "State          Time Spent(ms)\n");
2903 
2904 	for (i = 0; i < genpd->state_count; i++) {
2905 		ktime_t delta = 0;
2906 		s64 msecs;
2907 
2908 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2909 				(genpd->state_idx == i))
2910 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2911 
2912 		msecs = ktime_to_ms(
2913 			ktime_add(genpd->states[i].idle_time, delta));
2914 		seq_printf(s, "S%-13u %lld\n", i, msecs);
2915 	}
2916 
2917 	genpd_unlock(genpd);
2918 	return ret;
2919 }
2920 
2921 static int active_time_show(struct seq_file *s, void *data)
2922 {
2923 	struct generic_pm_domain *genpd = s->private;
2924 	ktime_t delta = 0;
2925 	int ret = 0;
2926 
2927 	ret = genpd_lock_interruptible(genpd);
2928 	if (ret)
2929 		return -ERESTARTSYS;
2930 
2931 	if (genpd->status == GPD_STATE_ACTIVE)
2932 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
2933 
2934 	seq_printf(s, "%lld ms\n", ktime_to_ms(
2935 				ktime_add(genpd->on_time, delta)));
2936 
2937 	genpd_unlock(genpd);
2938 	return ret;
2939 }
2940 
2941 static int total_idle_time_show(struct seq_file *s, void *data)
2942 {
2943 	struct generic_pm_domain *genpd = s->private;
2944 	ktime_t delta = 0, total = 0;
2945 	unsigned int i;
2946 	int ret = 0;
2947 
2948 	ret = genpd_lock_interruptible(genpd);
2949 	if (ret)
2950 		return -ERESTARTSYS;
2951 
2952 	for (i = 0; i < genpd->state_count; i++) {
2953 
2954 		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2955 				(genpd->state_idx == i))
2956 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2957 
2958 		total = ktime_add(total, genpd->states[i].idle_time);
2959 	}
2960 	total = ktime_add(total, delta);
2961 
2962 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2963 
2964 	genpd_unlock(genpd);
2965 	return ret;
2966 }
2967 
2969 static int devices_show(struct seq_file *s, void *data)
2970 {
2971 	struct generic_pm_domain *genpd = s->private;
2972 	struct pm_domain_data *pm_data;
2973 	const char *kobj_path;
2974 	int ret = 0;
2975 
2976 	ret = genpd_lock_interruptible(genpd);
2977 	if (ret)
2978 		return -ERESTARTSYS;
2979 
2980 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2981 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2982 				genpd_is_irq_safe(genpd) ?
2983 				GFP_ATOMIC : GFP_KERNEL);
2984 		if (kobj_path == NULL)
2985 			continue;
2986 
2987 		seq_printf(s, "%s\n", kobj_path);
2988 		kfree(kobj_path);
2989 	}
2990 
2991 	genpd_unlock(genpd);
2992 	return ret;
2993 }
2994 
2995 static int perf_state_show(struct seq_file *s, void *data)
2996 {
2997 	struct generic_pm_domain *genpd = s->private;
2998 
2999 	if (genpd_lock_interruptible(genpd))
3000 		return -ERESTARTSYS;
3001 
3002 	seq_printf(s, "%u\n", genpd->performance_state);
3003 
3004 	genpd_unlock(genpd);
3005 	return 0;
3006 }
3007 
3008 DEFINE_SHOW_ATTRIBUTE(summary);
3009 DEFINE_SHOW_ATTRIBUTE(status);
3010 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3011 DEFINE_SHOW_ATTRIBUTE(idle_states);
3012 DEFINE_SHOW_ATTRIBUTE(active_time);
3013 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3014 DEFINE_SHOW_ATTRIBUTE(devices);
3015 DEFINE_SHOW_ATTRIBUTE(perf_state);
3016 
3017 static int __init genpd_debug_init(void)
3018 {
3019 	struct dentry *d;
3020 	struct generic_pm_domain *genpd;
3021 
3022 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3023 
3024 	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
3025 			    NULL, &summary_fops);
3026 
3027 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3028 		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3029 
3030 		debugfs_create_file("current_state", 0444,
3031 				d, genpd, &status_fops);
3032 		debugfs_create_file("sub_domains", 0444,
3033 				d, genpd, &sub_domains_fops);
3034 		debugfs_create_file("idle_states", 0444,
3035 				d, genpd, &idle_states_fops);
3036 		debugfs_create_file("active_time", 0444,
3037 				d, genpd, &active_time_fops);
3038 		debugfs_create_file("total_idle_time", 0444,
3039 				d, genpd, &total_idle_time_fops);
3040 		debugfs_create_file("devices", 0444,
3041 				d, genpd, &devices_fops);
3042 		if (genpd->set_performance_state)
3043 			debugfs_create_file("perf_state", 0444,
3044 					    d, genpd, &perf_state_fops);
3045 	}
3046 
3047 	return 0;
3048 }
3049 late_initcall(genpd_debug_init);
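
/*
 * Resulting debugfs layout (editorial illustration, for a hypothetical
 * domain named "foo-pd"):
 *
 *	/sys/kernel/debug/pm_genpd/
 *	    pm_genpd_summary
 *	    foo-pd/
 *	        current_state
 *	        sub_domains
 *	        idle_states
 *	        active_time
 *	        total_idle_time
 *	        devices
 *	        perf_state	(only if ->set_performance_state is set)
 */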
3050 
3051 static void __exit genpd_debug_exit(void)
3052 {
3053 	debugfs_remove_recursive(genpd_debugfs_dir);
3054 }
3055 __exitcall(genpd_debug_exit);
3056 #endif /* CONFIG_DEBUG_FS */
3057