xref: /openbmc/linux/drivers/base/power/domain.c (revision aa0dc6a7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 #include <linux/cpu.h>
24 #include <linux/debugfs.h>
25 
26 #include "power.h"
27 
28 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
29 
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
31 ({								\
32 	type (*__routine)(struct device *__d); 			\
33 	type __ret = (type)0;					\
34 								\
35 	__routine = genpd->dev_ops.callback; 			\
36 	if (__routine) {					\
37 		__ret = __routine(dev); 			\
38 	}							\
39 	__ret;							\
40 })
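
/*
 * For example, GENPD_DEV_CALLBACK(genpd, int, stop, dev) evaluates roughly to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;
 *
 * i.e. the optional per-domain device callback is invoked only when the
 * provider has assigned it; otherwise the result defaults to (int)0. This is
 * how genpd_stop_dev() and genpd_start_dev() below invoke the ->stop() and
 * ->start() device operations.
 */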
41 
42 static LIST_HEAD(gpd_list);
43 static DEFINE_MUTEX(gpd_list_lock);
44 
45 struct genpd_lock_ops {
46 	void (*lock)(struct generic_pm_domain *genpd);
47 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 	void (*unlock)(struct generic_pm_domain *genpd);
50 };
51 
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53 {
54 	mutex_lock(&genpd->mlock);
55 }
56 
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
58 					int depth)
59 {
60 	mutex_lock_nested(&genpd->mlock, depth);
61 }
62 
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64 {
65 	return mutex_lock_interruptible(&genpd->mlock);
66 }
67 
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69 {
70 	mutex_unlock(&genpd->mlock);
71 }
72 
73 static const struct genpd_lock_ops genpd_mtx_ops = {
74 	.lock = genpd_lock_mtx,
75 	.lock_nested = genpd_lock_nested_mtx,
76 	.lock_interruptible = genpd_lock_interruptible_mtx,
77 	.unlock = genpd_unlock_mtx,
78 };
79 
80 static void genpd_lock_spin(struct generic_pm_domain *genpd)
81 	__acquires(&genpd->slock)
82 {
83 	unsigned long flags;
84 
85 	spin_lock_irqsave(&genpd->slock, flags);
86 	genpd->lock_flags = flags;
87 }
88 
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90 					int depth)
91 	__acquires(&genpd->slock)
92 {
93 	unsigned long flags;
94 
95 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96 	genpd->lock_flags = flags;
97 }
98 
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100 	__acquires(&genpd->slock)
101 {
102 	unsigned long flags;
103 
104 	spin_lock_irqsave(&genpd->slock, flags);
105 	genpd->lock_flags = flags;
106 	return 0;
107 }
108 
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110 	__releases(&genpd->slock)
111 {
112 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
113 }
114 
115 static const struct genpd_lock_ops genpd_spin_ops = {
116 	.lock = genpd_lock_spin,
117 	.lock_nested = genpd_lock_nested_spin,
118 	.lock_interruptible = genpd_lock_interruptible_spin,
119 	.unlock = genpd_unlock_spin,
120 };
121 
122 #define genpd_lock(p)			p->lock_ops->lock(p)
123 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
124 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
125 #define genpd_unlock(p)			p->lock_ops->unlock(p)
126 
127 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
128 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
129 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
130 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
133 
134 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
135 		const struct generic_pm_domain *genpd)
136 {
137 	bool ret;
138 
139 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
140 
141 	/*
142 	 * Warn once if an IRQ safe device is attached to a no sleep domain, to
143 	 * indicate a suboptimal configuration for PM. For an always on domain
144 	 * this isn't the case, so don't warn.
145 	 */
146 	if (ret && !genpd_is_always_on(genpd))
147 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
148 				genpd->name);
149 
150 	return ret;
151 }
152 
153 static int genpd_runtime_suspend(struct device *dev);
154 
155 /*
156  * Get the generic PM domain for a particular struct device.
157  * This validates the struct device pointer, the PM domain pointer,
158  * and checks that the PM domain pointer is a real generic PM domain.
159  * Any failure results in NULL being returned.
160  */
161 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
162 {
163 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
164 		return NULL;
165 
166 	/* A genpd always has its ->runtime_suspend() callback assigned. */
167 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
168 		return pd_to_genpd(dev->pm_domain);
169 
170 	return NULL;
171 }
172 
173 /*
174  * This should only be used where we are certain that the pm_domain
175  * attached to the device is a genpd domain.
176  */
177 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
178 {
179 	if (IS_ERR_OR_NULL(dev->pm_domain))
180 		return ERR_PTR(-EINVAL);
181 
182 	return pd_to_genpd(dev->pm_domain);
183 }
184 
185 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
186 			  struct device *dev)
187 {
188 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
189 }
190 
191 static int genpd_start_dev(const struct generic_pm_domain *genpd,
192 			   struct device *dev)
193 {
194 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
195 }
196 
197 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
198 {
199 	bool ret = false;
200 
201 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
202 		ret = !!atomic_dec_and_test(&genpd->sd_count);
203 
204 	return ret;
205 }
206 
207 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
208 {
209 	atomic_inc(&genpd->sd_count);
210 	smp_mb__after_atomic();
211 }
212 
213 #ifdef CONFIG_DEBUG_FS
214 static struct dentry *genpd_debugfs_dir;
215 
216 static void genpd_debug_add(struct generic_pm_domain *genpd);
217 
218 static void genpd_debug_remove(struct generic_pm_domain *genpd)
219 {
220 	struct dentry *d;
221 
222 	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
223 	debugfs_remove(d);
224 }
225 
226 static void genpd_update_accounting(struct generic_pm_domain *genpd)
227 {
228 	ktime_t delta, now;
229 
230 	now = ktime_get();
231 	delta = ktime_sub(now, genpd->accounting_time);
232 
233 	/*
234 	 * If genpd->status is active, we have just come out of the
235 	 * off state, so update the idle time; otherwise update the
236 	 * on time.
237 	 */
238 	if (genpd->status == GENPD_STATE_ON) {
239 		int state_idx = genpd->state_idx;
240 
241 		genpd->states[state_idx].idle_time =
242 			ktime_add(genpd->states[state_idx].idle_time, delta);
243 	} else {
244 		genpd->on_time = ktime_add(genpd->on_time, delta);
245 	}
246 
247 	genpd->accounting_time = now;
248 }
249 #else
250 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
251 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
252 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
253 #endif
254 
255 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
256 					   unsigned int state)
257 {
258 	struct generic_pm_domain_data *pd_data;
259 	struct pm_domain_data *pdd;
260 	struct gpd_link *link;
261 
262 	/* New requested state is same as Max requested state */
263 	if (state == genpd->performance_state)
264 		return state;
265 
266 	/* New requested state is higher than Max requested state */
267 	if (state > genpd->performance_state)
268 		return state;
269 
270 	/* Traverse all devices within the domain */
271 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
272 		pd_data = to_gpd_data(pdd);
273 
274 		if (pd_data->performance_state > state)
275 			state = pd_data->performance_state;
276 	}
277 
278 	/*
279 	 * Traverse all sub-domains within the domain. This can be
280 	 * done without any additional locking as the link->performance_state
281 	 * field is protected by the parent genpd->lock, which is already taken.
282 	 *
283 	 * Also note that link->performance_state (subdomain's performance state
284 	 * requirement to parent domain) is different from
285 	 * link->child->performance_state (current performance state requirement
286 	 * of the devices/sub-domains of the subdomain) and so can have a
287 	 * different value.
288 	 *
289 	 * Note that we also take vote from powered-off sub-domains into account
290 	 * as the same is done for devices right now.
291 	 */
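	/*
	 * Illustrative example (hypothetical numbers): if the caller requests
	 * state 2 while attached devices currently vote for 3 and 4 and a
	 * subdomain link votes for 5, the loops above and below raise the
	 * result to 5, which is what gets propagated towards the parents.
	 */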
292 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
293 		if (link->performance_state > state)
294 			state = link->performance_state;
295 	}
296 
297 	return state;
298 }
299 
300 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
301 					 struct generic_pm_domain *parent,
302 					 unsigned int pstate)
303 {
304 	if (!parent->set_performance_state)
305 		return pstate;
306 
307 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
308 						  parent->opp_table,
309 						  pstate);
310 }
311 
312 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
313 					unsigned int state, int depth)
314 {
315 	struct generic_pm_domain *parent;
316 	struct gpd_link *link;
317 	int parent_state, ret;
318 
319 	if (state == genpd->performance_state)
320 		return 0;
321 
322 	/* Propagate to parents of genpd */
323 	list_for_each_entry(link, &genpd->child_links, child_node) {
324 		parent = link->parent;
325 
326 		/* Find parent's performance state */
327 		ret = genpd_xlate_performance_state(genpd, parent, state);
328 		if (unlikely(ret < 0))
329 			goto err;
330 
331 		parent_state = ret;
332 
333 		genpd_lock_nested(parent, depth + 1);
334 
335 		link->prev_performance_state = link->performance_state;
336 		link->performance_state = parent_state;
337 		parent_state = _genpd_reeval_performance_state(parent,
338 						parent_state);
339 		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
340 		if (ret)
341 			link->performance_state = link->prev_performance_state;
342 
343 		genpd_unlock(parent);
344 
345 		if (ret)
346 			goto err;
347 	}
348 
349 	if (genpd->set_performance_state) {
350 		ret = genpd->set_performance_state(genpd, state);
351 		if (ret)
352 			goto err;
353 	}
354 
355 	genpd->performance_state = state;
356 	return 0;
357 
358 err:
359 	/* Encountered an error, let's roll back */
360 	list_for_each_entry_continue_reverse(link, &genpd->child_links,
361 					     child_node) {
362 		parent = link->parent;
363 
364 		genpd_lock_nested(parent, depth + 1);
365 
366 		parent_state = link->prev_performance_state;
367 		link->performance_state = parent_state;
368 
369 		parent_state = _genpd_reeval_performance_state(parent,
370 						parent_state);
371 		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
372 			pr_err("%s: Failed to roll back to %d performance state\n",
373 			       parent->name, parent_state);
374 		}
375 
376 		genpd_unlock(parent);
377 	}
378 
379 	return ret;
380 }
381 
382 static int genpd_set_performance_state(struct device *dev, unsigned int state)
383 {
384 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
385 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
386 	unsigned int prev_state;
387 	int ret;
388 
389 	prev_state = gpd_data->performance_state;
390 	if (prev_state == state)
391 		return 0;
392 
393 	gpd_data->performance_state = state;
394 	state = _genpd_reeval_performance_state(genpd, state);
395 
396 	ret = _genpd_set_performance_state(genpd, state, 0);
397 	if (ret)
398 		gpd_data->performance_state = prev_state;
399 
400 	return ret;
401 }
402 
403 static int genpd_drop_performance_state(struct device *dev)
404 {
405 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
406 
407 	if (!genpd_set_performance_state(dev, 0))
408 		return prev_state;
409 
410 	return 0;
411 }
412 
413 static void genpd_restore_performance_state(struct device *dev,
414 					    unsigned int state)
415 {
416 	if (state)
417 		genpd_set_performance_state(dev, state);
418 }
419 
420 /**
421  * dev_pm_genpd_set_performance_state - Set performance state of device's power
422  * domain.
423  *
424  * @dev: Device for which the performance state needs to be set.
425  * @state: Target performance state of the device. This can be set to 0 when
426  *	   the device doesn't have any performance state constraints left (and
427  *	   so the device no longer participates in determining the target
428  *	   performance state of the genpd).
429  *
430  * It is assumed that the caller guarantees that the genpd won't be detached
431  * while this routine is being called.
432  *
433  * Returns 0 on success and negative error values on failures.
434  */
435 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
436 {
437 	struct generic_pm_domain *genpd;
438 	int ret;
439 
440 	genpd = dev_to_genpd_safe(dev);
441 	if (!genpd)
442 		return -ENODEV;
443 
444 	if (WARN_ON(!dev->power.subsys_data ||
445 		     !dev->power.subsys_data->domain_data))
446 		return -EINVAL;
447 
448 	genpd_lock(genpd);
449 	ret = genpd_set_performance_state(dev, state);
450 	genpd_unlock(genpd);
451 
452 	return ret;
453 }
454 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
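
/*
 * Minimal usage sketch (illustrative only; the state value is hypothetical,
 * consumers typically derive it from an OPP table):
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	if (ret)
 *		dev_err(dev, "failed to set performance state: %d\n", ret);
 *
 * and, once no constraint is needed anymore:
 *
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */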
455 
456 /**
457  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
458  *
459  * @dev: Device to handle
460  * @next: impending interrupt/wakeup for the device
461  *
462  *
463  * Allow devices to inform the PM framework of their next wakeup. It's
464  * assumed that the caller guarantees that the genpd won't be detached while
465  * this routine is being called. Additionally, it's also assumed that @dev
466  * isn't runtime suspended (RPM_SUSPENDED).
467  *
468  * Although devices are expected to update the next_wakeup after the end of
469  * their use case as well, it is possible the devices themselves may not know
470  * about that, so a stale @next will be ignored when powering off the domain.
471 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
472 {
473 	struct generic_pm_domain_data *gpd_data;
474 	struct generic_pm_domain *genpd;
475 
476 	genpd = dev_to_genpd_safe(dev);
477 	if (!genpd)
478 		return;
479 
480 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
481 	gpd_data->next_wakeup = next;
482 }
483 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
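
/*
 * Sketch of a consumer informing genpd about an upcoming event (illustrative;
 * the 10 ms horizon is a made-up number):
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 *
 * A governor may then decide against entering a deep domain idle state whose
 * exit latency would not pay off before that wakeup.
 */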
484 
485 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
486 {
487 	unsigned int state_idx = genpd->state_idx;
488 	ktime_t time_start;
489 	s64 elapsed_ns;
490 	int ret;
491 
492 	/* Notify consumers that we are about to power on. */
493 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
494 					     GENPD_NOTIFY_PRE_ON,
495 					     GENPD_NOTIFY_OFF, NULL);
496 	ret = notifier_to_errno(ret);
497 	if (ret)
498 		return ret;
499 
500 	if (!genpd->power_on)
501 		goto out;
502 
503 	if (!timed) {
504 		ret = genpd->power_on(genpd);
505 		if (ret)
506 			goto err;
507 
508 		goto out;
509 	}
510 
511 	time_start = ktime_get();
512 	ret = genpd->power_on(genpd);
513 	if (ret)
514 		goto err;
515 
516 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
517 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
518 		goto out;
519 
520 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
521 	genpd->max_off_time_changed = true;
522 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
523 		 genpd->name, "on", elapsed_ns);
524 
525 out:
526 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
527 	return 0;
528 err:
529 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
530 				NULL);
531 	return ret;
532 }
533 
534 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
535 {
536 	unsigned int state_idx = genpd->state_idx;
537 	ktime_t time_start;
538 	s64 elapsed_ns;
539 	int ret;
540 
541 	/* Notify consumers that we are about to power off. */
542 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
543 					     GENPD_NOTIFY_PRE_OFF,
544 					     GENPD_NOTIFY_ON, NULL);
545 	ret = notifier_to_errno(ret);
546 	if (ret)
547 		return ret;
548 
549 	if (!genpd->power_off)
550 		goto out;
551 
552 	if (!timed) {
553 		ret = genpd->power_off(genpd);
554 		if (ret)
555 			goto busy;
556 
557 		goto out;
558 	}
559 
560 	time_start = ktime_get();
561 	ret = genpd->power_off(genpd);
562 	if (ret)
563 		goto busy;
564 
565 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
566 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
567 		goto out;
568 
569 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
570 	genpd->max_off_time_changed = true;
571 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
572 		 genpd->name, "off", elapsed_ns);
573 
574 out:
575 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
576 				NULL);
577 	return 0;
578 busy:
579 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
580 	return ret;
581 }
582 
583 /**
584  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
585  * @genpd: PM domain to power off.
586  *
587  * Queue up the execution of genpd_power_off() unless it's already been done
588  * before.
589  */
590 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
591 {
592 	queue_work(pm_wq, &genpd->power_off_work);
593 }
594 
595 /**
596  * genpd_power_off - Remove power from a given PM domain.
597  * @genpd: PM domain to power down.
598  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
599  * RPM status of the related device is in an intermediate state, not yet turned
600  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
601  * be RPM_SUSPENDED, while it tries to power off the PM domain.
602  * @depth: nesting count for lockdep.
603  *
604  * If all of the @genpd's devices have been suspended and all of its subdomains
605  * have been powered down, remove power from @genpd.
606  */
607 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
608 			   unsigned int depth)
609 {
610 	struct pm_domain_data *pdd;
611 	struct gpd_link *link;
612 	unsigned int not_suspended = 0;
613 	int ret;
614 
615 	/*
616 	 * Do not try to power off the domain in the following situations:
617 	 * (1) The domain is already in the "power off" state.
618 	 * (2) System suspend is in progress.
619 	 */
620 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
621 		return 0;
622 
623 	/*
624 	 * Abort power off for the PM domain in the following situations:
625 	 * (1) The domain is configured as always on.
626 	 * (2) When the domain has a subdomain being powered on.
627 	 */
628 	if (genpd_is_always_on(genpd) ||
629 			genpd_is_rpm_always_on(genpd) ||
630 			atomic_read(&genpd->sd_count) > 0)
631 		return -EBUSY;
632 
633 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
634 		enum pm_qos_flags_status stat;
635 
636 		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
637 		if (stat > PM_QOS_FLAGS_NONE)
638 			return -EBUSY;
639 
640 		/*
641 		 * Do not allow the PM domain to be powered off when an IRQ
642 		 * safe device is part of a non-IRQ safe domain.
643 		 */
644 		if (!pm_runtime_suspended(pdd->dev) ||
645 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
646 			not_suspended++;
647 	}
648 
649 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
650 		return -EBUSY;
651 
652 	if (genpd->gov && genpd->gov->power_down_ok) {
653 		if (!genpd->gov->power_down_ok(&genpd->domain))
654 			return -EAGAIN;
655 	}
656 
657 	/* Default to shallowest state. */
658 	if (!genpd->gov)
659 		genpd->state_idx = 0;
660 
661 	/* Don't power off, if a child domain is waiting to power on. */
662 	if (atomic_read(&genpd->sd_count) > 0)
663 		return -EBUSY;
664 
665 	ret = _genpd_power_off(genpd, true);
666 	if (ret) {
667 		genpd->states[genpd->state_idx].rejected++;
668 		return ret;
669 	}
670 
671 	genpd->status = GENPD_STATE_OFF;
672 	genpd_update_accounting(genpd);
673 	genpd->states[genpd->state_idx].usage++;
674 
675 	list_for_each_entry(link, &genpd->child_links, child_node) {
676 		genpd_sd_counter_dec(link->parent);
677 		genpd_lock_nested(link->parent, depth + 1);
678 		genpd_power_off(link->parent, false, depth + 1);
679 		genpd_unlock(link->parent);
680 	}
681 
682 	return 0;
683 }
684 
685 /**
686  * genpd_power_on - Restore power to a given PM domain and its parents.
687  * @genpd: PM domain to power up.
688  * @depth: nesting count for lockdep.
689  *
690  * Restore power to @genpd and all of its parents so that it is possible to
691  * resume a device belonging to it.
692  */
693 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
694 {
695 	struct gpd_link *link;
696 	int ret = 0;
697 
698 	if (genpd_status_on(genpd))
699 		return 0;
700 
701 	/*
702 	 * The list is guaranteed not to change while the loop below is being
703 	 * executed, unless one of the parents' .power_on() callbacks fiddles
704 	 * with it.
705 	 */
706 	list_for_each_entry(link, &genpd->child_links, child_node) {
707 		struct generic_pm_domain *parent = link->parent;
708 
709 		genpd_sd_counter_inc(parent);
710 
711 		genpd_lock_nested(parent, depth + 1);
712 		ret = genpd_power_on(parent, depth + 1);
713 		genpd_unlock(parent);
714 
715 		if (ret) {
716 			genpd_sd_counter_dec(parent);
717 			goto err;
718 		}
719 	}
720 
721 	ret = _genpd_power_on(genpd, true);
722 	if (ret)
723 		goto err;
724 
725 	genpd->status = GENPD_STATE_ON;
726 	genpd_update_accounting(genpd);
727 
728 	return 0;
729 
730  err:
731 	list_for_each_entry_continue_reverse(link,
732 					&genpd->child_links,
733 					child_node) {
734 		genpd_sd_counter_dec(link->parent);
735 		genpd_lock_nested(link->parent, depth + 1);
736 		genpd_power_off(link->parent, false, depth + 1);
737 		genpd_unlock(link->parent);
738 	}
739 
740 	return ret;
741 }
742 
743 static int genpd_dev_pm_start(struct device *dev)
744 {
745 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
746 
747 	return genpd_start_dev(genpd, dev);
748 }
749 
750 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
751 				     unsigned long val, void *ptr)
752 {
753 	struct generic_pm_domain_data *gpd_data;
754 	struct device *dev;
755 
756 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
757 	dev = gpd_data->base.dev;
758 
759 	for (;;) {
760 		struct generic_pm_domain *genpd;
761 		struct pm_domain_data *pdd;
762 
763 		spin_lock_irq(&dev->power.lock);
764 
765 		pdd = dev->power.subsys_data ?
766 				dev->power.subsys_data->domain_data : NULL;
767 		if (pdd) {
768 			to_gpd_data(pdd)->td.constraint_changed = true;
769 			genpd = dev_to_genpd(dev);
770 		} else {
771 			genpd = ERR_PTR(-ENODATA);
772 		}
773 
774 		spin_unlock_irq(&dev->power.lock);
775 
776 		if (!IS_ERR(genpd)) {
777 			genpd_lock(genpd);
778 			genpd->max_off_time_changed = true;
779 			genpd_unlock(genpd);
780 		}
781 
782 		dev = dev->parent;
783 		if (!dev || dev->power.ignore_children)
784 			break;
785 	}
786 
787 	return NOTIFY_DONE;
788 }
789 
790 /**
791  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
792  * @work: Work structure used for scheduling the execution of this function.
793  */
794 static void genpd_power_off_work_fn(struct work_struct *work)
795 {
796 	struct generic_pm_domain *genpd;
797 
798 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
799 
800 	genpd_lock(genpd);
801 	genpd_power_off(genpd, false, 0);
802 	genpd_unlock(genpd);
803 }
804 
805 /**
806  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
807  * @dev: Device to handle.
808  */
809 static int __genpd_runtime_suspend(struct device *dev)
810 {
811 	int (*cb)(struct device *__dev);
812 
813 	if (dev->type && dev->type->pm)
814 		cb = dev->type->pm->runtime_suspend;
815 	else if (dev->class && dev->class->pm)
816 		cb = dev->class->pm->runtime_suspend;
817 	else if (dev->bus && dev->bus->pm)
818 		cb = dev->bus->pm->runtime_suspend;
819 	else
820 		cb = NULL;
821 
822 	if (!cb && dev->driver && dev->driver->pm)
823 		cb = dev->driver->pm->runtime_suspend;
824 
825 	return cb ? cb(dev) : 0;
826 }
827 
828 /**
829  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
830  * @dev: Device to handle.
831  */
832 static int __genpd_runtime_resume(struct device *dev)
833 {
834 	int (*cb)(struct device *__dev);
835 
836 	if (dev->type && dev->type->pm)
837 		cb = dev->type->pm->runtime_resume;
838 	else if (dev->class && dev->class->pm)
839 		cb = dev->class->pm->runtime_resume;
840 	else if (dev->bus && dev->bus->pm)
841 		cb = dev->bus->pm->runtime_resume;
842 	else
843 		cb = NULL;
844 
845 	if (!cb && dev->driver && dev->driver->pm)
846 		cb = dev->driver->pm->runtime_resume;
847 
848 	return cb ? cb(dev) : 0;
849 }
850 
851 /**
852  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
853  * @dev: Device to suspend.
854  *
855  * Carry out a runtime suspend of a device under the assumption that its
856  * pm_domain field points to the domain member of an object of type
857  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
858  */
859 static int genpd_runtime_suspend(struct device *dev)
860 {
861 	struct generic_pm_domain *genpd;
862 	bool (*suspend_ok)(struct device *__dev);
863 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
864 	struct gpd_timing_data *td = &gpd_data->td;
865 	bool runtime_pm = pm_runtime_enabled(dev);
866 	ktime_t time_start;
867 	s64 elapsed_ns;
868 	int ret;
869 
870 	dev_dbg(dev, "%s()\n", __func__);
871 
872 	genpd = dev_to_genpd(dev);
873 	if (IS_ERR(genpd))
874 		return -EINVAL;
875 
876 	/*
877 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
878 	 * callbacks for purposes other than runtime PM. In those scenarios
879 	 * runtime PM is disabled. Under these circumstances, we shall skip
880 	 * validating/measuring the PM QoS latency.
881 	 */
882 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
883 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
884 		return -EBUSY;
885 
886 	/* Measure suspend latency. */
887 	time_start = 0;
888 	if (runtime_pm)
889 		time_start = ktime_get();
890 
891 	ret = __genpd_runtime_suspend(dev);
892 	if (ret)
893 		return ret;
894 
895 	ret = genpd_stop_dev(genpd, dev);
896 	if (ret) {
897 		__genpd_runtime_resume(dev);
898 		return ret;
899 	}
900 
901 	/* Update suspend latency value if the measured time exceeds it. */
902 	if (runtime_pm) {
903 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
904 		if (elapsed_ns > td->suspend_latency_ns) {
905 			td->suspend_latency_ns = elapsed_ns;
906 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
907 				elapsed_ns);
908 			genpd->max_off_time_changed = true;
909 			td->constraint_changed = true;
910 		}
911 	}
912 
913 	/*
914 	 * If power.irq_safe is set, this routine may be run with
915 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
916 	 */
917 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
918 		return 0;
919 
920 	genpd_lock(genpd);
921 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
922 	genpd_power_off(genpd, true, 0);
923 	genpd_unlock(genpd);
924 
925 	return 0;
926 }
927 
928 /**
929  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
930  * @dev: Device to resume.
931  *
932  * Carry out a runtime resume of a device under the assumption that its
933  * pm_domain field points to the domain member of an object of type
934  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
935  */
936 static int genpd_runtime_resume(struct device *dev)
937 {
938 	struct generic_pm_domain *genpd;
939 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
940 	struct gpd_timing_data *td = &gpd_data->td;
941 	bool runtime_pm = pm_runtime_enabled(dev);
942 	ktime_t time_start;
943 	s64 elapsed_ns;
944 	int ret;
945 	bool timed = true;
946 
947 	dev_dbg(dev, "%s()\n", __func__);
948 
949 	genpd = dev_to_genpd(dev);
950 	if (IS_ERR(genpd))
951 		return -EINVAL;
952 
953 	/*
954 	 * As we don't power off a non-IRQ safe domain that holds
955 	 * an IRQ safe device, we don't need to restore power to it.
956 	 */
957 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
958 		timed = false;
959 		goto out;
960 	}
961 
962 	genpd_lock(genpd);
963 	ret = genpd_power_on(genpd, 0);
964 	if (!ret)
965 		genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
966 	genpd_unlock(genpd);
967 
968 	if (ret)
969 		return ret;
970 
971  out:
972 	/* Measure resume latency. */
973 	time_start = 0;
974 	if (timed && runtime_pm)
975 		time_start = ktime_get();
976 
977 	ret = genpd_start_dev(genpd, dev);
978 	if (ret)
979 		goto err_poweroff;
980 
981 	ret = __genpd_runtime_resume(dev);
982 	if (ret)
983 		goto err_stop;
984 
985 	/* Update resume latency value if the measured time exceeds it. */
986 	if (timed && runtime_pm) {
987 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
988 		if (elapsed_ns > td->resume_latency_ns) {
989 			td->resume_latency_ns = elapsed_ns;
990 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
991 				elapsed_ns);
992 			genpd->max_off_time_changed = true;
993 			td->constraint_changed = true;
994 		}
995 	}
996 
997 	return 0;
998 
999 err_stop:
1000 	genpd_stop_dev(genpd, dev);
1001 err_poweroff:
1002 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1003 		genpd_lock(genpd);
1004 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1005 		genpd_power_off(genpd, true, 0);
1006 		genpd_unlock(genpd);
1007 	}
1008 
1009 	return ret;
1010 }
1011 
1012 static bool pd_ignore_unused;
1013 static int __init pd_ignore_unused_setup(char *__unused)
1014 {
1015 	pd_ignore_unused = true;
1016 	return 1;
1017 }
1018 __setup("pd_ignore_unused", pd_ignore_unused_setup);
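
/*
 * Passing "pd_ignore_unused" on the kernel command line sets the flag above,
 * which makes genpd_power_off_unused() below skip queueing the power-off work
 * for otherwise unused domains. This can help when debugging a platform where
 * powering off a seemingly unused domain is suspected of breaking something.
 */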
1019 
1020 /**
1021  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1022  */
1023 static int __init genpd_power_off_unused(void)
1024 {
1025 	struct generic_pm_domain *genpd;
1026 
1027 	if (pd_ignore_unused) {
1028 		pr_warn("genpd: Not disabling unused power domains\n");
1029 		return 0;
1030 	}
1031 
1032 	mutex_lock(&gpd_list_lock);
1033 
1034 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1035 		genpd_queue_power_off_work(genpd);
1036 
1037 	mutex_unlock(&gpd_list_lock);
1038 
1039 	return 0;
1040 }
1041 late_initcall(genpd_power_off_unused);
1042 
1043 #ifdef CONFIG_PM_SLEEP
1044 
1045 /**
1046  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1047  * @genpd: PM domain to power off, if possible.
1048  * @use_lock: use the genpd locks when walking the parent domains.
1049  * @depth: nesting count for lockdep.
1050  *
1051  * Check if the given PM domain can be powered off (during system suspend or
1052  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1053  *
1054  * This function is only called in "noirq" and "syscore" stages of system power
1055  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1056  * these cases the lock must be held.
1057  */
1058 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1059 				 unsigned int depth)
1060 {
1061 	struct gpd_link *link;
1062 
1063 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1064 		return;
1065 
1066 	if (genpd->suspended_count != genpd->device_count
1067 	    || atomic_read(&genpd->sd_count) > 0)
1068 		return;
1069 
1070 	/* Choose the deepest state when suspending */
1071 	genpd->state_idx = genpd->state_count - 1;
1072 	if (_genpd_power_off(genpd, false))
1073 		return;
1074 
1075 	genpd->status = GENPD_STATE_OFF;
1076 
1077 	list_for_each_entry(link, &genpd->child_links, child_node) {
1078 		genpd_sd_counter_dec(link->parent);
1079 
1080 		if (use_lock)
1081 			genpd_lock_nested(link->parent, depth + 1);
1082 
1083 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1084 
1085 		if (use_lock)
1086 			genpd_unlock(link->parent);
1087 	}
1088 }
1089 
1090 /**
1091  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1092  * @genpd: PM domain to power on.
1093  * @use_lock: use the genpd locks when walking the parent domains.
1094  * @depth: nesting count for lockdep.
1095  *
1096  * This function is only called in "noirq" and "syscore" stages of system power
1097  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1098  * these cases the lock must be held.
1099  */
1100 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1101 				unsigned int depth)
1102 {
1103 	struct gpd_link *link;
1104 
1105 	if (genpd_status_on(genpd))
1106 		return;
1107 
1108 	list_for_each_entry(link, &genpd->child_links, child_node) {
1109 		genpd_sd_counter_inc(link->parent);
1110 
1111 		if (use_lock)
1112 			genpd_lock_nested(link->parent, depth + 1);
1113 
1114 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1115 
1116 		if (use_lock)
1117 			genpd_unlock(link->parent);
1118 	}
1119 
1120 	_genpd_power_on(genpd, false);
1121 	genpd->status = GENPD_STATE_ON;
1122 }
1123 
1124 /**
1125  * genpd_prepare - Start power transition of a device in a PM domain.
1126  * @dev: Device to start the transition of.
1127  *
1128  * Start a power transition of a device (during a system-wide power transition)
1129  * under the assumption that its pm_domain field points to the domain member of
1130  * an object of type struct generic_pm_domain representing a PM domain
1131  * consisting of I/O devices.
1132  */
1133 static int genpd_prepare(struct device *dev)
1134 {
1135 	struct generic_pm_domain *genpd;
1136 	int ret;
1137 
1138 	dev_dbg(dev, "%s()\n", __func__);
1139 
1140 	genpd = dev_to_genpd(dev);
1141 	if (IS_ERR(genpd))
1142 		return -EINVAL;
1143 
1144 	genpd_lock(genpd);
1145 
1146 	if (genpd->prepared_count++ == 0)
1147 		genpd->suspended_count = 0;
1148 
1149 	genpd_unlock(genpd);
1150 
1151 	ret = pm_generic_prepare(dev);
1152 	if (ret < 0) {
1153 		genpd_lock(genpd);
1154 
1155 		genpd->prepared_count--;
1156 
1157 		genpd_unlock(genpd);
1158 	}
1159 
1160 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1161 	return ret >= 0 ? 0 : ret;
1162 }
1163 
1164 /**
1165  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1166  *   I/O pm domain.
1167  * @dev: Device to suspend.
1168  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1169  *
1170  * Stop the device and remove power from the domain if all devices in it have
1171  * been stopped.
1172  */
1173 static int genpd_finish_suspend(struct device *dev, bool poweroff)
1174 {
1175 	struct generic_pm_domain *genpd;
1176 	int ret = 0;
1177 
1178 	genpd = dev_to_genpd(dev);
1179 	if (IS_ERR(genpd))
1180 		return -EINVAL;
1181 
1182 	if (poweroff)
1183 		ret = pm_generic_poweroff_noirq(dev);
1184 	else
1185 		ret = pm_generic_suspend_noirq(dev);
1186 	if (ret)
1187 		return ret;
1188 
1189 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1190 		return 0;
1191 
1192 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1193 	    !pm_runtime_status_suspended(dev)) {
1194 		ret = genpd_stop_dev(genpd, dev);
1195 		if (ret) {
1196 			if (poweroff)
1197 				pm_generic_restore_noirq(dev);
1198 			else
1199 				pm_generic_resume_noirq(dev);
1200 			return ret;
1201 		}
1202 	}
1203 
1204 	genpd_lock(genpd);
1205 	genpd->suspended_count++;
1206 	genpd_sync_power_off(genpd, true, 0);
1207 	genpd_unlock(genpd);
1208 
1209 	return 0;
1210 }
1211 
1212 /**
1213  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1214  * @dev: Device to suspend.
1215  *
1216  * Stop the device and remove power from the domain if all devices in it have
1217  * been stopped.
1218  */
1219 static int genpd_suspend_noirq(struct device *dev)
1220 {
1221 	dev_dbg(dev, "%s()\n", __func__);
1222 
1223 	return genpd_finish_suspend(dev, false);
1224 }
1225 
1226 /**
1227  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1228  * @dev: Device to resume.
1229  *
1230  * Restore power to the device's PM domain, if necessary, and start the device.
1231  */
1232 static int genpd_resume_noirq(struct device *dev)
1233 {
1234 	struct generic_pm_domain *genpd;
1235 	int ret;
1236 
1237 	dev_dbg(dev, "%s()\n", __func__);
1238 
1239 	genpd = dev_to_genpd(dev);
1240 	if (IS_ERR(genpd))
1241 		return -EINVAL;
1242 
1243 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1244 		return pm_generic_resume_noirq(dev);
1245 
1246 	genpd_lock(genpd);
1247 	genpd_sync_power_on(genpd, true, 0);
1248 	genpd->suspended_count--;
1249 	genpd_unlock(genpd);
1250 
1251 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1252 	    !pm_runtime_status_suspended(dev)) {
1253 		ret = genpd_start_dev(genpd, dev);
1254 		if (ret)
1255 			return ret;
1256 	}
1257 
1258 	return pm_generic_resume_noirq(dev);
1259 }
1260 
1261 /**
1262  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1263  * @dev: Device to freeze.
1264  *
1265  * Carry out a late freeze of a device under the assumption that its
1266  * pm_domain field points to the domain member of an object of type
1267  * struct generic_pm_domain representing a power domain consisting of I/O
1268  * devices.
1269  */
1270 static int genpd_freeze_noirq(struct device *dev)
1271 {
1272 	const struct generic_pm_domain *genpd;
1273 	int ret = 0;
1274 
1275 	dev_dbg(dev, "%s()\n", __func__);
1276 
1277 	genpd = dev_to_genpd(dev);
1278 	if (IS_ERR(genpd))
1279 		return -EINVAL;
1280 
1281 	ret = pm_generic_freeze_noirq(dev);
1282 	if (ret)
1283 		return ret;
1284 
1285 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1286 	    !pm_runtime_status_suspended(dev))
1287 		ret = genpd_stop_dev(genpd, dev);
1288 
1289 	return ret;
1290 }
1291 
1292 /**
1293  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1294  * @dev: Device to thaw.
1295  *
1296  * Start the device, unless power has been removed from the domain already
1297  * before the system transition.
1298  */
1299 static int genpd_thaw_noirq(struct device *dev)
1300 {
1301 	const struct generic_pm_domain *genpd;
1302 	int ret = 0;
1303 
1304 	dev_dbg(dev, "%s()\n", __func__);
1305 
1306 	genpd = dev_to_genpd(dev);
1307 	if (IS_ERR(genpd))
1308 		return -EINVAL;
1309 
1310 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1311 	    !pm_runtime_status_suspended(dev)) {
1312 		ret = genpd_start_dev(genpd, dev);
1313 		if (ret)
1314 			return ret;
1315 	}
1316 
1317 	return pm_generic_thaw_noirq(dev);
1318 }
1319 
1320 /**
1321  * genpd_poweroff_noirq - Completion of hibernation of device in an
1322  *   I/O PM domain.
1323  * @dev: Device to poweroff.
1324  *
1325  * Stop the device and remove power from the domain if all devices in it have
1326  * been stopped.
1327  */
1328 static int genpd_poweroff_noirq(struct device *dev)
1329 {
1330 	dev_dbg(dev, "%s()\n", __func__);
1331 
1332 	return genpd_finish_suspend(dev, true);
1333 }
1334 
1335 /**
1336  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1337  * @dev: Device to resume.
1338  *
1339  * Make sure the domain will be in the same power state as before the
1340  * hibernation the system is resuming from and start the device if necessary.
1341  */
1342 static int genpd_restore_noirq(struct device *dev)
1343 {
1344 	struct generic_pm_domain *genpd;
1345 	int ret = 0;
1346 
1347 	dev_dbg(dev, "%s()\n", __func__);
1348 
1349 	genpd = dev_to_genpd(dev);
1350 	if (IS_ERR(genpd))
1351 		return -EINVAL;
1352 
1353 	/*
1354 	 * At this point suspended_count == 0 means we are being run for the
1355 	 * first time for the given domain in the present cycle.
1356 	 */
1357 	genpd_lock(genpd);
1358 	if (genpd->suspended_count++ == 0) {
1359 		/*
1360 		 * The boot kernel might put the domain into an arbitrary state,
1361 		 * so make it appear as powered off to genpd_sync_power_on(),
1362 		 * so that it tries to power it on in case it was really off.
1363 		 */
1364 		genpd->status = GENPD_STATE_OFF;
1365 	}
1366 
1367 	genpd_sync_power_on(genpd, true, 0);
1368 	genpd_unlock(genpd);
1369 
1370 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1371 	    !pm_runtime_status_suspended(dev)) {
1372 		ret = genpd_start_dev(genpd, dev);
1373 		if (ret)
1374 			return ret;
1375 	}
1376 
1377 	return pm_generic_restore_noirq(dev);
1378 }
1379 
1380 /**
1381  * genpd_complete - Complete power transition of a device in a power domain.
1382  * @dev: Device to complete the transition of.
1383  *
1384  * Complete a power transition of a device (during a system-wide power
1385  * transition) under the assumption that its pm_domain field points to the
1386  * domain member of an object of type struct generic_pm_domain representing
1387  * a power domain consisting of I/O devices.
1388  */
1389 static void genpd_complete(struct device *dev)
1390 {
1391 	struct generic_pm_domain *genpd;
1392 
1393 	dev_dbg(dev, "%s()\n", __func__);
1394 
1395 	genpd = dev_to_genpd(dev);
1396 	if (IS_ERR(genpd))
1397 		return;
1398 
1399 	pm_generic_complete(dev);
1400 
1401 	genpd_lock(genpd);
1402 
1403 	genpd->prepared_count--;
1404 	if (!genpd->prepared_count)
1405 		genpd_queue_power_off_work(genpd);
1406 
1407 	genpd_unlock(genpd);
1408 }
1409 
1410 static void genpd_switch_state(struct device *dev, bool suspend)
1411 {
1412 	struct generic_pm_domain *genpd;
1413 	bool use_lock;
1414 
1415 	genpd = dev_to_genpd_safe(dev);
1416 	if (!genpd)
1417 		return;
1418 
1419 	use_lock = genpd_is_irq_safe(genpd);
1420 
1421 	if (use_lock)
1422 		genpd_lock(genpd);
1423 
1424 	if (suspend) {
1425 		genpd->suspended_count++;
1426 		genpd_sync_power_off(genpd, use_lock, 0);
1427 	} else {
1428 		genpd_sync_power_on(genpd, use_lock, 0);
1429 		genpd->suspended_count--;
1430 	}
1431 
1432 	if (use_lock)
1433 		genpd_unlock(genpd);
1434 }
1435 
1436 /**
1437  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1438  * @dev: The device that is attached to the genpd, that can be suspended.
1439  *
1440  * This routine should typically be called for a device that needs to be
1441  * suspended during the syscore suspend phase. It may also be called during
1442  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1443  * genpd.
1444  */
1445 void dev_pm_genpd_suspend(struct device *dev)
1446 {
1447 	genpd_switch_state(dev, true);
1448 }
1449 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1450 
1451 /**
1452  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1453  * @dev: The device that is attached to the genpd, which needs to be resumed.
1454  *
1455  * This routine should typically be called for a device that needs to be resumed
1456  * during the syscore resume phase. It may also be called during suspend-to-idle
1457  * to resume a corresponding CPU device that is attached to a genpd.
1458  */
1459 void dev_pm_genpd_resume(struct device *dev)
1460 {
1461 	genpd_switch_state(dev, false);
1462 }
1463 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
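
/*
 * Sketch of a typical syscore-stage caller (illustrative; the function names
 * and "my_timer_dev" are hypothetical):
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_timer_dev);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_timer_dev);
 *	}
 *
 * Both helpers go through genpd_switch_state(), which only takes the genpd
 * lock when the domain is IRQ safe, since these stages may run with
 * interrupts disabled.
 */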
1464 
1465 #else /* !CONFIG_PM_SLEEP */
1466 
1467 #define genpd_prepare		NULL
1468 #define genpd_suspend_noirq	NULL
1469 #define genpd_resume_noirq	NULL
1470 #define genpd_freeze_noirq	NULL
1471 #define genpd_thaw_noirq	NULL
1472 #define genpd_poweroff_noirq	NULL
1473 #define genpd_restore_noirq	NULL
1474 #define genpd_complete		NULL
1475 
1476 #endif /* CONFIG_PM_SLEEP */
1477 
1478 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
1479 {
1480 	struct generic_pm_domain_data *gpd_data;
1481 	int ret;
1482 
1483 	ret = dev_pm_get_subsys_data(dev);
1484 	if (ret)
1485 		return ERR_PTR(ret);
1486 
1487 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1488 	if (!gpd_data) {
1489 		ret = -ENOMEM;
1490 		goto err_put;
1491 	}
1492 
1493 	gpd_data->base.dev = dev;
1494 	gpd_data->td.constraint_changed = true;
1495 	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1496 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1497 	gpd_data->next_wakeup = KTIME_MAX;
1498 
1499 	spin_lock_irq(&dev->power.lock);
1500 
1501 	if (dev->power.subsys_data->domain_data) {
1502 		ret = -EINVAL;
1503 		goto err_free;
1504 	}
1505 
1506 	dev->power.subsys_data->domain_data = &gpd_data->base;
1507 
1508 	spin_unlock_irq(&dev->power.lock);
1509 
1510 	return gpd_data;
1511 
1512  err_free:
1513 	spin_unlock_irq(&dev->power.lock);
1514 	kfree(gpd_data);
1515  err_put:
1516 	dev_pm_put_subsys_data(dev);
1517 	return ERR_PTR(ret);
1518 }
1519 
1520 static void genpd_free_dev_data(struct device *dev,
1521 				struct generic_pm_domain_data *gpd_data)
1522 {
1523 	spin_lock_irq(&dev->power.lock);
1524 
1525 	dev->power.subsys_data->domain_data = NULL;
1526 
1527 	spin_unlock_irq(&dev->power.lock);
1528 
1529 	kfree(gpd_data);
1530 	dev_pm_put_subsys_data(dev);
1531 }
1532 
1533 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1534 				 int cpu, bool set, unsigned int depth)
1535 {
1536 	struct gpd_link *link;
1537 
1538 	if (!genpd_is_cpu_domain(genpd))
1539 		return;
1540 
1541 	list_for_each_entry(link, &genpd->child_links, child_node) {
1542 		struct generic_pm_domain *parent = link->parent;
1543 
1544 		genpd_lock_nested(parent, depth + 1);
1545 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1546 		genpd_unlock(parent);
1547 	}
1548 
1549 	if (set)
1550 		cpumask_set_cpu(cpu, genpd->cpus);
1551 	else
1552 		cpumask_clear_cpu(cpu, genpd->cpus);
1553 }
1554 
1555 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1556 {
1557 	if (cpu >= 0)
1558 		genpd_update_cpumask(genpd, cpu, true, 0);
1559 }
1560 
1561 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1562 {
1563 	if (cpu >= 0)
1564 		genpd_update_cpumask(genpd, cpu, false, 0);
1565 }
1566 
1567 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1568 {
1569 	int cpu;
1570 
1571 	if (!genpd_is_cpu_domain(genpd))
1572 		return -1;
1573 
1574 	for_each_possible_cpu(cpu) {
1575 		if (get_cpu_device(cpu) == dev)
1576 			return cpu;
1577 	}
1578 
1579 	return -1;
1580 }
1581 
1582 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1583 			    struct device *base_dev)
1584 {
1585 	struct generic_pm_domain_data *gpd_data;
1586 	int ret;
1587 
1588 	dev_dbg(dev, "%s()\n", __func__);
1589 
1590 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1591 		return -EINVAL;
1592 
1593 	gpd_data = genpd_alloc_dev_data(dev);
1594 	if (IS_ERR(gpd_data))
1595 		return PTR_ERR(gpd_data);
1596 
1597 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1598 
1599 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1600 	if (ret)
1601 		goto out;
1602 
1603 	genpd_lock(genpd);
1604 
1605 	genpd_set_cpumask(genpd, gpd_data->cpu);
1606 	dev_pm_domain_set(dev, &genpd->domain);
1607 
1608 	genpd->device_count++;
1609 	genpd->max_off_time_changed = true;
1610 
1611 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1612 
1613 	genpd_unlock(genpd);
1614  out:
1615 	if (ret)
1616 		genpd_free_dev_data(dev, gpd_data);
1617 	else
1618 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1619 					DEV_PM_QOS_RESUME_LATENCY);
1620 
1621 	return ret;
1622 }
1623 
1624 /**
1625  * pm_genpd_add_device - Add a device to an I/O PM domain.
1626  * @genpd: PM domain to add the device to.
1627  * @dev: Device to be added.
1628  */
1629 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1630 {
1631 	int ret;
1632 
1633 	mutex_lock(&gpd_list_lock);
1634 	ret = genpd_add_device(genpd, dev, dev);
1635 	mutex_unlock(&gpd_list_lock);
1636 
1637 	return ret;
1638 }
1639 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
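
/*
 * Minimal sketch of statically attaching a device to a domain (illustrative;
 * "my_pd" and "pdev" are hypothetical, and DT platforms usually attach devices
 * automatically through their power-domains property instead):
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	pm_genpd_remove_device(&pdev->dev);
 */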
1640 
1641 static int genpd_remove_device(struct generic_pm_domain *genpd,
1642 			       struct device *dev)
1643 {
1644 	struct generic_pm_domain_data *gpd_data;
1645 	struct pm_domain_data *pdd;
1646 	int ret = 0;
1647 
1648 	dev_dbg(dev, "%s()\n", __func__);
1649 
1650 	pdd = dev->power.subsys_data->domain_data;
1651 	gpd_data = to_gpd_data(pdd);
1652 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1653 				   DEV_PM_QOS_RESUME_LATENCY);
1654 
1655 	genpd_lock(genpd);
1656 
1657 	if (genpd->prepared_count > 0) {
1658 		ret = -EAGAIN;
1659 		goto out;
1660 	}
1661 
1662 	genpd->device_count--;
1663 	genpd->max_off_time_changed = true;
1664 
1665 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1666 	dev_pm_domain_set(dev, NULL);
1667 
1668 	list_del_init(&pdd->list_node);
1669 
1670 	genpd_unlock(genpd);
1671 
1672 	if (genpd->detach_dev)
1673 		genpd->detach_dev(genpd, dev);
1674 
1675 	genpd_free_dev_data(dev, gpd_data);
1676 
1677 	return 0;
1678 
1679  out:
1680 	genpd_unlock(genpd);
1681 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1682 
1683 	return ret;
1684 }
1685 
1686 /**
1687  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1688  * @dev: Device to be removed.
1689  */
1690 int pm_genpd_remove_device(struct device *dev)
1691 {
1692 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1693 
1694 	if (!genpd)
1695 		return -EINVAL;
1696 
1697 	return genpd_remove_device(genpd, dev);
1698 }
1699 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1700 
1701 /**
1702  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1703  *
1704  * @dev: Device that should be associated with the notifier
1705  * @nb: The notifier block to register
1706  *
1707  * Users may call this function to add a genpd power on/off notifier for an
1708  * attached @dev. Only one notifier per device is allowed. The notifier is
1709  * sent when genpd is powering on/off the PM domain.
1710  *
1711  * It is assumed that the user guarantees that the genpd won't be detached
1712  * while this routine is being called.
1713  *
1714  * Returns 0 on success and negative error values on failures.
1715  */
1716 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1717 {
1718 	struct generic_pm_domain *genpd;
1719 	struct generic_pm_domain_data *gpd_data;
1720 	int ret;
1721 
1722 	genpd = dev_to_genpd_safe(dev);
1723 	if (!genpd)
1724 		return -ENODEV;
1725 
1726 	if (WARN_ON(!dev->power.subsys_data ||
1727 		     !dev->power.subsys_data->domain_data))
1728 		return -EINVAL;
1729 
1730 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1731 	if (gpd_data->power_nb)
1732 		return -EEXIST;
1733 
1734 	genpd_lock(genpd);
1735 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1736 	genpd_unlock(genpd);
1737 
1738 	if (ret) {
1739 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1740 			 genpd->name);
1741 		return ret;
1742 	}
1743 
1744 	gpd_data->power_nb = nb;
1745 	return 0;
1746 }
1747 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
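
/*
 * Sketch of a power on/off notifier (illustrative; the callback name and the
 * save/restore helpers are hypothetical):
 *
 *	static int my_genpd_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			my_save_context();
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			my_restore_context();
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 *
 * where my_nb.notifier_call points at my_genpd_notifier().
 */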
1748 
1749 /**
1750  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1751  *
1752  * @dev: Device that is associated with the notifier
1753  *
1754  * Users may call this function to remove a genpd power on/off notifier for an
1755  * attached @dev.
1756  *
1757  * It is assumed that the user guarantees that the genpd won't be detached
1758  * while this routine is being called.
1759  *
1760  * Returns 0 on success and negative error values on failures.
1761  */
1762 int dev_pm_genpd_remove_notifier(struct device *dev)
1763 {
1764 	struct generic_pm_domain *genpd;
1765 	struct generic_pm_domain_data *gpd_data;
1766 	int ret;
1767 
1768 	genpd = dev_to_genpd_safe(dev);
1769 	if (!genpd)
1770 		return -ENODEV;
1771 
1772 	if (WARN_ON(!dev->power.subsys_data ||
1773 		     !dev->power.subsys_data->domain_data))
1774 		return -EINVAL;
1775 
1776 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1777 	if (!gpd_data->power_nb)
1778 		return -ENODEV;
1779 
1780 	genpd_lock(genpd);
1781 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1782 					    gpd_data->power_nb);
1783 	genpd_unlock(genpd);
1784 
1785 	if (ret) {
1786 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1787 			 genpd->name);
1788 		return ret;
1789 	}
1790 
1791 	gpd_data->power_nb = NULL;
1792 	return 0;
1793 }
1794 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1795 
1796 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1797 			       struct generic_pm_domain *subdomain)
1798 {
1799 	struct gpd_link *link, *itr;
1800 	int ret = 0;
1801 
1802 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1803 	    || genpd == subdomain)
1804 		return -EINVAL;
1805 
1806 	/*
1807 	 * If the domain can be powered on/off in an IRQ safe
1808 	 * context, ensure that the subdomain can also be
1809 	 * powered on/off in that context.
1810 	 */
1811 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1812 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1813 				genpd->name, subdomain->name);
1814 		return -EINVAL;
1815 	}
1816 
1817 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1818 	if (!link)
1819 		return -ENOMEM;
1820 
1821 	genpd_lock(subdomain);
1822 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1823 
1824 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1825 		ret = -EINVAL;
1826 		goto out;
1827 	}
1828 
1829 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1830 		if (itr->child == subdomain && itr->parent == genpd) {
1831 			ret = -EINVAL;
1832 			goto out;
1833 		}
1834 	}
1835 
1836 	link->parent = genpd;
1837 	list_add_tail(&link->parent_node, &genpd->parent_links);
1838 	link->child = subdomain;
1839 	list_add_tail(&link->child_node, &subdomain->child_links);
1840 	if (genpd_status_on(subdomain))
1841 		genpd_sd_counter_inc(genpd);
1842 
1843  out:
1844 	genpd_unlock(genpd);
1845 	genpd_unlock(subdomain);
1846 	if (ret)
1847 		kfree(link);
1848 	return ret;
1849 }
1850 
1851 /**
1852  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1853  * @genpd: Leader PM domain to add the subdomain to.
1854  * @subdomain: Subdomain to be added.
1855  */
1856 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1857 			   struct generic_pm_domain *subdomain)
1858 {
1859 	int ret;
1860 
1861 	mutex_lock(&gpd_list_lock);
1862 	ret = genpd_add_subdomain(genpd, subdomain);
1863 	mutex_unlock(&gpd_list_lock);
1864 
1865 	return ret;
1866 }
1867 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
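
/*
 * Sketch of building a two-level hierarchy at provider init time
 * (illustrative; "parent_pd" and "child_pd" are hypothetical domains that
 * have already been through pm_genpd_init()):
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *	if (ret)
 *		return ret;
 *
 * After this, powering on child_pd makes genpd_power_on() walk up and power
 * on parent_pd first, and parent_pd cannot be powered off while child_pd is
 * still on (see the sd_count handling above).
 */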
1868 
1869 /**
1870  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1871  * @genpd: Leader PM domain to remove the subdomain from.
1872  * @subdomain: Subdomain to be removed.
1873  */
1874 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1875 			      struct generic_pm_domain *subdomain)
1876 {
1877 	struct gpd_link *l, *link;
1878 	int ret = -EINVAL;
1879 
1880 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1881 		return -EINVAL;
1882 
1883 	genpd_lock(subdomain);
1884 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1885 
1886 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1887 		pr_warn("%s: unable to remove subdomain %s\n",
1888 			genpd->name, subdomain->name);
1889 		ret = -EBUSY;
1890 		goto out;
1891 	}
1892 
1893 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1894 		if (link->child != subdomain)
1895 			continue;
1896 
1897 		list_del(&link->parent_node);
1898 		list_del(&link->child_node);
1899 		kfree(link);
1900 		if (genpd_status_on(subdomain))
1901 			genpd_sd_counter_dec(genpd);
1902 
1903 		ret = 0;
1904 		break;
1905 	}
1906 
1907 out:
1908 	genpd_unlock(genpd);
1909 	genpd_unlock(subdomain);
1910 
1911 	return ret;
1912 }
1913 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1914 
1915 static void genpd_free_default_power_state(struct genpd_power_state *states,
1916 					   unsigned int state_count)
1917 {
1918 	kfree(states);
1919 }
1920 
1921 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1922 {
1923 	struct genpd_power_state *state;
1924 
1925 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1926 	if (!state)
1927 		return -ENOMEM;
1928 
1929 	genpd->states = state;
1930 	genpd->state_count = 1;
1931 	genpd->free_states = genpd_free_default_power_state;
1932 
1933 	return 0;
1934 }
1935 
1936 static void genpd_lock_init(struct generic_pm_domain *genpd)
1937 {
1938 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1939 		spin_lock_init(&genpd->slock);
1940 		genpd->lock_ops = &genpd_spin_ops;
1941 	} else {
1942 		mutex_init(&genpd->mlock);
1943 		genpd->lock_ops = &genpd_mtx_ops;
1944 	}
1945 }
1946 
1947 /**
1948  * pm_genpd_init - Initialize a generic I/O PM domain object.
1949  * @genpd: PM domain object to initialize.
1950  * @gov: PM domain governor to associate with the domain (may be NULL).
1951  * @is_off: Initial value of the domain's power_is_off field.
1952  *
1953  * Returns 0 on successful initialization, else a negative error code.
1954  */
1955 int pm_genpd_init(struct generic_pm_domain *genpd,
1956 		  struct dev_power_governor *gov, bool is_off)
1957 {
1958 	int ret;
1959 
1960 	if (IS_ERR_OR_NULL(genpd))
1961 		return -EINVAL;
1962 
1963 	INIT_LIST_HEAD(&genpd->parent_links);
1964 	INIT_LIST_HEAD(&genpd->child_links);
1965 	INIT_LIST_HEAD(&genpd->dev_list);
1966 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
1967 	genpd_lock_init(genpd);
1968 	genpd->gov = gov;
1969 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1970 	atomic_set(&genpd->sd_count, 0);
1971 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
1972 	genpd->device_count = 0;
1973 	genpd->max_off_time_ns = -1;
1974 	genpd->max_off_time_changed = true;
1975 	genpd->provider = NULL;
1976 	genpd->has_provider = false;
1977 	genpd->accounting_time = ktime_get();
1978 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1979 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1980 	genpd->domain.ops.prepare = genpd_prepare;
1981 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1982 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1983 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1984 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1985 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1986 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1987 	genpd->domain.ops.complete = genpd_complete;
1988 	genpd->domain.start = genpd_dev_pm_start;
1989 
1990 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1991 		genpd->dev_ops.stop = pm_clk_suspend;
1992 		genpd->dev_ops.start = pm_clk_resume;
1993 	}
1994 
1995 	/* Always-on domains must be powered on at initialization. */
1996 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
1997 			!genpd_status_on(genpd))
1998 		return -EINVAL;
1999 
2000 	if (genpd_is_cpu_domain(genpd) &&
2001 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2002 		return -ENOMEM;
2003 
2004 	/* Use only one "off" state if there were no states declared */
2005 	if (genpd->state_count == 0) {
2006 		ret = genpd_set_default_power_state(genpd);
2007 		if (ret) {
2008 			if (genpd_is_cpu_domain(genpd))
2009 				free_cpumask_var(genpd->cpus);
2010 			return ret;
2011 		}
2012 	} else if (!gov && genpd->state_count > 1) {
2013 		pr_warn("%s: no governor for states\n", genpd->name);
2014 	}
2015 
2016 	device_initialize(&genpd->dev);
2017 	dev_set_name(&genpd->dev, "%s", genpd->name);
2018 
2019 	mutex_lock(&gpd_list_lock);
2020 	list_add(&genpd->gpd_list_node, &gpd_list);
2021 	mutex_unlock(&gpd_list_lock);
2022 	genpd_debug_add(genpd);
2023 
2024 	return 0;
2025 }
2026 EXPORT_SYMBOL_GPL(pm_genpd_init);
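
/*
 * Illustrative sketch (not part of this file): minimal domain registration as
 * a platform-specific driver might do it; foo_pd, foo_power_on() and
 * foo_power_off() are hypothetical.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_power_on,
 *		.power_off = foo_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *
 * Passing is_off == false registers the domain as initially powered on;
 * pm_genpd_remove() can undo the registration once no provider, devices or
 * subdomains reference the domain.
 */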
2027 
2028 static int genpd_remove(struct generic_pm_domain *genpd)
2029 {
2030 	struct gpd_link *l, *link;
2031 
2032 	if (IS_ERR_OR_NULL(genpd))
2033 		return -EINVAL;
2034 
2035 	genpd_lock(genpd);
2036 
2037 	if (genpd->has_provider) {
2038 		genpd_unlock(genpd);
2039 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2040 		return -EBUSY;
2041 	}
2042 
2043 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2044 		genpd_unlock(genpd);
2045 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2046 		return -EBUSY;
2047 	}
2048 
2049 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2050 		list_del(&link->parent_node);
2051 		list_del(&link->child_node);
2052 		kfree(link);
2053 	}
2054 
2055 	genpd_debug_remove(genpd);
2056 	list_del(&genpd->gpd_list_node);
2057 	genpd_unlock(genpd);
2058 	cancel_work_sync(&genpd->power_off_work);
2059 	if (genpd_is_cpu_domain(genpd))
2060 		free_cpumask_var(genpd->cpus);
2061 	if (genpd->free_states)
2062 		genpd->free_states(genpd->states, genpd->state_count);
2063 
2064 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2065 
2066 	return 0;
2067 }
2068 
2069 /**
2070  * pm_genpd_remove - Remove a generic I/O PM domain
2071  * @genpd: Pointer to PM domain that is to be removed.
2072  *
2073  * To remove the PM domain, this function:
2074  *  - Removes the PM domain as a subdomain to any parent domains,
2075  *    if it was added.
2076  *  - Removes the PM domain from the list of registered PM domains.
2077  *
2078  * The PM domain will only be removed if the associated provider has
2079  * been removed, it is not a parent of any other PM domain, and it has
2080  * no devices associated with it.
2081  */
2082 int pm_genpd_remove(struct generic_pm_domain *genpd)
2083 {
2084 	int ret;
2085 
2086 	mutex_lock(&gpd_list_lock);
2087 	ret = genpd_remove(genpd);
2088 	mutex_unlock(&gpd_list_lock);
2089 
2090 	return ret;
2091 }
2092 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2093 
2094 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2095 
2096 /*
2097  * Device Tree based PM domain providers.
2098  *
2099  * The code below implements generic device tree based PM domain providers that
2100  * bind device tree nodes with generic PM domains registered in the system.
2101  *
2102  * Any driver that registers generic PM domains and needs to support binding of
2103  * devices to these domains is supposed to register a PM domain provider, which
2104  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2105  *
2106  * Two simple mapping functions have been provided for convenience:
2107  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2108  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2109  *    index.
2110  */
2111 
2112 /**
2113  * struct of_genpd_provider - PM domain provider registration structure
2114  * @link: Entry in global list of PM domain providers
2115  * @node: Pointer to device tree node of PM domain provider
2116  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2117  *         into a PM domain.
2118  * @data: context pointer to be passed into @xlate callback
2119  */
2120 struct of_genpd_provider {
2121 	struct list_head link;
2122 	struct device_node *node;
2123 	genpd_xlate_t xlate;
2124 	void *data;
2125 };
2126 
2127 /* List of registered PM domain providers. */
2128 static LIST_HEAD(of_genpd_providers);
2129 /* Mutex to protect the list above. */
2130 static DEFINE_MUTEX(of_genpd_mutex);
2131 
2132 /**
2133  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2134  * @genpdspec: OF phandle args to map into a PM domain
2135  * @data: xlate function private data - pointer to struct generic_pm_domain
2136  *
2137  * This is a generic xlate function that can be used to model PM domains that
2138  * have their own device tree nodes. The private data of the xlate function
2139  * needs to be a valid pointer to a struct generic_pm_domain.
2140  */
2141 static struct generic_pm_domain *genpd_xlate_simple(
2142 					struct of_phandle_args *genpdspec,
2143 					void *data)
2144 {
2145 	return data;
2146 }
2147 
2148 /**
2149  * genpd_xlate_onecell() - Xlate function using a single index.
2150  * @genpdspec: OF phandle args to map into a PM domain
2151  * @data: xlate function private data - pointer to struct genpd_onecell_data
2152  *
2153  * This is a generic xlate function that can be used to model simple PM domain
2154  * controllers that have one device tree node and provide multiple PM domains.
2155  * A single cell is used as an index into an array of PM domains specified in
2156  * the genpd_onecell_data struct when registering the provider.
2157  */
2158 static struct generic_pm_domain *genpd_xlate_onecell(
2159 					struct of_phandle_args *genpdspec,
2160 					void *data)
2161 {
2162 	struct genpd_onecell_data *genpd_data = data;
2163 	unsigned int idx = genpdspec->args[0];
2164 
2165 	if (genpdspec->args_count != 1)
2166 		return ERR_PTR(-EINVAL);
2167 
2168 	if (idx >= genpd_data->num_domains) {
2169 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2170 		return ERR_PTR(-EINVAL);
2171 	}
2172 
2173 	if (!genpd_data->domains[idx])
2174 		return ERR_PTR(-ENOENT);
2175 
2176 	return genpd_data->domains[idx];
2177 }
2178 
2179 /**
2180  * genpd_add_provider() - Register a PM domain provider for a node
2181  * @np: Device node pointer associated with the PM domain provider.
2182  * @xlate: Callback for decoding PM domain from phandle arguments.
2183  * @data: Context pointer for @xlate callback.
2184  */
2185 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2186 			      void *data)
2187 {
2188 	struct of_genpd_provider *cp;
2189 
2190 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2191 	if (!cp)
2192 		return -ENOMEM;
2193 
2194 	cp->node = of_node_get(np);
2195 	cp->data = data;
2196 	cp->xlate = xlate;
2197 	fwnode_dev_initialized(&np->fwnode, true);
2198 
2199 	mutex_lock(&of_genpd_mutex);
2200 	list_add(&cp->link, &of_genpd_providers);
2201 	mutex_unlock(&of_genpd_mutex);
2202 	pr_debug("Added domain provider from %pOF\n", np);
2203 
2204 	return 0;
2205 }
2206 
2207 static bool genpd_present(const struct generic_pm_domain *genpd)
2208 {
2209 	bool ret = false;
2210 	const struct generic_pm_domain *gpd;
2211 
2212 	mutex_lock(&gpd_list_lock);
2213 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2214 		if (gpd == genpd) {
2215 			ret = true;
2216 			break;
2217 		}
2218 	}
2219 	mutex_unlock(&gpd_list_lock);
2220 
2221 	return ret;
2222 }
2223 
2224 /**
2225  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2226  * @np: Device node pointer associated with the PM domain provider.
2227  * @genpd: Pointer to PM domain associated with the PM domain provider.
2228  */
2229 int of_genpd_add_provider_simple(struct device_node *np,
2230 				 struct generic_pm_domain *genpd)
2231 {
2232 	int ret;
2233 
2234 	if (!np || !genpd)
2235 		return -EINVAL;
2236 
2237 	if (!genpd_present(genpd))
2238 		return -EINVAL;
2239 
2240 	genpd->dev.of_node = np;
2241 
2242 	/* Parse genpd OPP table */
2243 	if (genpd->set_performance_state) {
2244 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2245 		if (ret) {
2246 			if (ret != -EPROBE_DEFER)
2247 				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2248 					ret);
2249 			return ret;
2250 		}
2251 
2252 		/*
2253 		 * Save table for faster processing while setting performance
2254 		 * state.
2255 		 */
2256 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2257 		WARN_ON(IS_ERR(genpd->opp_table));
2258 	}
2259 
2260 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2261 	if (ret) {
2262 		if (genpd->set_performance_state) {
2263 			dev_pm_opp_put_opp_table(genpd->opp_table);
2264 			dev_pm_opp_of_remove_table(&genpd->dev);
2265 		}
2266 
2267 		return ret;
2268 	}
2269 
2270 	genpd->provider = &np->fwnode;
2271 	genpd->has_provider = true;
2272 
2273 	return 0;
2274 }
2275 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
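
/*
 * Illustrative sketch (not part of this file): registering a 1:1 provider for
 * the driver's own DT node, assuming a hypothetical foo_pd that has already
 * been set up with pm_genpd_init().
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *	if (ret)
 *		return ret;
 *
 * of_genpd_del_provider(pdev->dev.of_node) undoes the registration, e.g. in
 * the driver's remove path.
 */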
2276 
2277 /**
2278  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2279  * @np: Device node pointer associated with the PM domain provider.
2280  * @data: Pointer to the data associated with the PM domain provider.
2281  */
2282 int of_genpd_add_provider_onecell(struct device_node *np,
2283 				  struct genpd_onecell_data *data)
2284 {
2285 	struct generic_pm_domain *genpd;
2286 	unsigned int i;
2287 	int ret = -EINVAL;
2288 
2289 	if (!np || !data)
2290 		return -EINVAL;
2291 
2292 	if (!data->xlate)
2293 		data->xlate = genpd_xlate_onecell;
2294 
2295 	for (i = 0; i < data->num_domains; i++) {
2296 		genpd = data->domains[i];
2297 
2298 		if (!genpd)
2299 			continue;
2300 		if (!genpd_present(genpd))
2301 			goto error;
2302 
2303 		genpd->dev.of_node = np;
2304 
2305 		/* Parse genpd OPP table */
2306 		if (genpd->set_performance_state) {
2307 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2308 			if (ret) {
2309 				if (ret != -EPROBE_DEFER)
2310 					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2311 						i, ret);
2312 				goto error;
2313 			}
2314 
2315 			/*
2316 			 * Save table for faster processing while setting
2317 			 * performance state.
2318 			 */
2319 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2320 			WARN_ON(IS_ERR(genpd->opp_table));
2321 		}
2322 
2323 		genpd->provider = &np->fwnode;
2324 		genpd->has_provider = true;
2325 	}
2326 
2327 	ret = genpd_add_provider(np, data->xlate, data);
2328 	if (ret < 0)
2329 		goto error;
2330 
2331 	return 0;
2332 
2333 error:
2334 	while (i--) {
2335 		genpd = data->domains[i];
2336 
2337 		if (!genpd)
2338 			continue;
2339 
2340 		genpd->provider = NULL;
2341 		genpd->has_provider = false;
2342 
2343 		if (genpd->set_performance_state) {
2344 			dev_pm_opp_put_opp_table(genpd->opp_table);
2345 			dev_pm_opp_of_remove_table(&genpd->dev);
2346 		}
2347 	}
2348 
2349 	return ret;
2350 }
2351 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
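
/*
 * Illustrative sketch (not part of this file): a onecell provider exposing an
 * array of domains indexed by a single specifier cell. The domain array and
 * names are hypothetical.
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		[0] = &foo_cpu_pd,
 *		[1] = &foo_gpu_pd,
 *	};
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_genpd_data);
 *
 * A consumer would then reference a domain by index, for example with
 * "power-domains = <&foo_power_controller 1>;" while the provider node
 * declares "#power-domain-cells = <1>;".
 */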
2352 
2353 /**
2354  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2355  * @np: Device node pointer associated with the PM domain provider
2356  */
2357 void of_genpd_del_provider(struct device_node *np)
2358 {
2359 	struct of_genpd_provider *cp, *tmp;
2360 	struct generic_pm_domain *gpd;
2361 
2362 	mutex_lock(&gpd_list_lock);
2363 	mutex_lock(&of_genpd_mutex);
2364 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2365 		if (cp->node == np) {
2366 			/*
2367 			 * For each PM domain associated with the
2368 			 * provider, set the 'has_provider' to false
2369 			 * so that the PM domain can be safely removed.
2370 			 */
2371 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2372 				if (gpd->provider == &np->fwnode) {
2373 					gpd->has_provider = false;
2374 
2375 					if (!gpd->set_performance_state)
2376 						continue;
2377 
2378 					dev_pm_opp_put_opp_table(gpd->opp_table);
2379 					dev_pm_opp_of_remove_table(&gpd->dev);
2380 				}
2381 			}
2382 
2383 			fwnode_dev_initialized(&cp->node->fwnode, false);
2384 			list_del(&cp->link);
2385 			of_node_put(cp->node);
2386 			kfree(cp);
2387 			break;
2388 		}
2389 	}
2390 	mutex_unlock(&of_genpd_mutex);
2391 	mutex_unlock(&gpd_list_lock);
2392 }
2393 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2394 
2395 /**
2396  * genpd_get_from_provider() - Look-up PM domain
2397  * @genpdspec: OF phandle args to use for look-up
2398  *
2399  * Looks for a PM domain provider under the node specified by @genpdspec and,
2400  * if found, uses the provider's xlate function to map the phandle args to a
2401  * PM domain.
2402  *
2403  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2404  * on failure.
2405  */
2406 static struct generic_pm_domain *genpd_get_from_provider(
2407 					struct of_phandle_args *genpdspec)
2408 {
2409 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2410 	struct of_genpd_provider *provider;
2411 
2412 	if (!genpdspec)
2413 		return ERR_PTR(-EINVAL);
2414 
2415 	mutex_lock(&of_genpd_mutex);
2416 
2417 	/* Check if we have such a provider in our list */
2418 	list_for_each_entry(provider, &of_genpd_providers, link) {
2419 		if (provider->node == genpdspec->np)
2420 			genpd = provider->xlate(genpdspec, provider->data);
2421 		if (!IS_ERR(genpd))
2422 			break;
2423 	}
2424 
2425 	mutex_unlock(&of_genpd_mutex);
2426 
2427 	return genpd;
2428 }
2429 
2430 /**
2431  * of_genpd_add_device() - Add a device to an I/O PM domain
2432  * @genpdspec: OF phandle args to use for look-up PM domain
2433  * @dev: Device to be added.
2434  *
2435  * Looks-up an I/O PM domain based upon phandle args provided and adds
2436  * the device to the PM domain. Returns a negative error code on failure.
2437  */
2438 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2439 {
2440 	struct generic_pm_domain *genpd;
2441 	int ret;
2442 
2443 	mutex_lock(&gpd_list_lock);
2444 
2445 	genpd = genpd_get_from_provider(genpdspec);
2446 	if (IS_ERR(genpd)) {
2447 		ret = PTR_ERR(genpd);
2448 		goto out;
2449 	}
2450 
2451 	ret = genpd_add_device(genpd, dev, dev);
2452 
2453 out:
2454 	mutex_unlock(&gpd_list_lock);
2455 
2456 	return ret;
2457 }
2458 EXPORT_SYMBOL_GPL(of_genpd_add_device);
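
/*
 * Illustrative sketch (not part of this file): adding a device to a domain by
 * specifier, as SoC setup code might do when it cannot rely on the generic
 * attach path; the property index used here is only an example.
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}
 */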
2459 
2460 /**
2461  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2462  * @parent_spec: OF phandle args to use for parent PM domain look-up
2463  * @subdomain_spec: OF phandle args to use for subdomain look-up
2464  *
2465  * Looks-up a parent PM domain and subdomain based upon phandle args
2466  * provided and adds the subdomain to the parent PM domain. Returns a
2467  * negative error code on failure.
2468  */
2469 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2470 			   struct of_phandle_args *subdomain_spec)
2471 {
2472 	struct generic_pm_domain *parent, *subdomain;
2473 	int ret;
2474 
2475 	mutex_lock(&gpd_list_lock);
2476 
2477 	parent = genpd_get_from_provider(parent_spec);
2478 	if (IS_ERR(parent)) {
2479 		ret = PTR_ERR(parent);
2480 		goto out;
2481 	}
2482 
2483 	subdomain = genpd_get_from_provider(subdomain_spec);
2484 	if (IS_ERR(subdomain)) {
2485 		ret = PTR_ERR(subdomain);
2486 		goto out;
2487 	}
2488 
2489 	ret = genpd_add_subdomain(parent, subdomain);
2490 
2491 out:
2492 	mutex_unlock(&gpd_list_lock);
2493 
2494 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2495 }
2496 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2497 
2498 /**
2499  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2500  * @parent_spec: OF phandle args to use for parent PM domain look-up
2501  * @subdomain_spec: OF phandle args to use for subdomain look-up
2502  *
2503  * Looks-up a parent PM domain and subdomain based upon phandle args
2504  * provided and removes the subdomain from the parent PM domain. Returns a
2505  * negative error code on failure.
2506  */
2507 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2508 			      struct of_phandle_args *subdomain_spec)
2509 {
2510 	struct generic_pm_domain *parent, *subdomain;
2511 	int ret;
2512 
2513 	mutex_lock(&gpd_list_lock);
2514 
2515 	parent = genpd_get_from_provider(parent_spec);
2516 	if (IS_ERR(parent)) {
2517 		ret = PTR_ERR(parent);
2518 		goto out;
2519 	}
2520 
2521 	subdomain = genpd_get_from_provider(subdomain_spec);
2522 	if (IS_ERR(subdomain)) {
2523 		ret = PTR_ERR(subdomain);
2524 		goto out;
2525 	}
2526 
2527 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2528 
2529 out:
2530 	mutex_unlock(&gpd_list_lock);
2531 
2532 	return ret;
2533 }
2534 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2535 
2536 /**
2537  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2538  * @np: Pointer to device node associated with provider
2539  *
2540  * Find the last PM domain that was added by a particular provider and
2541  * remove this PM domain from the list of PM domains. The provider is
2542  * identified by the device node @np that is passed. The PM domain will
2543  * only be removed if the provider associated with the domain has been
2544  * removed.
2545  *
2546  * Returns a valid pointer to struct generic_pm_domain on success or
2547  * ERR_PTR() on failure.
2548  */
2549 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2550 {
2551 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2552 	int ret;
2553 
2554 	if (IS_ERR_OR_NULL(np))
2555 		return ERR_PTR(-EINVAL);
2556 
2557 	mutex_lock(&gpd_list_lock);
2558 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2559 		if (gpd->provider == &np->fwnode) {
2560 			ret = genpd_remove(gpd);
2561 			genpd = ret ? ERR_PTR(ret) : gpd;
2562 			break;
2563 		}
2564 	}
2565 	mutex_unlock(&gpd_list_lock);
2566 
2567 	return genpd;
2568 }
2569 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
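
/*
 * Illustrative sketch (not part of this file): a provider driver's teardown
 * path might drop its provider registration and then remove the domains it
 * registered one by one; np is the provider's device node.
 *
 *	of_genpd_del_provider(np);
 *
 *	do {
 *		pd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(pd));
 *
 * Each successfully removed domain is returned, and an ERR_PTR() value ends
 * the loop once no removable domain registered for @np remains.
 */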
2570 
2571 static void genpd_release_dev(struct device *dev)
2572 {
2573 	of_node_put(dev->of_node);
2574 	kfree(dev);
2575 }
2576 
2577 static struct bus_type genpd_bus_type = {
2578 	.name		= "genpd",
2579 };
2580 
2581 /**
2582  * genpd_dev_pm_detach - Detach a device from its PM domain.
2583  * @dev: Device to detach.
2584  * @power_off: Currently not used
2585  *
2586  * Try to locate a corresponding generic PM domain, which the device was
2587  * attached to previously. If such is found, the device is detached from it.
2588  */
2589 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2590 {
2591 	struct generic_pm_domain *pd;
2592 	unsigned int i;
2593 	int ret = 0;
2594 
2595 	pd = dev_to_genpd(dev);
2596 	if (IS_ERR(pd))
2597 		return;
2598 
2599 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2600 
2601 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2602 		ret = genpd_remove_device(pd, dev);
2603 		if (ret != -EAGAIN)
2604 			break;
2605 
2606 		mdelay(i);
2607 		cond_resched();
2608 	}
2609 
2610 	if (ret < 0) {
2611 		dev_err(dev, "failed to remove from PM domain %s: %d",
2612 			pd->name, ret);
2613 		return;
2614 	}
2615 
2616 	/* Check if PM domain can be powered off after removing this device. */
2617 	genpd_queue_power_off_work(pd);
2618 
2619 	/* Unregister the device if it was created by genpd. */
2620 	if (dev->bus == &genpd_bus_type)
2621 		device_unregister(dev);
2622 }
2623 
2624 static void genpd_dev_pm_sync(struct device *dev)
2625 {
2626 	struct generic_pm_domain *pd;
2627 
2628 	pd = dev_to_genpd(dev);
2629 	if (IS_ERR(pd))
2630 		return;
2631 
2632 	genpd_queue_power_off_work(pd);
2633 }
2634 
2635 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2636 				 unsigned int index, bool power_on)
2637 {
2638 	struct of_phandle_args pd_args;
2639 	struct generic_pm_domain *pd;
2640 	int ret;
2641 
2642 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2643 				"#power-domain-cells", index, &pd_args);
2644 	if (ret < 0)
2645 		return ret;
2646 
2647 	mutex_lock(&gpd_list_lock);
2648 	pd = genpd_get_from_provider(&pd_args);
2649 	of_node_put(pd_args.np);
2650 	if (IS_ERR(pd)) {
2651 		mutex_unlock(&gpd_list_lock);
2652 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2653 			__func__, PTR_ERR(pd));
2654 		return driver_deferred_probe_check_state(base_dev);
2655 	}
2656 
2657 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2658 
2659 	ret = genpd_add_device(pd, dev, base_dev);
2660 	mutex_unlock(&gpd_list_lock);
2661 
2662 	if (ret < 0) {
2663 		if (ret != -EPROBE_DEFER)
2664 			dev_err(dev, "failed to add to PM domain %s: %d",
2665 				pd->name, ret);
2666 		return ret;
2667 	}
2668 
2669 	dev->pm_domain->detach = genpd_dev_pm_detach;
2670 	dev->pm_domain->sync = genpd_dev_pm_sync;
2671 
2672 	if (power_on) {
2673 		genpd_lock(pd);
2674 		ret = genpd_power_on(pd, 0);
2675 		genpd_unlock(pd);
2676 	}
2677 
2678 	if (ret)
2679 		genpd_remove_device(pd, dev);
2680 
2681 	return ret ? -EPROBE_DEFER : 1;
2682 }
2683 
2684 /**
2685  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2686  * @dev: Device to attach.
2687  *
2688  * Parse the device's OF node to find a PM domain specifier. If such is found,
2689  * attaches the device to the retrieved pm_domain ops.
2690  *
2691  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2692  * need a PM domain or when multiple power-domains exist for it, else a
2693  * negative error code. Note that if a power-domain exists for the device, but
2694  * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
2695  * that the device is not probed and it is re-tried later.
2696  */
2697 int genpd_dev_pm_attach(struct device *dev)
2698 {
2699 	if (!dev->of_node)
2700 		return 0;
2701 
2702 	/*
2703 	 * Devices with multiple PM domains must be attached separately, as we
2704 	 * can only attach one PM domain per device.
2705 	 */
2706 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2707 				       "#power-domain-cells") != 1)
2708 		return 0;
2709 
2710 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2711 }
2712 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
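
/*
 * Illustrative sketch (not part of this file): drivers do not normally call
 * genpd_dev_pm_attach() themselves; bus probe code reaches it through
 * dev_pm_domain_attach(), roughly as below. Devices listing more than one
 * entry in "power-domains" get 0 here and are expected to use
 * genpd_dev_pm_attach_by_id() or genpd_dev_pm_attach_by_name() instead.
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;
 */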
2713 
2714 /**
2715  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2716  * @dev: The device used to lookup the PM domain.
2717  * @index: The index of the PM domain.
2718  *
2719  * Parse the device's OF node to find a PM domain specifier at the provided
2720  * @index. If such is found, creates a virtual device and attaches it to the
2721  * retrieved pm_domain ops. To deal with detaching of the virtual device, the
2722  * ->detach() callback in the struct dev_pm_domain is assigned to
2723  * genpd_dev_pm_detach().
2724  *
2725  * Returns the created virtual device on a successfully attached PM domain,
2726  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2727  * failures. If a power-domain exists for the device, but cannot be found or
2728  * turned on, ERR_PTR(-EPROBE_DEFER) is returned so that probing is re-tried later.
2729  */
2730 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2731 					 unsigned int index)
2732 {
2733 	struct device *virt_dev;
2734 	int num_domains;
2735 	int ret;
2736 
2737 	if (!dev->of_node)
2738 		return NULL;
2739 
2740 	/* Verify that the index is within a valid range. */
2741 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2742 						 "#power-domain-cells");
2743 	if (index >= num_domains)
2744 		return NULL;
2745 
2746 	/* Allocate and register device on the genpd bus. */
2747 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2748 	if (!virt_dev)
2749 		return ERR_PTR(-ENOMEM);
2750 
2751 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2752 	virt_dev->bus = &genpd_bus_type;
2753 	virt_dev->release = genpd_release_dev;
2754 	virt_dev->of_node = of_node_get(dev->of_node);
2755 
2756 	ret = device_register(virt_dev);
2757 	if (ret) {
2758 		put_device(virt_dev);
2759 		return ERR_PTR(ret);
2760 	}
2761 
2762 	/* Try to attach the device to the PM domain at the specified index. */
2763 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2764 	if (ret < 1) {
2765 		device_unregister(virt_dev);
2766 		return ret ? ERR_PTR(ret) : NULL;
2767 	}
2768 
2769 	pm_runtime_enable(virt_dev);
2770 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2771 
2772 	return virt_dev;
2773 }
2774 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
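
/*
 * Illustrative sketch (not part of this file): a driver whose device lists
 * several power-domains might attach to one of them and tie its runtime PM to
 * the consumer with a device link; index 1 and the link flags are only an
 * example.
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	if (pd_dev) {
 *		link = device_link_add(dev, pd_dev,
 *				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *		if (!link)
 *			return -ENODEV;
 *	}
 */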
2775 
2776 /**
2777  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2778  * @dev: The device used to lookup the PM domain.
2779  * @name: The name of the PM domain.
2780  *
2781  * Parse device's OF node to find a PM domain specifier using the
2782  * power-domain-names DT property. For further description see
2783  * genpd_dev_pm_attach_by_id().
2784  */
2785 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2786 {
2787 	int index;
2788 
2789 	if (!dev->of_node)
2790 		return NULL;
2791 
2792 	index = of_property_match_string(dev->of_node, "power-domain-names",
2793 					 name);
2794 	if (index < 0)
2795 		return NULL;
2796 
2797 	return genpd_dev_pm_attach_by_id(dev, index);
2798 }
2799 
2800 static const struct of_device_id idle_state_match[] = {
2801 	{ .compatible = "domain-idle-state", },
2802 	{ }
2803 };
2804 
2805 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2806 				    struct device_node *state_node)
2807 {
2808 	int err;
2809 	u32 residency;
2810 	u32 entry_latency, exit_latency;
2811 
2812 	err = of_property_read_u32(state_node, "entry-latency-us",
2813 						&entry_latency);
2814 	if (err) {
2815 		pr_debug(" * %pOF missing entry-latency-us property\n",
2816 			 state_node);
2817 		return -EINVAL;
2818 	}
2819 
2820 	err = of_property_read_u32(state_node, "exit-latency-us",
2821 						&exit_latency);
2822 	if (err) {
2823 		pr_debug(" * %pOF missing exit-latency-us property\n",
2824 			 state_node);
2825 		return -EINVAL;
2826 	}
2827 
2828 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2829 	if (!err)
2830 		genpd_state->residency_ns = 1000 * residency;
2831 
2832 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2833 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2834 	genpd_state->fwnode = &state_node->fwnode;
2835 
2836 	return 0;
2837 }
2838 
2839 static int genpd_iterate_idle_states(struct device_node *dn,
2840 				     struct genpd_power_state *states)
2841 {
2842 	int ret;
2843 	struct of_phandle_iterator it;
2844 	struct device_node *np;
2845 	int i = 0;
2846 
2847 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2848 	if (ret <= 0)
2849 		return ret == -ENOENT ? 0 : ret;
2850 
2851 	/* Loop over the phandles until all the requested entry is found */
2852 	/* Loop over the phandles until all the requested entries are found */
2853 		np = it.node;
2854 		if (!of_match_node(idle_state_match, np))
2855 			continue;
2856 		if (states) {
2857 			ret = genpd_parse_state(&states[i], np);
2858 			if (ret) {
2859 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2860 				       np, ret);
2861 				of_node_put(np);
2862 				return ret;
2863 			}
2864 		}
2865 		i++;
2866 	}
2867 
2868 	return i;
2869 }
2870 
2871 /**
2872  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2873  *
2874  * @dn: The genpd device node
2875  * @states: The pointer to which the state array will be saved.
2876  * @n: The count of elements in the array returned from this function.
2877  *
2878  * Returns the device states parsed from the OF node. The memory for the states
2879  * is allocated by this function; it is the caller's responsibility to free it
2880  * after use. Returns 0 whether any or zero compatible domain idle states are
2881  * found; in case of errors, a negative error code is returned.
2882  */
2883 int of_genpd_parse_idle_states(struct device_node *dn,
2884 			struct genpd_power_state **states, int *n)
2885 {
2886 	struct genpd_power_state *st;
2887 	int ret;
2888 
2889 	ret = genpd_iterate_idle_states(dn, NULL);
2890 	if (ret < 0)
2891 		return ret;
2892 
2893 	if (!ret) {
2894 		*states = NULL;
2895 		*n = 0;
2896 		return 0;
2897 	}
2898 
2899 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2900 	if (!st)
2901 		return -ENOMEM;
2902 
2903 	ret = genpd_iterate_idle_states(dn, st);
2904 	if (ret <= 0) {
2905 		kfree(st);
2906 		return ret < 0 ? ret : -EINVAL;
2907 	}
2908 
2909 	*states = st;
2910 	*n = ret;
2911 
2912 	return 0;
2913 }
2914 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
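
/*
 * Illustrative sketch (not part of this file): a provider driver can parse its
 * "domain-idle-states" before registering the domain; foo_pd and np are
 * hypothetical.
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *	if (nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *
 * When no states are assigned, pm_genpd_init() falls back to a single default
 * "off" state via genpd_set_default_power_state() above.
 */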
2915 
2916 /**
2917  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2918  *
2919  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2920  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2921  *	state.
2922  *
2923  * Returns the performance state encoded in the OPP of the genpd. This calls
2924  * the platform specific genpd->opp_to_performance_state() callback to
2925  * translate a power domain OPP to a performance state.
2926  *
2927  * Returns the performance state on success and 0 on failure.
2928  */
2929 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2930 					       struct dev_pm_opp *opp)
2931 {
2932 	struct generic_pm_domain *genpd = NULL;
2933 	int state;
2934 
2935 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2936 
2937 	if (unlikely(!genpd->opp_to_performance_state))
2938 		return 0;
2939 
2940 	genpd_lock(genpd);
2941 	state = genpd->opp_to_performance_state(genpd, opp);
2942 	genpd_unlock(genpd);
2943 
2944 	return state;
2945 }
2946 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
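
/*
 * Illustrative sketch (not part of this file): one common way for a provider
 * to implement the callback used above is to return the OPP's level; this
 * assumes the genpd OPP table encodes performance states through "opp-level",
 * and foo_pd is hypothetical.
 *
 *	static unsigned int foo_opp_to_performance_state(
 *				struct generic_pm_domain *genpd,
 *				struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	...
 *	foo_pd.opp_to_performance_state = foo_opp_to_performance_state;
 */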
2947 
2948 static int __init genpd_bus_init(void)
2949 {
2950 	return bus_register(&genpd_bus_type);
2951 }
2952 core_initcall(genpd_bus_init);
2953 
2954 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2955 
2956 
2957 /***        debugfs support        ***/
2958 
2959 #ifdef CONFIG_DEBUG_FS
2960 /*
2961  * TODO: This function is a slightly modified version of rtpm_status_show
2962  * from sysfs.c, so generalize it.
2963  */
2964 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2965 {
2966 	static const char * const status_lookup[] = {
2967 		[RPM_ACTIVE] = "active",
2968 		[RPM_RESUMING] = "resuming",
2969 		[RPM_SUSPENDED] = "suspended",
2970 		[RPM_SUSPENDING] = "suspending"
2971 	};
2972 	const char *p = "";
2973 
2974 	if (dev->power.runtime_error)
2975 		p = "error";
2976 	else if (dev->power.disable_depth)
2977 		p = "unsupported";
2978 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2979 		p = status_lookup[dev->power.runtime_status];
2980 	else
2981 		WARN_ON(1);
2982 
2983 	seq_printf(s, "%-25s  ", p);
2984 }
2985 
2986 static void perf_status_str(struct seq_file *s, struct device *dev)
2987 {
2988 	struct generic_pm_domain_data *gpd_data;
2989 
2990 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2991 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
2992 }
2993 
2994 static int genpd_summary_one(struct seq_file *s,
2995 			struct generic_pm_domain *genpd)
2996 {
2997 	static const char * const status_lookup[] = {
2998 		[GENPD_STATE_ON] = "on",
2999 		[GENPD_STATE_OFF] = "off"
3000 	};
3001 	struct pm_domain_data *pm_data;
3002 	const char *kobj_path;
3003 	struct gpd_link *link;
3004 	char state[16];
3005 	int ret;
3006 
3007 	ret = genpd_lock_interruptible(genpd);
3008 	if (ret)
3009 		return -ERESTARTSYS;
3010 
3011 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3012 		goto exit;
3013 	if (!genpd_status_on(genpd))
3014 		snprintf(state, sizeof(state), "%s-%u",
3015 			 status_lookup[genpd->status], genpd->state_idx);
3016 	else
3017 		snprintf(state, sizeof(state), "%s",
3018 			 status_lookup[genpd->status]);
3019 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3020 
3021 	/*
3022 	 * Modifications on the list require holding locks on both
3023 	 * parent and child, so we are safe.
3024 	 * Also genpd->name is immutable.
3025 	 */
3026 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3027 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3028 			seq_printf(s, "\n%48s", " ");
3029 		seq_printf(s, "%s", link->child->name);
3030 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3031 			seq_puts(s, ", ");
3032 	}
3033 
3034 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3035 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3036 				genpd_is_irq_safe(genpd) ?
3037 				GFP_ATOMIC : GFP_KERNEL);
3038 		if (kobj_path == NULL)
3039 			continue;
3040 
3041 		seq_printf(s, "\n    %-50s  ", kobj_path);
3042 		rtpm_status_str(s, pm_data->dev);
3043 		perf_status_str(s, pm_data->dev);
3044 		kfree(kobj_path);
3045 	}
3046 
3047 	seq_puts(s, "\n");
3048 exit:
3049 	genpd_unlock(genpd);
3050 
3051 	return 0;
3052 }
3053 
3054 static int summary_show(struct seq_file *s, void *data)
3055 {
3056 	struct generic_pm_domain *genpd;
3057 	int ret = 0;
3058 
3059 	seq_puts(s, "domain                          status          children                           performance\n");
3060 	seq_puts(s, "    /device                                             runtime status\n");
3061 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3062 
3063 	ret = mutex_lock_interruptible(&gpd_list_lock);
3064 	if (ret)
3065 		return -ERESTARTSYS;
3066 
3067 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3068 		ret = genpd_summary_one(s, genpd);
3069 		if (ret)
3070 			break;
3071 	}
3072 	mutex_unlock(&gpd_list_lock);
3073 
3074 	return ret;
3075 }
3076 
3077 static int status_show(struct seq_file *s, void *data)
3078 {
3079 	static const char * const status_lookup[] = {
3080 		[GENPD_STATE_ON] = "on",
3081 		[GENPD_STATE_OFF] = "off"
3082 	};
3083 
3084 	struct generic_pm_domain *genpd = s->private;
3085 	int ret = 0;
3086 
3087 	ret = genpd_lock_interruptible(genpd);
3088 	if (ret)
3089 		return -ERESTARTSYS;
3090 
3091 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3092 		goto exit;
3093 
3094 	if (genpd->status == GENPD_STATE_OFF)
3095 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3096 			genpd->state_idx);
3097 	else
3098 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3099 exit:
3100 	genpd_unlock(genpd);
3101 	return ret;
3102 }
3103 
3104 static int sub_domains_show(struct seq_file *s, void *data)
3105 {
3106 	struct generic_pm_domain *genpd = s->private;
3107 	struct gpd_link *link;
3108 	int ret = 0;
3109 
3110 	ret = genpd_lock_interruptible(genpd);
3111 	if (ret)
3112 		return -ERESTARTSYS;
3113 
3114 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3115 		seq_printf(s, "%s\n", link->child->name);
3116 
3117 	genpd_unlock(genpd);
3118 	return ret;
3119 }
3120 
3121 static int idle_states_show(struct seq_file *s, void *data)
3122 {
3123 	struct generic_pm_domain *genpd = s->private;
3124 	unsigned int i;
3125 	int ret = 0;
3126 
3127 	ret = genpd_lock_interruptible(genpd);
3128 	if (ret)
3129 		return -ERESTARTSYS;
3130 
3131 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3132 
3133 	for (i = 0; i < genpd->state_count; i++) {
3134 		ktime_t delta = 0;
3135 		s64 msecs;
3136 
3137 		if ((genpd->status == GENPD_STATE_OFF) &&
3138 				(genpd->state_idx == i))
3139 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3140 
3141 		msecs = ktime_to_ms(
3142 			ktime_add(genpd->states[i].idle_time, delta));
3143 		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
3144 			      genpd->states[i].usage, genpd->states[i].rejected);
3145 	}
3146 
3147 	genpd_unlock(genpd);
3148 	return ret;
3149 }
3150 
3151 static int active_time_show(struct seq_file *s, void *data)
3152 {
3153 	struct generic_pm_domain *genpd = s->private;
3154 	ktime_t delta = 0;
3155 	int ret = 0;
3156 
3157 	ret = genpd_lock_interruptible(genpd);
3158 	if (ret)
3159 		return -ERESTARTSYS;
3160 
3161 	if (genpd->status == GENPD_STATE_ON)
3162 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
3163 
3164 	seq_printf(s, "%lld ms\n", ktime_to_ms(
3165 				ktime_add(genpd->on_time, delta)));
3166 
3167 	genpd_unlock(genpd);
3168 	return ret;
3169 }
3170 
3171 static int total_idle_time_show(struct seq_file *s, void *data)
3172 {
3173 	struct generic_pm_domain *genpd = s->private;
3174 	ktime_t delta = 0, total = 0;
3175 	unsigned int i;
3176 	int ret = 0;
3177 
3178 	ret = genpd_lock_interruptible(genpd);
3179 	if (ret)
3180 		return -ERESTARTSYS;
3181 
3182 	for (i = 0; i < genpd->state_count; i++) {
3183 
3184 		if ((genpd->status == GENPD_STATE_OFF) &&
3185 				(genpd->state_idx == i))
3186 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3187 
3188 		total = ktime_add(total, genpd->states[i].idle_time);
3189 	}
3190 	total = ktime_add(total, delta);
3191 
3192 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
3193 
3194 	genpd_unlock(genpd);
3195 	return ret;
3196 }
3197 
3198 
3199 static int devices_show(struct seq_file *s, void *data)
3200 {
3201 	struct generic_pm_domain *genpd = s->private;
3202 	struct pm_domain_data *pm_data;
3203 	const char *kobj_path;
3204 	int ret = 0;
3205 
3206 	ret = genpd_lock_interruptible(genpd);
3207 	if (ret)
3208 		return -ERESTARTSYS;
3209 
3210 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3211 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3212 				genpd_is_irq_safe(genpd) ?
3213 				GFP_ATOMIC : GFP_KERNEL);
3214 		if (kobj_path == NULL)
3215 			continue;
3216 
3217 		seq_printf(s, "%s\n", kobj_path);
3218 		kfree(kobj_path);
3219 	}
3220 
3221 	genpd_unlock(genpd);
3222 	return ret;
3223 }
3224 
3225 static int perf_state_show(struct seq_file *s, void *data)
3226 {
3227 	struct generic_pm_domain *genpd = s->private;
3228 
3229 	if (genpd_lock_interruptible(genpd))
3230 		return -ERESTARTSYS;
3231 
3232 	seq_printf(s, "%u\n", genpd->performance_state);
3233 
3234 	genpd_unlock(genpd);
3235 	return 0;
3236 }
3237 
3238 DEFINE_SHOW_ATTRIBUTE(summary);
3239 DEFINE_SHOW_ATTRIBUTE(status);
3240 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3241 DEFINE_SHOW_ATTRIBUTE(idle_states);
3242 DEFINE_SHOW_ATTRIBUTE(active_time);
3243 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3244 DEFINE_SHOW_ATTRIBUTE(devices);
3245 DEFINE_SHOW_ATTRIBUTE(perf_state);
3246 
3247 static void genpd_debug_add(struct generic_pm_domain *genpd)
3248 {
3249 	struct dentry *d;
3250 
3251 	if (!genpd_debugfs_dir)
3252 		return;
3253 
3254 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3255 
3256 	debugfs_create_file("current_state", 0444,
3257 			    d, genpd, &status_fops);
3258 	debugfs_create_file("sub_domains", 0444,
3259 			    d, genpd, &sub_domains_fops);
3260 	debugfs_create_file("idle_states", 0444,
3261 			    d, genpd, &idle_states_fops);
3262 	debugfs_create_file("active_time", 0444,
3263 			    d, genpd, &active_time_fops);
3264 	debugfs_create_file("total_idle_time", 0444,
3265 			    d, genpd, &total_idle_time_fops);
3266 	debugfs_create_file("devices", 0444,
3267 			    d, genpd, &devices_fops);
3268 	if (genpd->set_performance_state)
3269 		debugfs_create_file("perf_state", 0444,
3270 				    d, genpd, &perf_state_fops);
3271 }
3272 
3273 static int __init genpd_debug_init(void)
3274 {
3275 	struct generic_pm_domain *genpd;
3276 
3277 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3278 
3279 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3280 			    NULL, &summary_fops);
3281 
3282 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3283 		genpd_debug_add(genpd);
3284 
3285 	return 0;
3286 }
3287 late_initcall(genpd_debug_init);
3288 
3289 static void __exit genpd_debug_exit(void)
3290 {
3291 	debugfs_remove_recursive(genpd_debugfs_dir);
3292 }
3293 __exitcall(genpd_debug_exit);
3294 #endif /* CONFIG_DEBUG_FS */
3295