// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
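
/*
 * Illustrative sketch (not part of the original file): the lock_ops
 * indirection lets callers stay agnostic of whether a domain uses a
 * mutex or a spinlock. A hypothetical helper could look like:
 *
 *	static void foo_poke_domain(struct generic_pm_domain *genpd)
 *	{
 *		genpd_lock(genpd);
 *		// ... inspect or modify genpd state under the lock ...
 *		genpd_unlock(genpd);
 *	}
 *
 * The same caller code works for both IRQ-safe (spinlock) and
 * sleepable (mutex) domains, since the ops are chosen at init time by
 * genpd_lock_init() below.
 */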

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on
	 * domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, the domain has just come out of
	 * the off state, so the elapsed time was spent idle; add it to
	 * the idle time of the current state. Otherwise the domain has
	 * just been powered off, so the elapsed time counts as on-time.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* The new requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* The new requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set to 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device no longer participates in determining the
 *	   target performance state of the genpd).
 *
 * It is assumed that the caller guarantees the genpd won't be detached while
 * this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
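
/*
 * Example (illustrative, not part of the original file): a consumer
 * driver attached to a genpd that supports performance states might
 * request and later drop a performance level like this; the state
 * value 3 is hypothetical:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	if (ret)
 *		return ret;
 *	// ... run the high-performance use case ...
 *	dev_pm_genpd_set_performance_state(dev, 0);	// no constraint left
 *
 * In practice the state value typically comes from an OPP table (see
 * the dev_pm_opp_xlate_performance_state() usage above) rather than
 * being hard-coded.
 */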

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It is assumed that the caller
 * guarantees the genpd won't be detached while this routine is running.
 * Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
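
/*
 * Example (illustrative): a driver that knows its next interrupt will
 * fire in, say, 10 ms can hint at that wakeup; the 10 ms figure is
 * hypothetical:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 *
 * A governor may then avoid entering a deep domain state whose
 * combined power-off/on latency would exceed the time until that
 * wakeup.
 */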

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
 * the RPM status of the related device is in an intermediate state, not yet
 * turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
 * device to not be RPM_SUSPENDED while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
			genpd_is_rpm_always_on(genpd) ||
			atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow the PM domain to be powered off when an IRQ
		 * safe device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so power off the domain only if the PM domain
	 * is IRQ safe as well.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ safe domain that holds an IRQ
	 * safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	if (!ret)
		genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
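
/*
 * Note (illustrative): booting with "pd_ignore_unused" on the kernel
 * command line sets pd_ignore_unused above, so genpd_power_off_unused()
 * below leaves otherwise-unused power domains powered on. This can be
 * handy when debugging a device that misbehaves once its domain is
 * power-gated.
 */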

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into an arbitrary
		 * state, so make it appear as powered off to
		 * genpd_sync_power_on(), so that it tries to power it on
		 * in case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, which can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
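
/*
 * Example (illustrative): a syscore-level user, such as a clocksource
 * or CPU device handler, would pair these calls around the low-power
 * transition:
 *
 *	dev_pm_genpd_suspend(dev);	// syscore suspend phase
 *	// ... system sleeps ...
 *	dev_pm_genpd_resume(dev);	// syscore resume phase
 */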

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
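
/*
 * Example (illustrative, with hypothetical names): a power-domain
 * provider that manages its devices directly might do:
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain\n");
 *	// ...
 *	pm_genpd_remove_device(&pdev->dev);	// on teardown
 *
 * On many platforms devices are attached automatically through
 * firmware (e.g. device tree) bindings instead, so calling this
 * directly is the exception rather than the rule.
 */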

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
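
/*
 * Example (illustrative, with a hypothetical callback): a consumer can
 * react to its domain being power-cycled, e.g. to save and restore
 * hardware context around the off period:
 *
 *	static int foo_genpd_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context();
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context();
 *		return NOTIFY_OK;
 *	}
 *
 *	// ...
 *	foo->nb.notifier_call = foo_genpd_notify;
 *	ret = dev_pm_genpd_add_notifier(dev, &foo->nb);
 *
 * The GENPD_NOTIFY_* actions correspond to the raw_notifier_call_chain*()
 * invocations in _genpd_power_on() and _genpd_power_off() above.
 */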

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
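
/*
 * Example (illustrative, with hypothetical domain names): a provider
 * expressing that a "gpu" domain is nested inside a "core" domain:
 *
 *	ret = pm_genpd_add_subdomain(&core_pd, &gpu_pd);
 *
 * After this, core_pd can only be powered off once gpu_pd (and any
 * devices in it) have been powered down, as enforced by the sd_count
 * accounting above.
 */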
1907 
1908 /**
1909  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1910  * @genpd: Leader PM domain to remove the subdomain from.
1911  * @subdomain: Subdomain to be removed.
1912  */
1913 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1914 			      struct generic_pm_domain *subdomain)
1915 {
1916 	struct gpd_link *l, *link;
1917 	int ret = -EINVAL;
1918 
1919 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1920 		return -EINVAL;
1921 
1922 	genpd_lock(subdomain);
1923 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1924 
1925 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1926 		pr_warn("%s: unable to remove subdomain %s\n",
1927 			genpd->name, subdomain->name);
1928 		ret = -EBUSY;
1929 		goto out;
1930 	}
1931 
1932 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1933 		if (link->child != subdomain)
1934 			continue;
1935 
1936 		list_del(&link->parent_node);
1937 		list_del(&link->child_node);
1938 		kfree(link);
1939 		if (genpd_status_on(subdomain))
1940 			genpd_sd_counter_dec(genpd);
1941 
1942 		ret = 0;
1943 		break;
1944 	}
1945 
1946 out:
1947 	genpd_unlock(genpd);
1948 	genpd_unlock(subdomain);
1949 
1950 	return ret;
1951 }
1952 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1953 
1954 static void genpd_free_default_power_state(struct genpd_power_state *states,
1955 					   unsigned int state_count)
1956 {
1957 	kfree(states);
1958 }
1959 
1960 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1961 {
1962 	struct genpd_power_state *state;
1963 
1964 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1965 	if (!state)
1966 		return -ENOMEM;
1967 
1968 	genpd->states = state;
1969 	genpd->state_count = 1;
1970 	genpd->free_states = genpd_free_default_power_state;
1971 
1972 	return 0;
1973 }
1974 
1975 static int genpd_alloc_data(struct generic_pm_domain *genpd)
1976 {
1977 	struct genpd_governor_data *gd = NULL;
1978 	int ret;
1979 
1980 	if (genpd_is_cpu_domain(genpd) &&
1981 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1982 		return -ENOMEM;
1983 
1984 	if (genpd->gov) {
1985 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1986 		if (!gd) {
1987 			ret = -ENOMEM;
1988 			goto free;
1989 		}
1990 
1991 		gd->max_off_time_ns = -1;
1992 		gd->max_off_time_changed = true;
1993 		gd->next_wakeup = KTIME_MAX;
1994 	}
1995 
1996 	/* Use only one "off" state if there were no states declared */
1997 	if (genpd->state_count == 0) {
1998 		ret = genpd_set_default_power_state(genpd);
1999 		if (ret)
2000 			goto free;
2001 	}
2002 
2003 	genpd->gd = gd;
2004 	return 0;
2005 
2006 free:
2007 	if (genpd_is_cpu_domain(genpd))
2008 		free_cpumask_var(genpd->cpus);
2009 	kfree(gd);
2010 	return ret;
2011 }
2012 
2013 static void genpd_free_data(struct generic_pm_domain *genpd)
2014 {
2015 	if (genpd_is_cpu_domain(genpd))
2016 		free_cpumask_var(genpd->cpus);
2017 	if (genpd->free_states)
2018 		genpd->free_states(genpd->states, genpd->state_count);
2019 	kfree(genpd->gd);
2020 }
2021 
2022 static void genpd_lock_init(struct generic_pm_domain *genpd)
2023 {
2024 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2025 		spin_lock_init(&genpd->slock);
2026 		genpd->lock_ops = &genpd_spin_ops;
2027 	} else {
2028 		mutex_init(&genpd->mlock);
2029 		genpd->lock_ops = &genpd_mtx_ops;
2030 	}
2031 }
2032 
2033 /**
2034  * pm_genpd_init - Initialize a generic I/O PM domain object.
2035  * @genpd: PM domain object to initialize.
2036  * @gov: PM domain governor to associate with the domain (may be NULL).
2037  * @is_off: Initial power status of the domain (true means powered off).
2038  *
2039  * Returns 0 on successful initialization, else a negative error code.
2040  */
2041 int pm_genpd_init(struct generic_pm_domain *genpd,
2042 		  struct dev_power_governor *gov, bool is_off)
2043 {
2044 	int ret;
2045 
2046 	if (IS_ERR_OR_NULL(genpd))
2047 		return -EINVAL;
2048 
2049 	INIT_LIST_HEAD(&genpd->parent_links);
2050 	INIT_LIST_HEAD(&genpd->child_links);
2051 	INIT_LIST_HEAD(&genpd->dev_list);
2052 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2053 	genpd_lock_init(genpd);
2054 	genpd->gov = gov;
2055 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2056 	atomic_set(&genpd->sd_count, 0);
2057 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2058 	genpd->device_count = 0;
2059 	genpd->provider = NULL;
2060 	genpd->has_provider = false;
2061 	genpd->accounting_time = ktime_get_mono_fast_ns();
2062 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2063 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2064 	genpd->domain.ops.prepare = genpd_prepare;
2065 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2066 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2067 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2068 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2069 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2070 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2071 	genpd->domain.ops.complete = genpd_complete;
2072 	genpd->domain.start = genpd_dev_pm_start;
2073 
2074 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2075 		genpd->dev_ops.stop = pm_clk_suspend;
2076 		genpd->dev_ops.start = pm_clk_resume;
2077 	}
2078 
2079 	/* The always-on governor works better with the corresponding flag. */
2080 	if (gov == &pm_domain_always_on_gov)
2081 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2082 
2083 	/* Always-on domains must be powered on at initialization. */
2084 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2085 			!genpd_status_on(genpd))
2086 		return -EINVAL;
2087 
2088 	/* Multiple states but no governor doesn't make sense. */
2089 	if (!gov && genpd->state_count > 1)
2090 		pr_warn("%s: no governor for states\n", genpd->name);
2091 
2092 	ret = genpd_alloc_data(genpd);
2093 	if (ret)
2094 		return ret;
2095 
2096 	device_initialize(&genpd->dev);
2097 	dev_set_name(&genpd->dev, "%s", genpd->name);
2098 
2099 	mutex_lock(&gpd_list_lock);
2100 	list_add(&genpd->gpd_list_node, &gpd_list);
2101 	mutex_unlock(&gpd_list_lock);
2102 	genpd_debug_add(genpd);
2103 
2104 	return 0;
2105 }
2106 EXPORT_SYMBOL_GPL(pm_genpd_init);
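
/*
 * Illustrative usage (a sketch): a minimal domain with platform specific
 * power_on/power_off callbacks. foo_pd and the rail control implied by the
 * comments are hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// Ungate the power rail backing this domain.
 *		return 0;
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		// Gate the power rail backing this domain.
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	// Register the domain, starting in the "off" state:
 *	// ret = pm_genpd_init(&foo_pd, NULL, true);
 */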
2107 
2108 static int genpd_remove(struct generic_pm_domain *genpd)
2109 {
2110 	struct gpd_link *l, *link;
2111 
2112 	if (IS_ERR_OR_NULL(genpd))
2113 		return -EINVAL;
2114 
2115 	genpd_lock(genpd);
2116 
2117 	if (genpd->has_provider) {
2118 		genpd_unlock(genpd);
2119 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2120 		return -EBUSY;
2121 	}
2122 
2123 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2124 		genpd_unlock(genpd);
2125 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2126 		return -EBUSY;
2127 	}
2128 
2129 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2130 		list_del(&link->parent_node);
2131 		list_del(&link->child_node);
2132 		kfree(link);
2133 	}
2134 
2135 	list_del(&genpd->gpd_list_node);
2136 	genpd_unlock(genpd);
2137 	genpd_debug_remove(genpd);
2138 	cancel_work_sync(&genpd->power_off_work);
2139 	genpd_free_data(genpd);
2140 
2141 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2142 
2143 	return 0;
2144 }
2145 
2146 /**
2147  * pm_genpd_remove - Remove a generic I/O PM domain
2148  * @genpd: Pointer to PM domain that is to be removed.
2149  *
2150  * To remove the PM domain, this function:
2151  *  - Removes the PM domain as a subdomain to any parent domains,
2152  *    if it was added.
2153  *  - Removes the PM domain from the list of registered PM domains.
2154  *
2155  * The PM domain will only be removed if the associated provider has been
2156  * removed, it is not a parent to any other PM domain, and it has no devices
2157  * associated with it.
2158  */
2159 int pm_genpd_remove(struct generic_pm_domain *genpd)
2160 {
2161 	int ret;
2162 
2163 	mutex_lock(&gpd_list_lock);
2164 	ret = genpd_remove(genpd);
2165 	mutex_unlock(&gpd_list_lock);
2166 
2167 	return ret;
2168 }
2169 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2170 
2171 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2172 
2173 /*
2174  * Device Tree based PM domain providers.
2175  *
2176  * The code below implements generic device tree based PM domain providers that
2177  * bind device tree nodes with generic PM domains registered in the system.
2178  *
2179  * Any driver that registers generic PM domains and needs to support binding of
2180  * devices to these domains is supposed to register a PM domain provider, which
2181  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2182  *
2183  * Two simple mapping functions have been provided for convenience:
2184  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2185  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2186  *    index.
2187  */
2188 
2189 /**
2190  * struct of_genpd_provider - PM domain provider registration structure
2191  * @link: Entry in global list of PM domain providers
2192  * @node: Pointer to device tree node of PM domain provider
2193  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2194  *         into a PM domain.
2195  * @data: context pointer to be passed into @xlate callback
2196  */
2197 struct of_genpd_provider {
2198 	struct list_head link;
2199 	struct device_node *node;
2200 	genpd_xlate_t xlate;
2201 	void *data;
2202 };
2203 
2204 /* List of registered PM domain providers. */
2205 static LIST_HEAD(of_genpd_providers);
2206 /* Mutex to protect the list above. */
2207 static DEFINE_MUTEX(of_genpd_mutex);
2208 
2209 /**
2210  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2211  * @genpdspec: OF phandle args to map into a PM domain
2212  * @data: xlate function private data - pointer to struct generic_pm_domain
2213  *
2214  * This is a generic xlate function that can be used to model PM domains that
2215  * have their own device tree nodes. The private data of the xlate function
2216  * to be a valid pointer to struct generic_pm_domain.
2217  */
2218 static struct generic_pm_domain *genpd_xlate_simple(
2219 					struct of_phandle_args *genpdspec,
2220 					void *data)
2221 {
2222 	return data;
2223 }
2224 
2225 /**
2226  * genpd_xlate_onecell() - Xlate function using a single index.
2227  * @genpdspec: OF phandle args to map into a PM domain
2228  * @data: xlate function private data - pointer to struct genpd_onecell_data
2229  *
2230  * This is a generic xlate function that can be used to model simple PM domain
2231  * controllers that have one device tree node and provide multiple PM domains.
2232  * A single cell is used as an index into an array of PM domains specified in
2233  * the genpd_onecell_data struct when registering the provider.
2234  */
2235 static struct generic_pm_domain *genpd_xlate_onecell(
2236 					struct of_phandle_args *genpdspec,
2237 					void *data)
2238 {
2239 	struct genpd_onecell_data *genpd_data = data;
2240 	unsigned int idx = genpdspec->args[0];
2241 
2242 	if (genpdspec->args_count != 1)
2243 		return ERR_PTR(-EINVAL);
2244 
2245 	if (idx >= genpd_data->num_domains) {
2246 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2247 		return ERR_PTR(-EINVAL);
2248 	}
2249 
2250 	if (!genpd_data->domains[idx])
2251 		return ERR_PTR(-ENOENT);
2252 
2253 	return genpd_data->domains[idx];
2254 }
2255 
2256 /**
2257  * genpd_add_provider() - Register a PM domain provider for a node
2258  * @np: Device node pointer associated with the PM domain provider.
2259  * @xlate: Callback for decoding PM domain from phandle arguments.
2260  * @data: Context pointer for @xlate callback.
2261  */
2262 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2263 			      void *data)
2264 {
2265 	struct of_genpd_provider *cp;
2266 
2267 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2268 	if (!cp)
2269 		return -ENOMEM;
2270 
2271 	cp->node = of_node_get(np);
2272 	cp->data = data;
2273 	cp->xlate = xlate;
2274 	fwnode_dev_initialized(&np->fwnode, true);
2275 
2276 	mutex_lock(&of_genpd_mutex);
2277 	list_add(&cp->link, &of_genpd_providers);
2278 	mutex_unlock(&of_genpd_mutex);
2279 	pr_debug("Added domain provider from %pOF\n", np);
2280 
2281 	return 0;
2282 }
2283 
2284 static bool genpd_present(const struct generic_pm_domain *genpd)
2285 {
2286 	bool ret = false;
2287 	const struct generic_pm_domain *gpd;
2288 
2289 	mutex_lock(&gpd_list_lock);
2290 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2291 		if (gpd == genpd) {
2292 			ret = true;
2293 			break;
2294 		}
2295 	}
2296 	mutex_unlock(&gpd_list_lock);
2297 
2298 	return ret;
2299 }
2300 
2301 /**
2302  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2303  * @np: Device node pointer associated with the PM domain provider.
2304  * @genpd: Pointer to PM domain associated with the PM domain provider.
2305  */
2306 int of_genpd_add_provider_simple(struct device_node *np,
2307 				 struct generic_pm_domain *genpd)
2308 {
2309 	int ret;
2310 
2311 	if (!np || !genpd)
2312 		return -EINVAL;
2313 
2314 	if (!genpd_present(genpd))
2315 		return -EINVAL;
2316 
2317 	genpd->dev.of_node = np;
2318 
2319 	/* Parse genpd OPP table */
2320 	if (genpd->set_performance_state) {
2321 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2322 		if (ret)
2323 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2324 
2325 		/*
2326 		 * Save table for faster processing while setting performance
2327 		 * state.
2328 		 */
2329 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2330 		WARN_ON(IS_ERR(genpd->opp_table));
2331 	}
2332 
2333 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2334 	if (ret) {
2335 		if (genpd->set_performance_state) {
2336 			dev_pm_opp_put_opp_table(genpd->opp_table);
2337 			dev_pm_opp_of_remove_table(&genpd->dev);
2338 		}
2339 
2340 		return ret;
2341 	}
2342 
2343 	genpd->provider = &np->fwnode;
2344 	genpd->has_provider = true;
2345 
2346 	return 0;
2347 }
2348 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
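
/*
 * Illustrative usage (a sketch): a provider whose device tree node maps 1:1
 * to a single domain. The node and the foo_pd object are hypothetical.
 *
 *	// Device tree:
 *	//	foo_pd: power-controller@12340000 {
 *	//		compatible = "vendor,foo-pd";
 *	//		#power-domain-cells = <0>;
 *	//	};
 *
 *	// Driver probe, after pm_genpd_init(&foo_pd, ...) has succeeded:
 *	// ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 */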
2349 
2350 /**
2351  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2352  * @np: Device node pointer associated with the PM domain provider.
2353  * @data: Pointer to the data associated with the PM domain provider.
2354  */
2355 int of_genpd_add_provider_onecell(struct device_node *np,
2356 				  struct genpd_onecell_data *data)
2357 {
2358 	struct generic_pm_domain *genpd;
2359 	unsigned int i;
2360 	int ret = -EINVAL;
2361 
2362 	if (!np || !data)
2363 		return -EINVAL;
2364 
2365 	if (!data->xlate)
2366 		data->xlate = genpd_xlate_onecell;
2367 
2368 	for (i = 0; i < data->num_domains; i++) {
2369 		genpd = data->domains[i];
2370 
2371 		if (!genpd)
2372 			continue;
2373 		if (!genpd_present(genpd))
2374 			goto error;
2375 
2376 		genpd->dev.of_node = np;
2377 
2378 		/* Parse genpd OPP table */
2379 		if (genpd->set_performance_state) {
2380 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2381 			if (ret) {
2382 				dev_err_probe(&genpd->dev, ret,
2383 					      "Failed to add OPP table for index %d\n", i);
2384 				goto error;
2385 			}
2386 
2387 			/*
2388 			 * Save table for faster processing while setting
2389 			 * performance state.
2390 			 */
2391 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2392 			WARN_ON(IS_ERR(genpd->opp_table));
2393 		}
2394 
2395 		genpd->provider = &np->fwnode;
2396 		genpd->has_provider = true;
2397 	}
2398 
2399 	ret = genpd_add_provider(np, data->xlate, data);
2400 	if (ret < 0)
2401 		goto error;
2402 
2403 	return 0;
2404 
2405 error:
2406 	while (i--) {
2407 		genpd = data->domains[i];
2408 
2409 		if (!genpd)
2410 			continue;
2411 
2412 		genpd->provider = NULL;
2413 		genpd->has_provider = false;
2414 
2415 		if (genpd->set_performance_state) {
2416 			dev_pm_opp_put_opp_table(genpd->opp_table);
2417 			dev_pm_opp_of_remove_table(&genpd->dev);
2418 		}
2419 	}
2420 
2421 	return ret;
2422 }
2423 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
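
/*
 * Illustrative usage (a sketch): one provider node exposing several domains,
 * selected by a single index cell. foo_pd_a/foo_pd_b are hypothetical domains
 * already initialized with pm_genpd_init().
 *
 *	// Device tree:
 *	//	pd: power-controller@12340000 {
 *	//		compatible = "vendor,foo-pmu";
 *	//		#power-domain-cells = <1>;
 *	//	};
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a,
 *		&foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	// Consumers then reference e.g. <&pd 1> to select foo_pd_b:
 *	// ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 */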
2424 
2425 /**
2426  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2427  * @np: Device node pointer associated with the PM domain provider
2428  */
2429 void of_genpd_del_provider(struct device_node *np)
2430 {
2431 	struct of_genpd_provider *cp, *tmp;
2432 	struct generic_pm_domain *gpd;
2433 
2434 	mutex_lock(&gpd_list_lock);
2435 	mutex_lock(&of_genpd_mutex);
2436 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2437 		if (cp->node == np) {
2438 			/*
2439 			 * For each PM domain associated with the
2440 			 * provider, set the 'has_provider' to false
2441 			 * so that the PM domain can be safely removed.
2442 			 */
2443 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2444 				if (gpd->provider == &np->fwnode) {
2445 					gpd->has_provider = false;
2446 
2447 					if (!gpd->set_performance_state)
2448 						continue;
2449 
2450 					dev_pm_opp_put_opp_table(gpd->opp_table);
2451 					dev_pm_opp_of_remove_table(&gpd->dev);
2452 				}
2453 			}
2454 
2455 			fwnode_dev_initialized(&cp->node->fwnode, false);
2456 			list_del(&cp->link);
2457 			of_node_put(cp->node);
2458 			kfree(cp);
2459 			break;
2460 		}
2461 	}
2462 	mutex_unlock(&of_genpd_mutex);
2463 	mutex_unlock(&gpd_list_lock);
2464 }
2465 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2466 
2467 /**
2468  * genpd_get_from_provider() - Look up a PM domain
2469  * @genpdspec: OF phandle args to use for look-up
2470  *
2471  * Looks for a PM domain provider under the node specified by @genpdspec and if
2472  * found, uses the provider's xlate function to map the phandle args to a
2473  * PM domain.
2474  *
2475  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2476  * on failure.
2477  */
2478 static struct generic_pm_domain *genpd_get_from_provider(
2479 					struct of_phandle_args *genpdspec)
2480 {
2481 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2482 	struct of_genpd_provider *provider;
2483 
2484 	if (!genpdspec)
2485 		return ERR_PTR(-EINVAL);
2486 
2487 	mutex_lock(&of_genpd_mutex);
2488 
2489 	/* Check if we have such a provider in our list */
2490 	list_for_each_entry(provider, &of_genpd_providers, link) {
2491 		if (provider->node == genpdspec->np)
2492 			genpd = provider->xlate(genpdspec, provider->data);
2493 		if (!IS_ERR(genpd))
2494 			break;
2495 	}
2496 
2497 	mutex_unlock(&of_genpd_mutex);
2498 
2499 	return genpd;
2500 }
2501 
2502 /**
2503  * of_genpd_add_device() - Add a device to an I/O PM domain
2504  * @genpdspec: OF phandle args to use for look-up PM domain
2505  * @dev: Device to be added.
2506  *
2507  * Looks up an I/O PM domain based upon the phandle args provided and adds
2508  * the device to the PM domain. Returns a negative error code on failure.
2509  */
2510 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2511 {
2512 	struct generic_pm_domain *genpd;
2513 	int ret;
2514 
2515 	mutex_lock(&gpd_list_lock);
2516 
2517 	genpd = genpd_get_from_provider(genpdspec);
2518 	if (IS_ERR(genpd)) {
2519 		ret = PTR_ERR(genpd);
2520 		goto out;
2521 	}
2522 
2523 	ret = genpd_add_device(genpd, dev, dev);
2524 
2525 out:
2526 	mutex_unlock(&gpd_list_lock);
2527 
2528 	return ret;
2529 }
2530 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2531 
2532 /**
2533  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2534  * @parent_spec: OF phandle args to use for parent PM domain look-up
2535  * @subdomain_spec: OF phandle args to use for subdomain look-up
2536  *
2537  * Looks up a parent PM domain and a subdomain based upon the phandle args
2538  * provided and adds the subdomain to the parent PM domain. Returns a
2539  * negative error code on failure.
2540  */
2541 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2542 			   struct of_phandle_args *subdomain_spec)
2543 {
2544 	struct generic_pm_domain *parent, *subdomain;
2545 	int ret;
2546 
2547 	mutex_lock(&gpd_list_lock);
2548 
2549 	parent = genpd_get_from_provider(parent_spec);
2550 	if (IS_ERR(parent)) {
2551 		ret = PTR_ERR(parent);
2552 		goto out;
2553 	}
2554 
2555 	subdomain = genpd_get_from_provider(subdomain_spec);
2556 	if (IS_ERR(subdomain)) {
2557 		ret = PTR_ERR(subdomain);
2558 		goto out;
2559 	}
2560 
2561 	ret = genpd_add_subdomain(parent, subdomain);
2562 
2563 out:
2564 	mutex_unlock(&gpd_list_lock);
2565 
2566 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2567 }
2568 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2569 
2570 /**
2571  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2572  * @parent_spec: OF phandle args to use for parent PM domain look-up
2573  * @subdomain_spec: OF phandle args to use for subdomain look-up
2574  *
2575  * Looks up a parent PM domain and a subdomain based upon the phandle args
2576  * provided and removes the subdomain from the parent PM domain. Returns a
2577  * negative error code on failure.
2578  */
2579 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2580 			      struct of_phandle_args *subdomain_spec)
2581 {
2582 	struct generic_pm_domain *parent, *subdomain;
2583 	int ret;
2584 
2585 	mutex_lock(&gpd_list_lock);
2586 
2587 	parent = genpd_get_from_provider(parent_spec);
2588 	if (IS_ERR(parent)) {
2589 		ret = PTR_ERR(parent);
2590 		goto out;
2591 	}
2592 
2593 	subdomain = genpd_get_from_provider(subdomain_spec);
2594 	if (IS_ERR(subdomain)) {
2595 		ret = PTR_ERR(subdomain);
2596 		goto out;
2597 	}
2598 
2599 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2600 
2601 out:
2602 	mutex_unlock(&gpd_list_lock);
2603 
2604 	return ret;
2605 }
2606 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2607 
2608 /**
2609  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2610  * @np: Pointer to device node associated with provider
2611  *
2612  * Find the last PM domain that was added by a particular provider and
2613  * remove this PM domain from the list of PM domains. The provider is
2614  * identified by the device node that is passed. The PM domain will
2615  * only be removed if the provider associated with the domain has
2616  * been removed.
2617  *
2618  * Returns a valid pointer to struct generic_pm_domain on success or
2619  * ERR_PTR() on failure.
2620  */
2621 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2622 {
2623 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2624 	int ret;
2625 
2626 	if (IS_ERR_OR_NULL(np))
2627 		return ERR_PTR(-EINVAL);
2628 
2629 	mutex_lock(&gpd_list_lock);
2630 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2631 		if (gpd->provider == &np->fwnode) {
2632 			ret = genpd_remove(gpd);
2633 			genpd = ret ? ERR_PTR(ret) : gpd;
2634 			break;
2635 		}
2636 	}
2637 	mutex_unlock(&gpd_list_lock);
2638 
2639 	return genpd;
2640 }
2641 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2642 
2643 static void genpd_release_dev(struct device *dev)
2644 {
2645 	of_node_put(dev->of_node);
2646 	kfree(dev);
2647 }
2648 
2649 static struct bus_type genpd_bus_type = {
2650 	.name		= "genpd",
2651 };
2652 
2653 /**
2654  * genpd_dev_pm_detach - Detach a device from its PM domain.
2655  * @dev: Device to detach.
2656  * @power_off: Currently not used
2657  *
2658  * Try to locate the generic PM domain that the device was previously
2659  * attached to. If one is found, the device is detached from it.
2660  */
2661 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2662 {
2663 	struct generic_pm_domain *pd;
2664 	unsigned int i;
2665 	int ret = 0;
2666 
2667 	pd = dev_to_genpd(dev);
2668 	if (IS_ERR(pd))
2669 		return;
2670 
2671 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2672 
2673 	/* Drop the default performance state */
2674 	if (dev_gpd_data(dev)->default_pstate) {
2675 		dev_pm_genpd_set_performance_state(dev, 0);
2676 		dev_gpd_data(dev)->default_pstate = 0;
2677 	}
2678 
2679 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2680 		ret = genpd_remove_device(pd, dev);
2681 		if (ret != -EAGAIN)
2682 			break;
2683 
2684 		mdelay(i);
2685 		cond_resched();
2686 	}
2687 
2688 	if (ret < 0) {
2689 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2690 			pd->name, ret);
2691 		return;
2692 	}
2693 
2694 	/* Check if PM domain can be powered off after removing this device. */
2695 	genpd_queue_power_off_work(pd);
2696 
2697 	/* Unregister the device if it was created by genpd. */
2698 	if (dev->bus == &genpd_bus_type)
2699 		device_unregister(dev);
2700 }
2701 
2702 static void genpd_dev_pm_sync(struct device *dev)
2703 {
2704 	struct generic_pm_domain *pd;
2705 
2706 	pd = dev_to_genpd(dev);
2707 	if (IS_ERR(pd))
2708 		return;
2709 
2710 	genpd_queue_power_off_work(pd);
2711 }
2712 
2713 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2714 				 unsigned int index, bool power_on)
2715 {
2716 	struct of_phandle_args pd_args;
2717 	struct generic_pm_domain *pd;
2718 	int pstate;
2719 	int ret;
2720 
2721 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2722 				"#power-domain-cells", index, &pd_args);
2723 	if (ret < 0)
2724 		return ret;
2725 
2726 	mutex_lock(&gpd_list_lock);
2727 	pd = genpd_get_from_provider(&pd_args);
2728 	of_node_put(pd_args.np);
2729 	if (IS_ERR(pd)) {
2730 		mutex_unlock(&gpd_list_lock);
2731 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2732 			__func__, PTR_ERR(pd));
2733 		return driver_deferred_probe_check_state(base_dev);
2734 	}
2735 
2736 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2737 
2738 	ret = genpd_add_device(pd, dev, base_dev);
2739 	mutex_unlock(&gpd_list_lock);
2740 
2741 	if (ret < 0)
2742 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2743 
2744 	dev->pm_domain->detach = genpd_dev_pm_detach;
2745 	dev->pm_domain->sync = genpd_dev_pm_sync;
2746 
2747 	if (power_on) {
2748 		genpd_lock(pd);
2749 		ret = genpd_power_on(pd, 0);
2750 		genpd_unlock(pd);
2751 	}
2752 
2753 	if (ret) {
2754 		genpd_remove_device(pd, dev);
2755 		return -EPROBE_DEFER;
2756 	}
2757 
2758 	/* Set the default performance state */
2759 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2760 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2761 		ret = pstate;
2762 		goto err;
2763 	} else if (pstate > 0) {
2764 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2765 		if (ret)
2766 			goto err;
2767 		dev_gpd_data(dev)->default_pstate = pstate;
2768 	}
2769 	return 1;
2770 
2771 err:
2772 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2773 		pd->name, ret);
2774 	genpd_remove_device(pd, dev);
2775 	return ret;
2776 }
2777 
2778 /**
2779  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2780  * @dev: Device to attach.
2781  *
2782  * Parse the device's OF node to find a PM domain specifier. If such is found,
2783  * attaches the device to the retrieved pm_domain ops.
2784  *
2785  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2786  * need a PM domain or when multiple power-domains exist for it, else a negative
2787  * error code. Note that if a power-domain exists for the device, but it cannot
2788  * be found or turned on, then -EPROBE_DEFER is returned to ensure that the
2789  * device is not probed and to re-try again later.
2790  */
2791 int genpd_dev_pm_attach(struct device *dev)
2792 {
2793 	if (!dev->of_node)
2794 		return 0;
2795 
2796 	/*
2797 	 * Devices with multiple PM domains must be attached separately, as we
2798 	 * can only attach one PM domain per device.
2799 	 */
2800 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2801 				       "#power-domain-cells") != 1)
2802 		return 0;
2803 
2804 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2805 }
2806 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
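
/*
 * Illustrative consumer binding (a sketch): a device referencing a single
 * power domain is attached through this path when its bus calls
 * dev_pm_domain_attach() at probe time. Node names are hypothetical.
 *
 *	//	foo_uart: serial@10000000 {
 *	//		compatible = "vendor,foo-uart";
 *	//		power-domains = <&foo_pd>;
 *	//	};
 */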
2807 
2808 /**
2809  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2810  * @dev: The device used to lookup the PM domain.
2811  * @index: The index of the PM domain.
2812  *
2813  * Parse the device's OF node to find a PM domain specifier at the provided @index.
2814  * If such is found, creates a virtual device and attaches it to the retrieved
2815  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2816  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2817  *
2818  * Returns the created virtual device on a successfully attached PM domain,
2819  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2820  * failures. If a power-domain exists for the device, but cannot be found or
2821  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2822  * is not probed and to re-try again later.
2823  */
2824 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2825 					 unsigned int index)
2826 {
2827 	struct device *virt_dev;
2828 	int num_domains;
2829 	int ret;
2830 
2831 	if (!dev->of_node)
2832 		return NULL;
2833 
2834 	/* Verify that the index is within a valid range. */
2835 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2836 						 "#power-domain-cells");
2837 	if (num_domains < 0 || index >= num_domains)
2838 		return NULL;
2839 
2840 	/* Allocate and register device on the genpd bus. */
2841 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2842 	if (!virt_dev)
2843 		return ERR_PTR(-ENOMEM);
2844 
2845 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2846 	virt_dev->bus = &genpd_bus_type;
2847 	virt_dev->release = genpd_release_dev;
2848 	virt_dev->of_node = of_node_get(dev->of_node);
2849 
2850 	ret = device_register(virt_dev);
2851 	if (ret) {
2852 		put_device(virt_dev);
2853 		return ERR_PTR(ret);
2854 	}
2855 
2856 	/* Try to attach the device to the PM domain at the specified index. */
2857 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2858 	if (ret < 1) {
2859 		device_unregister(virt_dev);
2860 		return ret ? ERR_PTR(ret) : NULL;
2861 	}
2862 
2863 	pm_runtime_enable(virt_dev);
2864 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2865 
2866 	return virt_dev;
2867 }
2868 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2869 
2870 /**
2871  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2872  * @dev: The device used to lookup the PM domain.
2873  * @name: The name of the PM domain.
2874  *
2875  * Parse the device's OF node to find a PM domain specifier using the
2876  * power-domain-names DT property. For further description see
2877  * genpd_dev_pm_attach_by_id().
2878  */
2879 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2880 {
2881 	int index;
2882 
2883 	if (!dev->of_node)
2884 		return NULL;
2885 
2886 	index = of_property_match_string(dev->of_node, "power-domain-names",
2887 					 name);
2888 	if (index < 0)
2889 		return NULL;
2890 
2891 	return genpd_dev_pm_attach_by_id(dev, index);
2892 }
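
/*
 * Illustrative usage (a sketch): a driver for a device with multiple power
 * domains attaches to each of them explicitly, typically through the
 * dev_pm_domain_attach_by_name() wrapper. Names below are hypothetical.
 *
 *	//	power-domains = <&pd CORE_PD>, <&pd MEM_PD>;
 *	//	power-domain-names = "core", "mem";
 *
 *	struct device *core_dev;
 *
 *	core_dev = dev_pm_domain_attach_by_name(dev, "core");
 *	if (IS_ERR(core_dev))
 *		return PTR_ERR(core_dev);
 *
 *	// Control the "core" domain with pm_runtime_*() calls on core_dev and
 *	// release it again with dev_pm_domain_detach(core_dev, true).
 */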
2893 
2894 static const struct of_device_id idle_state_match[] = {
2895 	{ .compatible = "domain-idle-state", },
2896 	{ }
2897 };
2898 
2899 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2900 				    struct device_node *state_node)
2901 {
2902 	int err;
2903 	u32 residency;
2904 	u32 entry_latency, exit_latency;
2905 
2906 	err = of_property_read_u32(state_node, "entry-latency-us",
2907 						&entry_latency);
2908 	if (err) {
2909 		pr_debug(" * %pOF missing entry-latency-us property\n",
2910 			 state_node);
2911 		return -EINVAL;
2912 	}
2913 
2914 	err = of_property_read_u32(state_node, "exit-latency-us",
2915 						&exit_latency);
2916 	if (err) {
2917 		pr_debug(" * %pOF missing exit-latency-us property\n",
2918 			 state_node);
2919 		return -EINVAL;
2920 	}
2921 
2922 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2923 	if (!err)
2924 		genpd_state->residency_ns = 1000 * residency;
2925 
2926 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2927 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2928 	genpd_state->fwnode = &state_node->fwnode;
2929 
2930 	return 0;
2931 }
2932 
2933 static int genpd_iterate_idle_states(struct device_node *dn,
2934 				     struct genpd_power_state *states)
2935 {
2936 	int ret;
2937 	struct of_phandle_iterator it;
2938 	struct device_node *np;
2939 	int i = 0;
2940 
2941 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2942 	if (ret <= 0)
2943 		return ret == -ENOENT ? 0 : ret;
2944 
2945 	/* Loop over the phandles until all the requested entries are found */
2946 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2947 		np = it.node;
2948 		if (!of_match_node(idle_state_match, np))
2949 			continue;
2950 		if (states) {
2951 			ret = genpd_parse_state(&states[i], np);
2952 			if (ret) {
2953 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2954 				       np, ret);
2955 				of_node_put(np);
2956 				return ret;
2957 			}
2958 		}
2959 		i++;
2960 	}
2961 
2962 	return i;
2963 }
2964 
2965 /**
2966  * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
2967  *
2968  * @dn: The genpd device node
2969  * @states: The pointer to which the state array will be saved.
2970  * @n: The count of elements in the array returned from this function.
2971  *
2972  * Returns the device states parsed from the OF node. The memory for the states
2973  * is allocated by this function and it is the caller's responsibility to free
2974  * it after use. It returns 0 whether zero or more compatible domain idle
2975  * states are found; in case of errors, a negative error code is returned.
2976  */
2977 int of_genpd_parse_idle_states(struct device_node *dn,
2978 			struct genpd_power_state **states, int *n)
2979 {
2980 	struct genpd_power_state *st;
2981 	int ret;
2982 
2983 	ret = genpd_iterate_idle_states(dn, NULL);
2984 	if (ret < 0)
2985 		return ret;
2986 
2987 	if (!ret) {
2988 		*states = NULL;
2989 		*n = 0;
2990 		return 0;
2991 	}
2992 
2993 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2994 	if (!st)
2995 		return -ENOMEM;
2996 
2997 	ret = genpd_iterate_idle_states(dn, st);
2998 	if (ret <= 0) {
2999 		kfree(st);
3000 		return ret < 0 ? ret : -EINVAL;
3001 	}
3002 
3003 	*states = st;
3004 	*n = ret;
3005 
3006 	return 0;
3007 }
3008 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
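
/*
 * Illustrative usage (a sketch): a provider with one idle state parses it
 * into a states array before registering the domain. The node, latencies and
 * the foo_pd object are hypothetical.
 *
 *	// Device tree:
 *	//	pd_state0: domain-idle-state-0 {
 *	//		compatible = "domain-idle-state";
 *	//		entry-latency-us = <100>;
 *	//		exit-latency-us = <200>;
 *	//		min-residency-us = <1000>;
 *	//	};
 *
 *	//	pd: power-controller {
 *	//		#power-domain-cells = <0>;
 *	//		domain-idle-states = <&pd_state0>;
 *	//	};
 *
 *	int state_count;
 *
 *	ret = of_genpd_parse_idle_states(np, &foo_pd.states, &state_count);
 *	if (ret)
 *		return ret;
 *	foo_pd.state_count = state_count;
 *	// pm_genpd_init() now uses these states instead of a single default one.
 */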
3009 
3010 /**
3011  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3012  *
3013  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3014  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3015  *	state.
3016  *
3017  * Returns the performance state encoded in the OPP of the genpd. This calls
3018  * the platform-specific genpd->opp_to_performance_state() callback to
3019  * translate the power domain OPP to a performance state.
3020  *
3021  * Returns the performance state on success and 0 on failure.
3022  */
3023 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3024 					       struct dev_pm_opp *opp)
3025 {
3026 	struct generic_pm_domain *genpd = NULL;
3027 	int state;
3028 
3029 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3030 
3031 	if (unlikely(!genpd->opp_to_performance_state))
3032 		return 0;
3033 
3034 	genpd_lock(genpd);
3035 	state = genpd->opp_to_performance_state(genpd, opp);
3036 	genpd_unlock(genpd);
3037 
3038 	return state;
3039 }
3040 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
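
/*
 * Illustrative callback (a sketch): providers commonly expose the OPP's
 * "opp-level" value as the performance state, assuming the consumers'
 * required-opps entries point into the genpd's own OPP table.
 *
 *	static unsigned int foo_pd_opp_to_performance_state(
 *			struct generic_pm_domain *genpd, struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	// Assign before registering the provider:
 *	// foo_pd.opp_to_performance_state = foo_pd_opp_to_performance_state;
 */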
3041 
3042 static int __init genpd_bus_init(void)
3043 {
3044 	return bus_register(&genpd_bus_type);
3045 }
3046 core_initcall(genpd_bus_init);
3047 
3048 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3049 
3050 
3051 /***        debugfs support        ***/
3052 
3053 #ifdef CONFIG_DEBUG_FS
3054 /*
3055  * TODO: This function is a slightly modified version of rtpm_status_show
3056  * from sysfs.c, so generalize it.
3057  */
3058 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3059 {
3060 	static const char * const status_lookup[] = {
3061 		[RPM_ACTIVE] = "active",
3062 		[RPM_RESUMING] = "resuming",
3063 		[RPM_SUSPENDED] = "suspended",
3064 		[RPM_SUSPENDING] = "suspending"
3065 	};
3066 	const char *p = "";
3067 
3068 	if (dev->power.runtime_error)
3069 		p = "error";
3070 	else if (dev->power.disable_depth)
3071 		p = "unsupported";
3072 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3073 		p = status_lookup[dev->power.runtime_status];
3074 	else
3075 		WARN_ON(1);
3076 
3077 	seq_printf(s, "%-25s  ", p);
3078 }
3079 
3080 static void perf_status_str(struct seq_file *s, struct device *dev)
3081 {
3082 	struct generic_pm_domain_data *gpd_data;
3083 
3084 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3085 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3086 }
3087 
3088 static int genpd_summary_one(struct seq_file *s,
3089 			struct generic_pm_domain *genpd)
3090 {
3091 	static const char * const status_lookup[] = {
3092 		[GENPD_STATE_ON] = "on",
3093 		[GENPD_STATE_OFF] = "off"
3094 	};
3095 	struct pm_domain_data *pm_data;
3096 	const char *kobj_path;
3097 	struct gpd_link *link;
3098 	char state[16];
3099 	int ret;
3100 
3101 	ret = genpd_lock_interruptible(genpd);
3102 	if (ret)
3103 		return -ERESTARTSYS;
3104 
3105 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3106 		goto exit;
3107 	if (!genpd_status_on(genpd))
3108 		snprintf(state, sizeof(state), "%s-%u",
3109 			 status_lookup[genpd->status], genpd->state_idx);
3110 	else
3111 		snprintf(state, sizeof(state), "%s",
3112 			 status_lookup[genpd->status]);
3113 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3114 
3115 	/*
3116 	 * Modifications on the list require holding locks on both
3117 	 * parent and child, so we are safe.
3118 	 * Also genpd->name is immutable.
3119 	 */
3120 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3121 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3122 			seq_printf(s, "\n%48s", " ");
3123 		seq_printf(s, "%s", link->child->name);
3124 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3125 			seq_puts(s, ", ");
3126 	}
3127 
3128 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3129 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3130 				genpd_is_irq_safe(genpd) ?
3131 				GFP_ATOMIC : GFP_KERNEL);
3132 		if (kobj_path == NULL)
3133 			continue;
3134 
3135 		seq_printf(s, "\n    %-50s  ", kobj_path);
3136 		rtpm_status_str(s, pm_data->dev);
3137 		perf_status_str(s, pm_data->dev);
3138 		kfree(kobj_path);
3139 	}
3140 
3141 	seq_puts(s, "\n");
3142 exit:
3143 	genpd_unlock(genpd);
3144 
3145 	return 0;
3146 }
3147 
3148 static int summary_show(struct seq_file *s, void *data)
3149 {
3150 	struct generic_pm_domain *genpd;
3151 	int ret = 0;
3152 
3153 	seq_puts(s, "domain                          status          children                           performance\n");
3154 	seq_puts(s, "    /device                                             runtime status\n");
3155 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3156 
3157 	ret = mutex_lock_interruptible(&gpd_list_lock);
3158 	if (ret)
3159 		return -ERESTARTSYS;
3160 
3161 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3162 		ret = genpd_summary_one(s, genpd);
3163 		if (ret)
3164 			break;
3165 	}
3166 	mutex_unlock(&gpd_list_lock);
3167 
3168 	return ret;
3169 }
3170 
3171 static int status_show(struct seq_file *s, void *data)
3172 {
3173 	static const char * const status_lookup[] = {
3174 		[GENPD_STATE_ON] = "on",
3175 		[GENPD_STATE_OFF] = "off"
3176 	};
3177 
3178 	struct generic_pm_domain *genpd = s->private;
3179 	int ret = 0;
3180 
3181 	ret = genpd_lock_interruptible(genpd);
3182 	if (ret)
3183 		return -ERESTARTSYS;
3184 
3185 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3186 		goto exit;
3187 
3188 	if (genpd->status == GENPD_STATE_OFF)
3189 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3190 			genpd->state_idx);
3191 	else
3192 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3193 exit:
3194 	genpd_unlock(genpd);
3195 	return ret;
3196 }
3197 
3198 static int sub_domains_show(struct seq_file *s, void *data)
3199 {
3200 	struct generic_pm_domain *genpd = s->private;
3201 	struct gpd_link *link;
3202 	int ret = 0;
3203 
3204 	ret = genpd_lock_interruptible(genpd);
3205 	if (ret)
3206 		return -ERESTARTSYS;
3207 
3208 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3209 		seq_printf(s, "%s\n", link->child->name);
3210 
3211 	genpd_unlock(genpd);
3212 	return ret;
3213 }
3214 
3215 static int idle_states_show(struct seq_file *s, void *data)
3216 {
3217 	struct generic_pm_domain *genpd = s->private;
3218 	u64 now, delta, idle_time = 0;
3219 	unsigned int i;
3220 	int ret = 0;
3221 
3222 	ret = genpd_lock_interruptible(genpd);
3223 	if (ret)
3224 		return -ERESTARTSYS;
3225 
3226 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3227 
3228 	for (i = 0; i < genpd->state_count; i++) {
3229 		idle_time = genpd->states[i].idle_time;
3230 
3231 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3232 			now = ktime_get_mono_fast_ns();
3233 			if (now > genpd->accounting_time) {
3234 				delta = now - genpd->accounting_time;
3235 				idle_time += delta;
3236 			}
3237 		}
3238 
3239 		do_div(idle_time, NSEC_PER_MSEC);
3240 		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3241 			   genpd->states[i].usage, genpd->states[i].rejected);
3242 	}
3243 
3244 	genpd_unlock(genpd);
3245 	return ret;
3246 }
3247 
3248 static int active_time_show(struct seq_file *s, void *data)
3249 {
3250 	struct generic_pm_domain *genpd = s->private;
3251 	u64 now, on_time, delta = 0;
3252 	int ret = 0;
3253 
3254 	ret = genpd_lock_interruptible(genpd);
3255 	if (ret)
3256 		return -ERESTARTSYS;
3257 
3258 	if (genpd->status == GENPD_STATE_ON) {
3259 		now = ktime_get_mono_fast_ns();
3260 		if (now > genpd->accounting_time)
3261 			delta = now - genpd->accounting_time;
3262 	}
3263 
3264 	on_time = genpd->on_time + delta;
3265 	do_div(on_time, NSEC_PER_MSEC);
3266 	seq_printf(s, "%llu ms\n", on_time);
3267 
3268 	genpd_unlock(genpd);
3269 	return ret;
3270 }
3271 
3272 static int total_idle_time_show(struct seq_file *s, void *data)
3273 {
3274 	struct generic_pm_domain *genpd = s->private;
3275 	u64 now, delta, total = 0;
3276 	unsigned int i;
3277 	int ret = 0;
3278 
3279 	ret = genpd_lock_interruptible(genpd);
3280 	if (ret)
3281 		return -ERESTARTSYS;
3282 
3283 	for (i = 0; i < genpd->state_count; i++) {
3284 		total += genpd->states[i].idle_time;
3285 
3286 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3287 			now = ktime_get_mono_fast_ns();
3288 			if (now > genpd->accounting_time) {
3289 				delta = now - genpd->accounting_time;
3290 				total += delta;
3291 			}
3292 		}
3293 	}
3294 
3295 	do_div(total, NSEC_PER_MSEC);
3296 	seq_printf(s, "%llu ms\n", total);
3297 
3298 	genpd_unlock(genpd);
3299 	return ret;
3300 }
3301 
3302 
3303 static int devices_show(struct seq_file *s, void *data)
3304 {
3305 	struct generic_pm_domain *genpd = s->private;
3306 	struct pm_domain_data *pm_data;
3307 	const char *kobj_path;
3308 	int ret = 0;
3309 
3310 	ret = genpd_lock_interruptible(genpd);
3311 	if (ret)
3312 		return -ERESTARTSYS;
3313 
3314 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3315 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3316 				genpd_is_irq_safe(genpd) ?
3317 				GFP_ATOMIC : GFP_KERNEL);
3318 		if (kobj_path == NULL)
3319 			continue;
3320 
3321 		seq_printf(s, "%s\n", kobj_path);
3322 		kfree(kobj_path);
3323 	}
3324 
3325 	genpd_unlock(genpd);
3326 	return ret;
3327 }
3328 
3329 static int perf_state_show(struct seq_file *s, void *data)
3330 {
3331 	struct generic_pm_domain *genpd = s->private;
3332 
3333 	if (genpd_lock_interruptible(genpd))
3334 		return -ERESTARTSYS;
3335 
3336 	seq_printf(s, "%u\n", genpd->performance_state);
3337 
3338 	genpd_unlock(genpd);
3339 	return 0;
3340 }
3341 
3342 DEFINE_SHOW_ATTRIBUTE(summary);
3343 DEFINE_SHOW_ATTRIBUTE(status);
3344 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3345 DEFINE_SHOW_ATTRIBUTE(idle_states);
3346 DEFINE_SHOW_ATTRIBUTE(active_time);
3347 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3348 DEFINE_SHOW_ATTRIBUTE(devices);
3349 DEFINE_SHOW_ATTRIBUTE(perf_state);
3350 
3351 static void genpd_debug_add(struct generic_pm_domain *genpd)
3352 {
3353 	struct dentry *d;
3354 
3355 	if (!genpd_debugfs_dir)
3356 		return;
3357 
3358 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3359 
3360 	debugfs_create_file("current_state", 0444,
3361 			    d, genpd, &status_fops);
3362 	debugfs_create_file("sub_domains", 0444,
3363 			    d, genpd, &sub_domains_fops);
3364 	debugfs_create_file("idle_states", 0444,
3365 			    d, genpd, &idle_states_fops);
3366 	debugfs_create_file("active_time", 0444,
3367 			    d, genpd, &active_time_fops);
3368 	debugfs_create_file("total_idle_time", 0444,
3369 			    d, genpd, &total_idle_time_fops);
3370 	debugfs_create_file("devices", 0444,
3371 			    d, genpd, &devices_fops);
3372 	if (genpd->set_performance_state)
3373 		debugfs_create_file("perf_state", 0444,
3374 				    d, genpd, &perf_state_fops);
3375 }
3376 
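/*
 * With debugfs mounted, each registered domain thus gets a directory such as
 * /sys/kernel/debug/pm_genpd/<domain-name>/ containing current_state,
 * sub_domains, idle_states, active_time, total_idle_time, devices and, for
 * domains with a ->set_performance_state() callback, perf_state, alongside
 * the global pm_genpd_summary file created below.
 */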
3377 static int __init genpd_debug_init(void)
3378 {
3379 	struct generic_pm_domain *genpd;
3380 
3381 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3382 
3383 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3384 			    NULL, &summary_fops);
3385 
3386 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3387 		genpd_debug_add(genpd);
3388 
3389 	return 0;
3390 }
3391 late_initcall(genpd_debug_init);
3392 
3393 static void __exit genpd_debug_exit(void)
3394 {
3395 	debugfs_remove_recursive(genpd_debugfs_dir);
3396 }
3397 __exitcall(genpd_debug_exit);
3398 #endif /* CONFIG_DEBUG_FS */
3399