xref: /openbmc/linux/drivers/base/power/domain.c (revision 6486a57f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 #include <linux/cpu.h>
24 #include <linux/debugfs.h>
25 
26 #include "power.h"
27 
28 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
29 
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
31 ({								\
32 	type (*__routine)(struct device *__d); 			\
33 	type __ret = (type)0;					\
34 								\
35 	__routine = genpd->dev_ops.callback; 			\
36 	if (__routine) {					\
37 		__ret = __routine(dev); 			\
38 	}							\
39 	__ret;							\
40 })
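
/*
 * Editor's note: GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to
 * roughly "genpd->dev_ops.stop ? genpd->dev_ops.stop(dev) : 0", i.e. it
 * invokes an optional per-domain device callback and yields a zero result
 * of the requested type when the callback isn't set. See genpd_stop_dev()
 * and genpd_start_dev() below.
 */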
41 
42 static LIST_HEAD(gpd_list);
43 static DEFINE_MUTEX(gpd_list_lock);
44 
45 struct genpd_lock_ops {
46 	void (*lock)(struct generic_pm_domain *genpd);
47 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 	void (*unlock)(struct generic_pm_domain *genpd);
50 };
51 
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53 {
54 	mutex_lock(&genpd->mlock);
55 }
56 
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
58 					int depth)
59 {
60 	mutex_lock_nested(&genpd->mlock, depth);
61 }
62 
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64 {
65 	return mutex_lock_interruptible(&genpd->mlock);
66 }
67 
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69 {
70 	mutex_unlock(&genpd->mlock);
71 }
72 
73 static const struct genpd_lock_ops genpd_mtx_ops = {
74 	.lock = genpd_lock_mtx,
75 	.lock_nested = genpd_lock_nested_mtx,
76 	.lock_interruptible = genpd_lock_interruptible_mtx,
77 	.unlock = genpd_unlock_mtx,
78 };
79 
80 static void genpd_lock_spin(struct generic_pm_domain *genpd)
81 	__acquires(&genpd->slock)
82 {
83 	unsigned long flags;
84 
85 	spin_lock_irqsave(&genpd->slock, flags);
86 	genpd->lock_flags = flags;
87 }
88 
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90 					int depth)
91 	__acquires(&genpd->slock)
92 {
93 	unsigned long flags;
94 
95 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96 	genpd->lock_flags = flags;
97 }
98 
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100 	__acquires(&genpd->slock)
101 {
102 	unsigned long flags;
103 
104 	spin_lock_irqsave(&genpd->slock, flags);
105 	genpd->lock_flags = flags;
106 	return 0;
107 }
108 
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110 	__releases(&genpd->slock)
111 {
112 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
113 }
114 
115 static const struct genpd_lock_ops genpd_spin_ops = {
116 	.lock = genpd_lock_spin,
117 	.lock_nested = genpd_lock_nested_spin,
118 	.lock_interruptible = genpd_lock_interruptible_spin,
119 	.unlock = genpd_unlock_spin,
120 };
121 
122 #define genpd_lock(p)			p->lock_ops->lock(p)
123 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
124 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
125 #define genpd_unlock(p)			p->lock_ops->unlock(p)
126 
127 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
128 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
129 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
130 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
133 
134 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
135 		const struct generic_pm_domain *genpd)
136 {
137 	bool ret;
138 
139 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
140 
141 	/*
142 	 * Warn once if an IRQ safe device is attached to a domain whose
143 	 * callbacks are allowed to sleep. This indicates a suboptimal
144 	 * configuration for PM, but it doesn't matter for an always-on domain.
145 	 */
146 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
147 		return ret;
148 
149 	if (ret)
150 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
151 				genpd->name);
152 
153 	return ret;
154 }
155 
156 static int genpd_runtime_suspend(struct device *dev);
157 
158 /*
159  * Get the generic PM domain for a particular struct device.
160  * This validates the struct device pointer, the PM domain pointer,
161  * and checks that the PM domain pointer is a real generic PM domain.
162  * Any failure results in NULL being returned.
163  */
164 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
165 {
166 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
167 		return NULL;
168 
169 	/* A genpd always has its ->runtime_suspend() callback assigned. */
170 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
171 		return pd_to_genpd(dev->pm_domain);
172 
173 	return NULL;
174 }
175 
176 /*
177  * This should only be used where we are certain that the pm_domain
178  * attached to the device is a genpd domain.
179  */
180 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
181 {
182 	if (IS_ERR_OR_NULL(dev->pm_domain))
183 		return ERR_PTR(-EINVAL);
184 
185 	return pd_to_genpd(dev->pm_domain);
186 }
187 
188 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
189 			  struct device *dev)
190 {
191 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
192 }
193 
194 static int genpd_start_dev(const struct generic_pm_domain *genpd,
195 			   struct device *dev)
196 {
197 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
198 }
199 
200 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
201 {
202 	bool ret = false;
203 
204 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
205 		ret = !!atomic_dec_and_test(&genpd->sd_count);
206 
207 	return ret;
208 }
209 
210 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
211 {
212 	atomic_inc(&genpd->sd_count);
213 	smp_mb__after_atomic();
214 }
215 
216 #ifdef CONFIG_DEBUG_FS
217 static struct dentry *genpd_debugfs_dir;
218 
219 static void genpd_debug_add(struct generic_pm_domain *genpd);
220 
221 static void genpd_debug_remove(struct generic_pm_domain *genpd)
222 {
223 	if (!genpd_debugfs_dir)
224 		return;
225 
226 	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
227 }
228 
229 static void genpd_update_accounting(struct generic_pm_domain *genpd)
230 {
231 	u64 delta, now;
232 
233 	now = ktime_get_mono_fast_ns();
234 	if (now <= genpd->accounting_time)
235 		return;
236 
237 	delta = now - genpd->accounting_time;
238 
239 	/*
240 	 * If genpd->status is ON, the domain has just left its off state,
241 	 * so the elapsed time was spent idle; otherwise the domain has just
242 	 * been powered off, so the elapsed time was spent on.
243 	 */
244 	if (genpd->status == GENPD_STATE_ON)
245 		genpd->states[genpd->state_idx].idle_time += delta;
246 	else
247 		genpd->on_time += delta;
248 
249 	genpd->accounting_time = now;
250 }
251 #else
252 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
253 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
254 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
255 #endif
256 
257 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
258 					   unsigned int state)
259 {
260 	struct generic_pm_domain_data *pd_data;
261 	struct pm_domain_data *pdd;
262 	struct gpd_link *link;
263 
264 	/* New requested state is the same as the max requested state */
265 	if (state == genpd->performance_state)
266 		return state;
267 
268 	/* New requested state is higher than the max requested state */
269 	if (state > genpd->performance_state)
270 		return state;
271 
272 	/* Traverse all devices within the domain */
273 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
274 		pd_data = to_gpd_data(pdd);
275 
276 		if (pd_data->performance_state > state)
277 			state = pd_data->performance_state;
278 	}
279 
280 	/*
281 	 * Traverse all sub-domains within the domain. This can be
282 	 * done without any additional locking as the link->performance_state
283 	 * field is protected by the parent genpd->lock, which is already taken.
284 	 *
285 	 * Also note that link->performance_state (subdomain's performance state
286 	 * requirement to parent domain) is different from
287 	 * link->child->performance_state (current performance state requirement
288 	 * of the devices/sub-domains of the subdomain) and so can have a
289 	 * different value.
290 	 *
291 	 * Note that we also take votes from powered-off sub-domains into
292 	 * account, as the same is done for devices right now.
293 	 */
294 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
295 		if (link->performance_state > state)
296 			state = link->performance_state;
297 	}
298 
299 	return state;
300 }
301 
302 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
303 					 struct generic_pm_domain *parent,
304 					 unsigned int pstate)
305 {
306 	if (!parent->set_performance_state)
307 		return pstate;
308 
309 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
310 						  parent->opp_table,
311 						  pstate);
312 }
313 
314 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
315 					unsigned int state, int depth)
316 {
317 	struct generic_pm_domain *parent;
318 	struct gpd_link *link;
319 	int parent_state, ret;
320 
321 	if (state == genpd->performance_state)
322 		return 0;
323 
324 	/* Propagate to parents of genpd */
325 	list_for_each_entry(link, &genpd->child_links, child_node) {
326 		parent = link->parent;
327 
328 		/* Find parent's performance state */
329 		ret = genpd_xlate_performance_state(genpd, parent, state);
330 		if (unlikely(ret < 0))
331 			goto err;
332 
333 		parent_state = ret;
334 
335 		genpd_lock_nested(parent, depth + 1);
336 
337 		link->prev_performance_state = link->performance_state;
338 		link->performance_state = parent_state;
339 		parent_state = _genpd_reeval_performance_state(parent,
340 						parent_state);
341 		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
342 		if (ret)
343 			link->performance_state = link->prev_performance_state;
344 
345 		genpd_unlock(parent);
346 
347 		if (ret)
348 			goto err;
349 	}
350 
351 	if (genpd->set_performance_state) {
352 		ret = genpd->set_performance_state(genpd, state);
353 		if (ret)
354 			goto err;
355 	}
356 
357 	genpd->performance_state = state;
358 	return 0;
359 
360 err:
361 	/* Encountered an error, let's roll back */
362 	list_for_each_entry_continue_reverse(link, &genpd->child_links,
363 					     child_node) {
364 		parent = link->parent;
365 
366 		genpd_lock_nested(parent, depth + 1);
367 
368 		parent_state = link->prev_performance_state;
369 		link->performance_state = parent_state;
370 
371 		parent_state = _genpd_reeval_performance_state(parent,
372 						parent_state);
373 		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
374 			pr_err("%s: Failed to roll back to performance state %d\n",
375 			       parent->name, parent_state);
376 		}
377 
378 		genpd_unlock(parent);
379 	}
380 
381 	return ret;
382 }
383 
384 static int genpd_set_performance_state(struct device *dev, unsigned int state)
385 {
386 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
387 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
388 	unsigned int prev_state;
389 	int ret;
390 
391 	prev_state = gpd_data->performance_state;
392 	if (prev_state == state)
393 		return 0;
394 
395 	gpd_data->performance_state = state;
396 	state = _genpd_reeval_performance_state(genpd, state);
397 
398 	ret = _genpd_set_performance_state(genpd, state, 0);
399 	if (ret)
400 		gpd_data->performance_state = prev_state;
401 
402 	return ret;
403 }
404 
405 static int genpd_drop_performance_state(struct device *dev)
406 {
407 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
408 
409 	if (!genpd_set_performance_state(dev, 0))
410 		return prev_state;
411 
412 	return 0;
413 }
414 
415 static void genpd_restore_performance_state(struct device *dev,
416 					    unsigned int state)
417 {
418 	if (state)
419 		genpd_set_performance_state(dev, state);
420 }
421 
422 /**
423  * dev_pm_genpd_set_performance_state - Set performance state of device's power
424  * domain.
425  *
426  * @dev: Device for which the performance-state needs to be set.
427  * @state: Target performance state of the device. Pass 0 when the device no
428  *	   longer has any performance-state constraints, so that it stops
429  *	   contributing to the selection of the target performance state of
430  *	   the genpd.
431  *
432  * It is assumed that the caller guarantees that the genpd won't be detached
433  * while this routine is running.
434  *
435  * Returns 0 on success and negative error values on failures.
436  */
437 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
438 {
439 	struct generic_pm_domain *genpd;
440 	int ret = 0;
441 
442 	genpd = dev_to_genpd_safe(dev);
443 	if (!genpd)
444 		return -ENODEV;
445 
446 	if (WARN_ON(!dev->power.subsys_data ||
447 		     !dev->power.subsys_data->domain_data))
448 		return -EINVAL;
449 
450 	genpd_lock(genpd);
451 	if (pm_runtime_suspended(dev)) {
452 		dev_gpd_data(dev)->rpm_pstate = state;
453 	} else {
454 		ret = genpd_set_performance_state(dev, state);
455 		if (!ret)
456 			dev_gpd_data(dev)->rpm_pstate = 0;
457 	}
458 	genpd_unlock(genpd);
459 
460 	return ret;
461 }
462 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
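
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * consumer driver raising its performance-state vote around a high-load
 * operation and dropping it afterwards. The "foo" names and the state
 * value 3 are hypothetical.
 *
 *	static int foo_run_burst(struct device *dev)
 *	{
 *		int ret = dev_pm_genpd_set_performance_state(dev, 3);
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_do_burst(dev);	// hypothetical helper
 *
 *		dev_pm_genpd_set_performance_state(dev, 0);
 *		return ret;
 *	}
 */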
463 
464 /**
465  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
466  *
467  * @dev: Device to handle
468  * @next: impending interrupt/wakeup for the device
469  *
471  * Allow devices to inform of the next wakeup. It's assumed that the caller
472  * guarantees that the genpd won't be detached while this routine is running.
473  * Additionally, it's also assumed that @dev isn't runtime suspended
474  * (RPM_SUSPENDED).
475  * Although devices are expected to update the next_wakeup after the end of
476  * their use case as well, it is possible the devices themselves may not know
477  * about that, so a stale @next will be ignored when powering off the domain.
478  */
479 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
480 {
481 	struct generic_pm_domain *genpd;
482 	struct gpd_timing_data *td;
483 
484 	genpd = dev_to_genpd_safe(dev);
485 	if (!genpd)
486 		return;
487 
488 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
489 	if (td)
490 		td->next_wakeup = next;
491 }
492 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
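
/*
 * Usage sketch (editor's illustration): a driver that knows its next
 * interrupt is due in about 500 us can pass that hint to the governor
 * before going idle. The helper below is hypothetical.
 *
 *	static void foo_hint_next_wakeup(struct device *dev)
 *	{
 *		ktime_t next = ktime_add_us(ktime_get(), 500);
 *
 *		dev_pm_genpd_set_next_wakeup(dev, next);
 *	}
 */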
493 
494 /**
495  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
496  * @dev: A device that is attached to the genpd.
497  *
498  * This routine should typically be called for a device at the point when a
499  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
500  *
501  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
502  * valid value has been set.
503  */
504 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
505 {
506 	struct generic_pm_domain *genpd;
507 
508 	genpd = dev_to_genpd_safe(dev);
509 	if (!genpd)
510 		return KTIME_MAX;
511 
512 	if (genpd->gd)
513 		return genpd->gd->next_hrtimer;
514 
515 	return KTIME_MAX;
516 }
517 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
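
/*
 * Usage sketch (editor's illustration): querying the aggregated next
 * hrtimer from a GENPD_NOTIFY_PRE_OFF handler to veto a power off that
 * would not pay off. The notifier and "foo_dev" are hypothetical.
 *
 *	static int foo_pd_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF) {
 *			ktime_t next = dev_pm_genpd_get_next_hrtimer(foo_dev);
 *
 *			// an error return from PRE_OFF aborts the power off
 *			if (ktime_before(next, ktime_add_us(ktime_get(), 100)))
 *				return NOTIFY_BAD;
 *		}
 *		return NOTIFY_OK;
 *	}
 */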
518 
519 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
520 {
521 	unsigned int state_idx = genpd->state_idx;
522 	ktime_t time_start;
523 	s64 elapsed_ns;
524 	int ret;
525 
526 	/* Notify consumers that we are about to power on. */
527 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
528 					     GENPD_NOTIFY_PRE_ON,
529 					     GENPD_NOTIFY_OFF, NULL);
530 	ret = notifier_to_errno(ret);
531 	if (ret)
532 		return ret;
533 
534 	if (!genpd->power_on)
535 		goto out;
536 
537 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
538 	if (!timed) {
539 		ret = genpd->power_on(genpd);
540 		if (ret)
541 			goto err;
542 
543 		goto out;
544 	}
545 
546 	time_start = ktime_get();
547 	ret = genpd->power_on(genpd);
548 	if (ret)
549 		goto err;
550 
551 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
552 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
553 		goto out;
554 
555 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
556 	genpd->gd->max_off_time_changed = true;
557 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
558 		 genpd->name, "on", elapsed_ns);
559 
560 out:
561 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
562 	return 0;
563 err:
564 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
565 				NULL);
566 	return ret;
567 }
568 
569 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
570 {
571 	unsigned int state_idx = genpd->state_idx;
572 	ktime_t time_start;
573 	s64 elapsed_ns;
574 	int ret;
575 
576 	/* Notify consumers that we are about to power off. */
577 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
578 					     GENPD_NOTIFY_PRE_OFF,
579 					     GENPD_NOTIFY_ON, NULL);
580 	ret = notifier_to_errno(ret);
581 	if (ret)
582 		return ret;
583 
584 	if (!genpd->power_off)
585 		goto out;
586 
587 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
588 	if (!timed) {
589 		ret = genpd->power_off(genpd);
590 		if (ret)
591 			goto busy;
592 
593 		goto out;
594 	}
595 
596 	time_start = ktime_get();
597 	ret = genpd->power_off(genpd);
598 	if (ret)
599 		goto busy;
600 
601 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
602 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
603 		goto out;
604 
605 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
606 	genpd->gd->max_off_time_changed = true;
607 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
608 		 genpd->name, "off", elapsed_ns);
609 
610 out:
611 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
612 				NULL);
613 	return 0;
614 busy:
615 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
616 	return ret;
617 }
618 
619 /**
620  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
621  * @genpd: PM domain to power off.
622  *
623  * Queue up the execution of genpd_power_off() unless it's already been done
624  * before.
625  */
626 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
627 {
628 	queue_work(pm_wq, &genpd->power_off_work);
629 }
630 
631 /**
632  * genpd_power_off - Remove power from a given PM domain.
633  * @genpd: PM domain to power down.
634  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
635  * RPM status of the related device is in an intermediate state, not yet turned
636  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
637  * be RPM_SUSPENDED, while it tries to power off the PM domain.
638  * @depth: nesting count for lockdep.
639  *
640  * If all of the @genpd's devices have been suspended and all of its subdomains
641  * have been powered down, remove power from @genpd.
642  */
643 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
644 			   unsigned int depth)
645 {
646 	struct pm_domain_data *pdd;
647 	struct gpd_link *link;
648 	unsigned int not_suspended = 0;
649 	int ret;
650 
651 	/*
652 	 * Do not try to power off the domain in the following situations:
653 	 * (1) The domain is already in the "power off" state.
654 	 * (2) System suspend is in progress.
655 	 */
656 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
657 		return 0;
658 
659 	/*
660 	 * Abort power off for the PM domain in the following situations:
661 	 * (1) The domain is configured as always on.
662 	 * (2) When the domain has a subdomain being powered on.
663 	 */
664 	if (genpd_is_always_on(genpd) ||
665 			genpd_is_rpm_always_on(genpd) ||
666 			atomic_read(&genpd->sd_count) > 0)
667 		return -EBUSY;
668 
669 	/*
670 	 * The children must be in their deepest (powered-off) states to allow
671 	 * the parent to be powered off. Note that there's no need for
672 	 * additional locking, as powering on a child requires the parent's
673 	 * lock to be acquired first.
674 	 */
675 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
676 		struct generic_pm_domain *child = link->child;
677 		if (child->state_idx < child->state_count - 1)
678 			return -EBUSY;
679 	}
680 
681 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
682 		/*
683 		 * Do not allow the PM domain to be powered off when an IRQ
684 		 * safe device is part of a non-IRQ safe domain.
685 		 */
686 		if (!pm_runtime_suspended(pdd->dev) ||
687 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
688 			not_suspended++;
689 	}
690 
691 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
692 		return -EBUSY;
693 
694 	if (genpd->gov && genpd->gov->power_down_ok) {
695 		if (!genpd->gov->power_down_ok(&genpd->domain))
696 			return -EAGAIN;
697 	}
698 
699 	/* Default to shallowest state. */
700 	if (!genpd->gov)
701 		genpd->state_idx = 0;
702 
703 	/* Don't power off if a child domain is waiting to power on. */
704 	if (atomic_read(&genpd->sd_count) > 0)
705 		return -EBUSY;
706 
707 	ret = _genpd_power_off(genpd, true);
708 	if (ret) {
709 		genpd->states[genpd->state_idx].rejected++;
710 		return ret;
711 	}
712 
713 	genpd->status = GENPD_STATE_OFF;
714 	genpd_update_accounting(genpd);
715 	genpd->states[genpd->state_idx].usage++;
716 
717 	list_for_each_entry(link, &genpd->child_links, child_node) {
718 		genpd_sd_counter_dec(link->parent);
719 		genpd_lock_nested(link->parent, depth + 1);
720 		genpd_power_off(link->parent, false, depth + 1);
721 		genpd_unlock(link->parent);
722 	}
723 
724 	return 0;
725 }
726 
727 /**
728  * genpd_power_on - Restore power to a given PM domain and its parents.
729  * @genpd: PM domain to power up.
730  * @depth: nesting count for lockdep.
731  *
732  * Restore power to @genpd and all of its parents so that it is possible to
733  * resume a device belonging to it.
734  */
735 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
736 {
737 	struct gpd_link *link;
738 	int ret = 0;
739 
740 	if (genpd_status_on(genpd))
741 		return 0;
742 
743 	/*
744 	 * The list is guaranteed not to change while the loop below is being
745 	 * executed, unless one of the parents' .power_on() callbacks fiddles
746 	 * with it.
747 	 */
748 	list_for_each_entry(link, &genpd->child_links, child_node) {
749 		struct generic_pm_domain *parent = link->parent;
750 
751 		genpd_sd_counter_inc(parent);
752 
753 		genpd_lock_nested(parent, depth + 1);
754 		ret = genpd_power_on(parent, depth + 1);
755 		genpd_unlock(parent);
756 
757 		if (ret) {
758 			genpd_sd_counter_dec(parent);
759 			goto err;
760 		}
761 	}
762 
763 	ret = _genpd_power_on(genpd, true);
764 	if (ret)
765 		goto err;
766 
767 	genpd->status = GENPD_STATE_ON;
768 	genpd_update_accounting(genpd);
769 
770 	return 0;
771 
772  err:
773 	list_for_each_entry_continue_reverse(link,
774 					&genpd->child_links,
775 					child_node) {
776 		genpd_sd_counter_dec(link->parent);
777 		genpd_lock_nested(link->parent, depth + 1);
778 		genpd_power_off(link->parent, false, depth + 1);
779 		genpd_unlock(link->parent);
780 	}
781 
782 	return ret;
783 }
784 
785 static int genpd_dev_pm_start(struct device *dev)
786 {
787 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
788 
789 	return genpd_start_dev(genpd, dev);
790 }
791 
792 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
793 				     unsigned long val, void *ptr)
794 {
795 	struct generic_pm_domain_data *gpd_data;
796 	struct device *dev;
797 
798 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
799 	dev = gpd_data->base.dev;
800 
801 	for (;;) {
802 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
803 		struct pm_domain_data *pdd;
804 		struct gpd_timing_data *td;
805 
806 		spin_lock_irq(&dev->power.lock);
807 
808 		pdd = dev->power.subsys_data ?
809 				dev->power.subsys_data->domain_data : NULL;
810 		if (pdd) {
811 			td = to_gpd_data(pdd)->td;
812 			if (td) {
813 				td->constraint_changed = true;
814 				genpd = dev_to_genpd(dev);
815 			}
816 		}
817 
818 		spin_unlock_irq(&dev->power.lock);
819 
820 		if (!IS_ERR(genpd)) {
821 			genpd_lock(genpd);
822 			genpd->gd->max_off_time_changed = true;
823 			genpd_unlock(genpd);
824 		}
825 
826 		dev = dev->parent;
827 		if (!dev || dev->power.ignore_children)
828 			break;
829 	}
830 
831 	return NOTIFY_DONE;
832 }
833 
834 /**
835  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
836  * @work: Work structure used for scheduling the execution of this function.
837  */
838 static void genpd_power_off_work_fn(struct work_struct *work)
839 {
840 	struct generic_pm_domain *genpd;
841 
842 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
843 
844 	genpd_lock(genpd);
845 	genpd_power_off(genpd, false, 0);
846 	genpd_unlock(genpd);
847 }
848 
849 /**
850  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
851  * @dev: Device to handle.
852  */
853 static int __genpd_runtime_suspend(struct device *dev)
854 {
855 	int (*cb)(struct device *__dev);
856 
857 	if (dev->type && dev->type->pm)
858 		cb = dev->type->pm->runtime_suspend;
859 	else if (dev->class && dev->class->pm)
860 		cb = dev->class->pm->runtime_suspend;
861 	else if (dev->bus && dev->bus->pm)
862 		cb = dev->bus->pm->runtime_suspend;
863 	else
864 		cb = NULL;
865 
866 	if (!cb && dev->driver && dev->driver->pm)
867 		cb = dev->driver->pm->runtime_suspend;
868 
869 	return cb ? cb(dev) : 0;
870 }
871 
872 /**
873  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
874  * @dev: Device to handle.
875  */
876 static int __genpd_runtime_resume(struct device *dev)
877 {
878 	int (*cb)(struct device *__dev);
879 
880 	if (dev->type && dev->type->pm)
881 		cb = dev->type->pm->runtime_resume;
882 	else if (dev->class && dev->class->pm)
883 		cb = dev->class->pm->runtime_resume;
884 	else if (dev->bus && dev->bus->pm)
885 		cb = dev->bus->pm->runtime_resume;
886 	else
887 		cb = NULL;
888 
889 	if (!cb && dev->driver && dev->driver->pm)
890 		cb = dev->driver->pm->runtime_resume;
891 
892 	return cb ? cb(dev) : 0;
893 }
894 
895 /**
896  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
897  * @dev: Device to suspend.
898  *
899  * Carry out a runtime suspend of a device under the assumption that its
900  * pm_domain field points to the domain member of an object of type
901  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
902  */
903 static int genpd_runtime_suspend(struct device *dev)
904 {
905 	struct generic_pm_domain *genpd;
906 	bool (*suspend_ok)(struct device *__dev);
907 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
908 	struct gpd_timing_data *td = gpd_data->td;
909 	bool runtime_pm = pm_runtime_enabled(dev);
910 	ktime_t time_start = 0;
911 	s64 elapsed_ns;
912 	int ret;
913 
914 	dev_dbg(dev, "%s()\n", __func__);
915 
916 	genpd = dev_to_genpd(dev);
917 	if (IS_ERR(genpd))
918 		return -EINVAL;
919 
920 	/*
921 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
922 	 * callbacks for purposes other than runtime PM. In those scenarios
923 	 * runtime PM is disabled. Under these circumstances, we shall skip
924 	 * validating/measuring the PM QoS latency.
925 	 */
926 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
927 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
928 		return -EBUSY;
929 
930 	/* Measure suspend latency. */
931 	if (td && runtime_pm)
932 		time_start = ktime_get();
933 
934 	ret = __genpd_runtime_suspend(dev);
935 	if (ret)
936 		return ret;
937 
938 	ret = genpd_stop_dev(genpd, dev);
939 	if (ret) {
940 		__genpd_runtime_resume(dev);
941 		return ret;
942 	}
943 
944 	/* Update suspend latency value if the measured time exceeds it. */
945 	if (td && runtime_pm) {
946 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
947 		if (elapsed_ns > td->suspend_latency_ns) {
948 			td->suspend_latency_ns = elapsed_ns;
949 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
950 				elapsed_ns);
951 			genpd->gd->max_off_time_changed = true;
952 			td->constraint_changed = true;
953 		}
954 	}
955 
956 	/*
957 	 * If power.irq_safe is set, this routine may be run with
958 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
959 	 */
960 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
961 		return 0;
962 
963 	genpd_lock(genpd);
964 	genpd_power_off(genpd, true, 0);
965 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
966 	genpd_unlock(genpd);
967 
968 	return 0;
969 }
970 
971 /**
972  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
973  * @dev: Device to resume.
974  *
975  * Carry out a runtime resume of a device under the assumption that its
976  * pm_domain field points to the domain member of an object of type
977  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
978  */
979 static int genpd_runtime_resume(struct device *dev)
980 {
981 	struct generic_pm_domain *genpd;
982 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
983 	struct gpd_timing_data *td = gpd_data->td;
984 	bool timed = td && pm_runtime_enabled(dev);
985 	ktime_t time_start = 0;
986 	s64 elapsed_ns;
987 	int ret;
988 
989 	dev_dbg(dev, "%s()\n", __func__);
990 
991 	genpd = dev_to_genpd(dev);
992 	if (IS_ERR(genpd))
993 		return -EINVAL;
994 
995 	/*
996 	 * As we don't power off a non-IRQ safe domain that holds
997 	 * an IRQ safe device, we don't need to restore power to it.
998 	 */
999 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1000 		goto out;
1001 
1002 	genpd_lock(genpd);
1003 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1004 	ret = genpd_power_on(genpd, 0);
1005 	genpd_unlock(genpd);
1006 
1007 	if (ret)
1008 		return ret;
1009 
1010  out:
1011 	/* Measure resume latency. */
1012 	if (timed)
1013 		time_start = ktime_get();
1014 
1015 	ret = genpd_start_dev(genpd, dev);
1016 	if (ret)
1017 		goto err_poweroff;
1018 
1019 	ret = __genpd_runtime_resume(dev);
1020 	if (ret)
1021 		goto err_stop;
1022 
1023 	/* Update resume latency value if the measured time exceeds it. */
1024 	if (timed) {
1025 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1026 		if (elapsed_ns > td->resume_latency_ns) {
1027 			td->resume_latency_ns = elapsed_ns;
1028 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1029 				elapsed_ns);
1030 			genpd->gd->max_off_time_changed = true;
1031 			td->constraint_changed = true;
1032 		}
1033 	}
1034 
1035 	return 0;
1036 
1037 err_stop:
1038 	genpd_stop_dev(genpd, dev);
1039 err_poweroff:
1040 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1041 		genpd_lock(genpd);
1042 		genpd_power_off(genpd, true, 0);
1043 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1044 		genpd_unlock(genpd);
1045 	}
1046 
1047 	return ret;
1048 }
1049 
1050 static bool pd_ignore_unused;
1051 static int __init pd_ignore_unused_setup(char *__unused)
1052 {
1053 	pd_ignore_unused = true;
1054 	return 1;
1055 }
1056 __setup("pd_ignore_unused", pd_ignore_unused_setup);
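
/*
 * Editor's note: booting with "pd_ignore_unused" on the kernel command
 * line sets the flag above, making genpd_power_off_unused() below skip
 * powering off otherwise-unused domains, which can help when debugging
 * power-related boot problems.
 */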
1057 
1058 /**
1059  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1060  */
1061 static int __init genpd_power_off_unused(void)
1062 {
1063 	struct generic_pm_domain *genpd;
1064 
1065 	if (pd_ignore_unused) {
1066 		pr_warn("genpd: Not disabling unused power domains\n");
1067 		return 0;
1068 	}
1069 
1070 	mutex_lock(&gpd_list_lock);
1071 
1072 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1073 		genpd_queue_power_off_work(genpd);
1074 
1075 	mutex_unlock(&gpd_list_lock);
1076 
1077 	return 0;
1078 }
1079 late_initcall(genpd_power_off_unused);
1080 
1081 #ifdef CONFIG_PM_SLEEP
1082 
1083 /**
1084  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1085  * @genpd: PM domain to power off, if possible.
1086  * @use_lock: use the lock.
1087  * @depth: nesting count for lockdep.
1088  *
1089  * Check if the given PM domain can be powered off (during system suspend or
1090  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1091  *
1092  * This function is only called in "noirq" and "syscore" stages of system power
1093  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1094  * these cases the lock must be held.
1095  */
1096 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1097 				 unsigned int depth)
1098 {
1099 	struct gpd_link *link;
1100 
1101 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1102 		return;
1103 
1104 	if (genpd->suspended_count != genpd->device_count
1105 	    || atomic_read(&genpd->sd_count) > 0)
1106 		return;
1107 
1108 	/* Check that the children are in their deepest (powered-off) state. */
1109 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1110 		struct generic_pm_domain *child = link->child;
1111 		if (child->state_idx < child->state_count - 1)
1112 			return;
1113 	}
1114 
1115 	/* Choose the deepest state when suspending */
1116 	genpd->state_idx = genpd->state_count - 1;
1117 	if (_genpd_power_off(genpd, false))
1118 		return;
1119 
1120 	genpd->status = GENPD_STATE_OFF;
1121 
1122 	list_for_each_entry(link, &genpd->child_links, child_node) {
1123 		genpd_sd_counter_dec(link->parent);
1124 
1125 		if (use_lock)
1126 			genpd_lock_nested(link->parent, depth + 1);
1127 
1128 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1129 
1130 		if (use_lock)
1131 			genpd_unlock(link->parent);
1132 	}
1133 }
1134 
1135 /**
1136  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1137  * @genpd: PM domain to power on.
1138  * @use_lock: use the lock.
1139  * @depth: nesting count for lockdep.
1140  *
1141  * This function is only called in "noirq" and "syscore" stages of system power
1142  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1143  * these cases the lock must be held.
1144  */
1145 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1146 				unsigned int depth)
1147 {
1148 	struct gpd_link *link;
1149 
1150 	if (genpd_status_on(genpd))
1151 		return;
1152 
1153 	list_for_each_entry(link, &genpd->child_links, child_node) {
1154 		genpd_sd_counter_inc(link->parent);
1155 
1156 		if (use_lock)
1157 			genpd_lock_nested(link->parent, depth + 1);
1158 
1159 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1160 
1161 		if (use_lock)
1162 			genpd_unlock(link->parent);
1163 	}
1164 
1165 	_genpd_power_on(genpd, false);
1166 	genpd->status = GENPD_STATE_ON;
1167 }
1168 
1169 /**
1170  * genpd_prepare - Start power transition of a device in a PM domain.
1171  * @dev: Device to start the transition of.
1172  *
1173  * Start a power transition of a device (during a system-wide power transition)
1174  * under the assumption that its pm_domain field points to the domain member of
1175  * an object of type struct generic_pm_domain representing a PM domain
1176  * consisting of I/O devices.
1177  */
1178 static int genpd_prepare(struct device *dev)
1179 {
1180 	struct generic_pm_domain *genpd;
1181 	int ret;
1182 
1183 	dev_dbg(dev, "%s()\n", __func__);
1184 
1185 	genpd = dev_to_genpd(dev);
1186 	if (IS_ERR(genpd))
1187 		return -EINVAL;
1188 
1189 	genpd_lock(genpd);
1190 
1191 	if (genpd->prepared_count++ == 0)
1192 		genpd->suspended_count = 0;
1193 
1194 	genpd_unlock(genpd);
1195 
1196 	ret = pm_generic_prepare(dev);
1197 	if (ret < 0) {
1198 		genpd_lock(genpd);
1199 
1200 		genpd->prepared_count--;
1201 
1202 		genpd_unlock(genpd);
1203 	}
1204 
1205 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1206 	return ret >= 0 ? 0 : ret;
1207 }
1208 
1209 /**
1210  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1211  *   I/O PM domain.
1212  * @dev: Device to suspend.
1213  * @suspend_noirq: Generic suspend_noirq callback.
1214  * @resume_noirq: Generic resume_noirq callback.
1215  *
1216  * Stop the device and remove power from the domain if all devices in it have
1217  * been stopped.
1218  */
1219 static int genpd_finish_suspend(struct device *dev,
1220 				int (*suspend_noirq)(struct device *dev),
1221 				int (*resume_noirq)(struct device *dev))
1222 {
1223 	struct generic_pm_domain *genpd;
1224 	int ret = 0;
1225 
1226 	genpd = dev_to_genpd(dev);
1227 	if (IS_ERR(genpd))
1228 		return -EINVAL;
1229 
1230 	ret = suspend_noirq(dev);
1231 	if (ret)
1232 		return ret;
1233 
1234 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1235 		return 0;
1236 
1237 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1238 	    !pm_runtime_status_suspended(dev)) {
1239 		ret = genpd_stop_dev(genpd, dev);
1240 		if (ret) {
1241 			resume_noirq(dev);
1242 			return ret;
1243 		}
1244 	}
1245 
1246 	genpd_lock(genpd);
1247 	genpd->suspended_count++;
1248 	genpd_sync_power_off(genpd, true, 0);
1249 	genpd_unlock(genpd);
1250 
1251 	return 0;
1252 }
1253 
1254 /**
1255  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1256  * @dev: Device to suspend.
1257  *
1258  * Stop the device and remove power from the domain if all devices in it have
1259  * been stopped.
1260  */
1261 static int genpd_suspend_noirq(struct device *dev)
1262 {
1263 	dev_dbg(dev, "%s()\n", __func__);
1264 
1265 	return genpd_finish_suspend(dev,
1266 				    pm_generic_suspend_noirq,
1267 				    pm_generic_resume_noirq);
1268 }
1269 
1270 /**
1271  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1272  * @dev: Device to resume.
1273  * @resume_noirq: Generic resume_noirq callback.
1274  *
1275  * Restore power to the device's PM domain, if necessary, and start the device.
1276  */
1277 static int genpd_finish_resume(struct device *dev,
1278 			       int (*resume_noirq)(struct device *dev))
1279 {
1280 	struct generic_pm_domain *genpd;
1281 	int ret;
1282 
1283 	dev_dbg(dev, "%s()\n", __func__);
1284 
1285 	genpd = dev_to_genpd(dev);
1286 	if (IS_ERR(genpd))
1287 		return -EINVAL;
1288 
1289 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1290 		return resume_noirq(dev);
1291 
1292 	genpd_lock(genpd);
1293 	genpd_sync_power_on(genpd, true, 0);
1294 	genpd->suspended_count--;
1295 	genpd_unlock(genpd);
1296 
1297 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1298 	    !pm_runtime_status_suspended(dev)) {
1299 		ret = genpd_start_dev(genpd, dev);
1300 		if (ret)
1301 			return ret;
1302 	}
1303 
1304 	return resume_noirq(dev);
1305 }
1306 
1307 /**
1308  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1309  * @dev: Device to resume.
1310  *
1311  * Restore power to the device's PM domain, if necessary, and start the device.
1312  */
1313 static int genpd_resume_noirq(struct device *dev)
1314 {
1315 	dev_dbg(dev, "%s()\n", __func__);
1316 
1317 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1318 }
1319 
1320 /**
1321  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1322  * @dev: Device to freeze.
1323  *
1324  * Carry out a late freeze of a device under the assumption that its
1325  * pm_domain field points to the domain member of an object of type
1326  * struct generic_pm_domain representing a power domain consisting of I/O
1327  * devices.
1328  */
1329 static int genpd_freeze_noirq(struct device *dev)
1330 {
1331 	dev_dbg(dev, "%s()\n", __func__);
1332 
1333 	return genpd_finish_suspend(dev,
1334 				    pm_generic_freeze_noirq,
1335 				    pm_generic_thaw_noirq);
1336 }
1337 
1338 /**
1339  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1340  * @dev: Device to thaw.
1341  *
1342  * Start the device, unless power has been removed from the domain already
1343  * before the system transition.
1344  */
1345 static int genpd_thaw_noirq(struct device *dev)
1346 {
1347 	dev_dbg(dev, "%s()\n", __func__);
1348 
1349 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1350 }
1351 
1352 /**
1353  * genpd_poweroff_noirq - Completion of hibernation of device in an
1354  *   I/O PM domain.
1355  * @dev: Device to poweroff.
1356  *
1357  * Stop the device and remove power from the domain if all devices in it have
1358  * been stopped.
1359  */
1360 static int genpd_poweroff_noirq(struct device *dev)
1361 {
1362 	dev_dbg(dev, "%s()\n", __func__);
1363 
1364 	return genpd_finish_suspend(dev,
1365 				    pm_generic_poweroff_noirq,
1366 				    pm_generic_restore_noirq);
1367 }
1368 
1369 /**
1370  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1371  * @dev: Device to resume.
1372  *
1373  * Make sure the domain will be in the same power state as it was before the
1374  * hibernation the system is resuming from, and start the device if necessary.
1375  */
1376 static int genpd_restore_noirq(struct device *dev)
1377 {
1378 	dev_dbg(dev, "%s()\n", __func__);
1379 
1380 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1381 }
1382 
1383 /**
1384  * genpd_complete - Complete power transition of a device in a power domain.
1385  * @dev: Device to complete the transition of.
1386  *
1387  * Complete a power transition of a device (during a system-wide power
1388  * transition) under the assumption that its pm_domain field points to the
1389  * domain member of an object of type struct generic_pm_domain representing
1390  * a power domain consisting of I/O devices.
1391  */
1392 static void genpd_complete(struct device *dev)
1393 {
1394 	struct generic_pm_domain *genpd;
1395 
1396 	dev_dbg(dev, "%s()\n", __func__);
1397 
1398 	genpd = dev_to_genpd(dev);
1399 	if (IS_ERR(genpd))
1400 		return;
1401 
1402 	pm_generic_complete(dev);
1403 
1404 	genpd_lock(genpd);
1405 
1406 	genpd->prepared_count--;
1407 	if (!genpd->prepared_count)
1408 		genpd_queue_power_off_work(genpd);
1409 
1410 	genpd_unlock(genpd);
1411 }
1412 
1413 static void genpd_switch_state(struct device *dev, bool suspend)
1414 {
1415 	struct generic_pm_domain *genpd;
1416 	bool use_lock;
1417 
1418 	genpd = dev_to_genpd_safe(dev);
1419 	if (!genpd)
1420 		return;
1421 
1422 	use_lock = genpd_is_irq_safe(genpd);
1423 
1424 	if (use_lock)
1425 		genpd_lock(genpd);
1426 
1427 	if (suspend) {
1428 		genpd->suspended_count++;
1429 		genpd_sync_power_off(genpd, use_lock, 0);
1430 	} else {
1431 		genpd_sync_power_on(genpd, use_lock, 0);
1432 		genpd->suspended_count--;
1433 	}
1434 
1435 	if (use_lock)
1436 		genpd_unlock(genpd);
1437 }
1438 
1439 /**
1440  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1441  * @dev: The device that is attached to the genpd, that can be suspended.
1442  *
1443  * This routine should typically be called for a device that needs to be
1444  * suspended during the syscore suspend phase. It may also be called during
1445  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1446  * genpd.
1447  */
1448 void dev_pm_genpd_suspend(struct device *dev)
1449 {
1450 	genpd_switch_state(dev, true);
1451 }
1452 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1453 
1454 /**
1455  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1456  * @dev: The device that is attached to the genpd, which needs to be resumed.
1457  *
1458  * This routine should typically be called for a device that needs to be resumed
1459  * during the syscore resume phase. It may also be called during suspend-to-idle
1460  * to resume a corresponding CPU device that is attached to a genpd.
1461  */
1462 void dev_pm_genpd_resume(struct device *dev)
1463 {
1464 	genpd_switch_state(dev, false);
1465 }
1466 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
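
/*
 * Usage sketch (editor's illustration): pairing the two helpers above in
 * syscore ops for a device that must be handled outside the regular
 * device PM phases. "foo_dev" and the ops are hypothetical.
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend = foo_syscore_suspend,
 *		.resume = foo_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&foo_syscore_ops);
 */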
1467 
1468 #else /* !CONFIG_PM_SLEEP */
1469 
1470 #define genpd_prepare		NULL
1471 #define genpd_suspend_noirq	NULL
1472 #define genpd_resume_noirq	NULL
1473 #define genpd_freeze_noirq	NULL
1474 #define genpd_thaw_noirq	NULL
1475 #define genpd_poweroff_noirq	NULL
1476 #define genpd_restore_noirq	NULL
1477 #define genpd_complete		NULL
1478 
1479 #endif /* CONFIG_PM_SLEEP */
1480 
1481 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1482 							   bool has_governor)
1483 {
1484 	struct generic_pm_domain_data *gpd_data;
1485 	struct gpd_timing_data *td;
1486 	int ret;
1487 
1488 	ret = dev_pm_get_subsys_data(dev);
1489 	if (ret)
1490 		return ERR_PTR(ret);
1491 
1492 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1493 	if (!gpd_data) {
1494 		ret = -ENOMEM;
1495 		goto err_put;
1496 	}
1497 
1498 	gpd_data->base.dev = dev;
1499 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1500 
1501 	/* Allocate data used by a governor. */
1502 	if (has_governor) {
1503 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1504 		if (!td) {
1505 			ret = -ENOMEM;
1506 			goto err_free;
1507 		}
1508 
1509 		td->constraint_changed = true;
1510 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1511 		td->next_wakeup = KTIME_MAX;
1512 		gpd_data->td = td;
1513 	}
1514 
1515 	spin_lock_irq(&dev->power.lock);
1516 
1517 	if (dev->power.subsys_data->domain_data)
1518 		ret = -EINVAL;
1519 	else
1520 		dev->power.subsys_data->domain_data = &gpd_data->base;
1521 
1522 	spin_unlock_irq(&dev->power.lock);
1523 
1524 	if (ret)
1525 		goto err_free;
1526 
1527 	return gpd_data;
1528 
1529  err_free:
1530 	kfree(gpd_data->td);
1531 	kfree(gpd_data);
1532  err_put:
1533 	dev_pm_put_subsys_data(dev);
1534 	return ERR_PTR(ret);
1535 }
1536 
1537 static void genpd_free_dev_data(struct device *dev,
1538 				struct generic_pm_domain_data *gpd_data)
1539 {
1540 	spin_lock_irq(&dev->power.lock);
1541 
1542 	dev->power.subsys_data->domain_data = NULL;
1543 
1544 	spin_unlock_irq(&dev->power.lock);
1545 
1546 	kfree(gpd_data->td);
1547 	kfree(gpd_data);
1548 	dev_pm_put_subsys_data(dev);
1549 }
1550 
1551 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1552 				 int cpu, bool set, unsigned int depth)
1553 {
1554 	struct gpd_link *link;
1555 
1556 	if (!genpd_is_cpu_domain(genpd))
1557 		return;
1558 
1559 	list_for_each_entry(link, &genpd->child_links, child_node) {
1560 		struct generic_pm_domain *parent = link->parent;
1561 
1562 		genpd_lock_nested(parent, depth + 1);
1563 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1564 		genpd_unlock(parent);
1565 	}
1566 
1567 	if (set)
1568 		cpumask_set_cpu(cpu, genpd->cpus);
1569 	else
1570 		cpumask_clear_cpu(cpu, genpd->cpus);
1571 }
1572 
1573 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1574 {
1575 	if (cpu >= 0)
1576 		genpd_update_cpumask(genpd, cpu, true, 0);
1577 }
1578 
1579 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1580 {
1581 	if (cpu >= 0)
1582 		genpd_update_cpumask(genpd, cpu, false, 0);
1583 }
1584 
1585 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1586 {
1587 	int cpu;
1588 
1589 	if (!genpd_is_cpu_domain(genpd))
1590 		return -1;
1591 
1592 	for_each_possible_cpu(cpu) {
1593 		if (get_cpu_device(cpu) == dev)
1594 			return cpu;
1595 	}
1596 
1597 	return -1;
1598 }
1599 
1600 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1601 			    struct device *base_dev)
1602 {
1603 	struct genpd_governor_data *gd = genpd->gd;
1604 	struct generic_pm_domain_data *gpd_data;
1605 	int ret;
1606 
1607 	dev_dbg(dev, "%s()\n", __func__);
1608 
1609 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1610 		return -EINVAL;
1611 
1612 	gpd_data = genpd_alloc_dev_data(dev, gd);
1613 	if (IS_ERR(gpd_data))
1614 		return PTR_ERR(gpd_data);
1615 
1616 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1617 
1618 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1619 	if (ret)
1620 		goto out;
1621 
1622 	genpd_lock(genpd);
1623 
1624 	genpd_set_cpumask(genpd, gpd_data->cpu);
1625 	dev_pm_domain_set(dev, &genpd->domain);
1626 
1627 	genpd->device_count++;
1628 	if (gd)
1629 		gd->max_off_time_changed = true;
1630 
1631 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1632 
1633 	genpd_unlock(genpd);
1634  out:
1635 	if (ret)
1636 		genpd_free_dev_data(dev, gpd_data);
1637 	else
1638 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1639 					DEV_PM_QOS_RESUME_LATENCY);
1640 
1641 	return ret;
1642 }
1643 
1644 /**
1645  * pm_genpd_add_device - Add a device to an I/O PM domain.
1646  * @genpd: PM domain to add the device to.
1647  * @dev: Device to be added.
1648  */
1649 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1650 {
1651 	int ret;
1652 
1653 	mutex_lock(&gpd_list_lock);
1654 	ret = genpd_add_device(genpd, dev, dev);
1655 	mutex_unlock(&gpd_list_lock);
1656 
1657 	return ret;
1658 }
1659 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
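
/*
 * Usage sketch (editor's illustration): a non-DT board file attaching a
 * platform device to a domain it has initialized with pm_genpd_init().
 * "foo_pd" and "foo_pdev" are hypothetical.
 *
 *	ret = pm_genpd_add_device(&foo_pd, &foo_pdev->dev);
 *	if (ret)
 *		dev_err(&foo_pdev->dev, "failed to add to PM domain: %d\n", ret);
 */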
1660 
1661 static int genpd_remove_device(struct generic_pm_domain *genpd,
1662 			       struct device *dev)
1663 {
1664 	struct generic_pm_domain_data *gpd_data;
1665 	struct pm_domain_data *pdd;
1666 	int ret = 0;
1667 
1668 	dev_dbg(dev, "%s()\n", __func__);
1669 
1670 	pdd = dev->power.subsys_data->domain_data;
1671 	gpd_data = to_gpd_data(pdd);
1672 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1673 				   DEV_PM_QOS_RESUME_LATENCY);
1674 
1675 	genpd_lock(genpd);
1676 
1677 	if (genpd->prepared_count > 0) {
1678 		ret = -EAGAIN;
1679 		goto out;
1680 	}
1681 
1682 	genpd->device_count--;
1683 	if (genpd->gd)
1684 		genpd->gd->max_off_time_changed = true;
1685 
1686 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1687 	dev_pm_domain_set(dev, NULL);
1688 
1689 	list_del_init(&pdd->list_node);
1690 
1691 	genpd_unlock(genpd);
1692 
1693 	if (genpd->detach_dev)
1694 		genpd->detach_dev(genpd, dev);
1695 
1696 	genpd_free_dev_data(dev, gpd_data);
1697 
1698 	return 0;
1699 
1700  out:
1701 	genpd_unlock(genpd);
1702 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1703 
1704 	return ret;
1705 }
1706 
1707 /**
1708  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1709  * @dev: Device to be removed.
1710  */
1711 int pm_genpd_remove_device(struct device *dev)
1712 {
1713 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1714 
1715 	if (!genpd)
1716 		return -EINVAL;
1717 
1718 	return genpd_remove_device(genpd, dev);
1719 }
1720 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1721 
1722 /**
1723  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1724  *
1725  * @dev: Device that should be associated with the notifier
1726  * @nb: The notifier block to register
1727  *
1728  * Users may call this function to add a genpd power on/off notifier for an
1729  * attached @dev. Only one notifier per device is allowed. The notifier is
1730  * sent when genpd is powering on/off the PM domain.
1731  *
1732  * It is assumed that the caller guarantees that the genpd won't be detached
1733  * while this routine is running.
1734  *
1735  * Returns 0 on success and negative error values on failures.
1736  */
1737 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1738 {
1739 	struct generic_pm_domain *genpd;
1740 	struct generic_pm_domain_data *gpd_data;
1741 	int ret;
1742 
1743 	genpd = dev_to_genpd_safe(dev);
1744 	if (!genpd)
1745 		return -ENODEV;
1746 
1747 	if (WARN_ON(!dev->power.subsys_data ||
1748 		     !dev->power.subsys_data->domain_data))
1749 		return -EINVAL;
1750 
1751 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1752 	if (gpd_data->power_nb)
1753 		return -EEXIST;
1754 
1755 	genpd_lock(genpd);
1756 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1757 	genpd_unlock(genpd);
1758 
1759 	if (ret) {
1760 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1761 			 genpd->name);
1762 		return ret;
1763 	}
1764 
1765 	gpd_data->power_nb = nb;
1766 	return 0;
1767 }
1768 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
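
/*
 * Usage sketch (editor's illustration): a driver registering for power
 * on/off notifications, e.g. to save and restore hardware context. The
 * names are hypothetical; the notifier is unregistered again with
 * dev_pm_genpd_remove_notifier() below.
 *
 *	static int foo_pd_cb(struct notifier_block *nb,
 *			     unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			// save context before the domain loses power
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			// restore context after power returns
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_pd_nb = {
 *		.notifier_call = foo_pd_cb,
 *	};
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_pd_nb);
 */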
1769 
1770 /**
1771  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1772  *
1773  * @dev: Device that is associated with the notifier
1774  *
1775  * Users may call this function to remove a genpd power on/off notifier for an
1776  * attached @dev.
1777  *
1778  * It is assumed that the caller guarantees that the genpd won't be detached
1779  * while this routine is running.
1780  *
1781  * Returns 0 on success and negative error values on failures.
1782  */
1783 int dev_pm_genpd_remove_notifier(struct device *dev)
1784 {
1785 	struct generic_pm_domain *genpd;
1786 	struct generic_pm_domain_data *gpd_data;
1787 	int ret;
1788 
1789 	genpd = dev_to_genpd_safe(dev);
1790 	if (!genpd)
1791 		return -ENODEV;
1792 
1793 	if (WARN_ON(!dev->power.subsys_data ||
1794 		     !dev->power.subsys_data->domain_data))
1795 		return -EINVAL;
1796 
1797 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1798 	if (!gpd_data->power_nb)
1799 		return -ENODEV;
1800 
1801 	genpd_lock(genpd);
1802 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1803 					    gpd_data->power_nb);
1804 	genpd_unlock(genpd);
1805 
1806 	if (ret) {
1807 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1808 			 genpd->name);
1809 		return ret;
1810 	}
1811 
1812 	gpd_data->power_nb = NULL;
1813 	return 0;
1814 }
1815 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1816 
1817 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1818 			       struct generic_pm_domain *subdomain)
1819 {
1820 	struct gpd_link *link, *itr;
1821 	int ret = 0;
1822 
1823 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1824 	    || genpd == subdomain)
1825 		return -EINVAL;
1826 
1827 	/*
1828 	 * If the domain can be powered on/off in an IRQ safe
1829 	 * context, ensure that the subdomain can also be
1830 	 * powered on/off in that context.
1831 	 */
1832 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1833 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1834 				genpd->name, subdomain->name);
1835 		return -EINVAL;
1836 	}
1837 
1838 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1839 	if (!link)
1840 		return -ENOMEM;
1841 
1842 	genpd_lock(subdomain);
1843 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1844 
1845 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1846 		ret = -EINVAL;
1847 		goto out;
1848 	}
1849 
1850 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1851 		if (itr->child == subdomain && itr->parent == genpd) {
1852 			ret = -EINVAL;
1853 			goto out;
1854 		}
1855 	}
1856 
1857 	link->parent = genpd;
1858 	list_add_tail(&link->parent_node, &genpd->parent_links);
1859 	link->child = subdomain;
1860 	list_add_tail(&link->child_node, &subdomain->child_links);
1861 	if (genpd_status_on(subdomain))
1862 		genpd_sd_counter_inc(genpd);
1863 
1864  out:
1865 	genpd_unlock(genpd);
1866 	genpd_unlock(subdomain);
1867 	if (ret)
1868 		kfree(link);
1869 	return ret;
1870 }
1871 
1872 /**
1873  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1874  * @genpd: Leader PM domain to add the subdomain to.
1875  * @subdomain: Subdomain to be added.
1876  */
1877 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1878 			   struct generic_pm_domain *subdomain)
1879 {
1880 	int ret;
1881 
1882 	mutex_lock(&gpd_list_lock);
1883 	ret = genpd_add_subdomain(genpd, subdomain);
1884 	mutex_unlock(&gpd_list_lock);
1885 
1886 	return ret;
1887 }
1888 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
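
/*
 * Usage sketch (editor's illustration): linking two domains so that
 * "foo_child_pd" can only be powered on while "foo_parent_pd" is on;
 * pm_genpd_remove_subdomain() below undoes the link. Names are
 * hypothetical.
 *
 *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *	if (ret)
 *		pr_err("failed to link %s under %s: %d\n",
 *		       foo_child_pd.name, foo_parent_pd.name, ret);
 */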
1889 
1890 /**
1891  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1892  * @genpd: Leader PM domain to remove the subdomain from.
1893  * @subdomain: Subdomain to be removed.
1894  */
1895 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1896 			      struct generic_pm_domain *subdomain)
1897 {
1898 	struct gpd_link *l, *link;
1899 	int ret = -EINVAL;
1900 
1901 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1902 		return -EINVAL;
1903 
1904 	genpd_lock(subdomain);
1905 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1906 
1907 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1908 		pr_warn("%s: unable to remove subdomain %s\n",
1909 			genpd->name, subdomain->name);
1910 		ret = -EBUSY;
1911 		goto out;
1912 	}
1913 
1914 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1915 		if (link->child != subdomain)
1916 			continue;
1917 
1918 		list_del(&link->parent_node);
1919 		list_del(&link->child_node);
1920 		kfree(link);
1921 		if (genpd_status_on(subdomain))
1922 			genpd_sd_counter_dec(genpd);
1923 
1924 		ret = 0;
1925 		break;
1926 	}
1927 
1928 out:
1929 	genpd_unlock(genpd);
1930 	genpd_unlock(subdomain);
1931 
1932 	return ret;
1933 }
1934 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1935 
1936 static void genpd_free_default_power_state(struct genpd_power_state *states,
1937 					   unsigned int state_count)
1938 {
1939 	kfree(states);
1940 }
1941 
1942 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1943 {
1944 	struct genpd_power_state *state;
1945 
1946 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1947 	if (!state)
1948 		return -ENOMEM;
1949 
1950 	genpd->states = state;
1951 	genpd->state_count = 1;
1952 	genpd->free_states = genpd_free_default_power_state;
1953 
1954 	return 0;
1955 }
1956 
1957 static int genpd_alloc_data(struct generic_pm_domain *genpd)
1958 {
1959 	struct genpd_governor_data *gd = NULL;
1960 	int ret;
1961 
1962 	if (genpd_is_cpu_domain(genpd) &&
1963 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1964 		return -ENOMEM;
1965 
1966 	if (genpd->gov) {
1967 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1968 		if (!gd) {
1969 			ret = -ENOMEM;
1970 			goto free;
1971 		}
1972 
1973 		gd->max_off_time_ns = -1;
1974 		gd->max_off_time_changed = true;
1975 		gd->next_wakeup = KTIME_MAX;
1976 		gd->next_hrtimer = KTIME_MAX;
1977 	}
1978 
1979 	/* Use only one "off" state if there were no states declared */
1980 	if (genpd->state_count == 0) {
1981 		ret = genpd_set_default_power_state(genpd);
1982 		if (ret)
1983 			goto free;
1984 	}
1985 
1986 	genpd->gd = gd;
1987 	return 0;
1988 
1989 free:
1990 	if (genpd_is_cpu_domain(genpd))
1991 		free_cpumask_var(genpd->cpus);
1992 	kfree(gd);
1993 	return ret;
1994 }
1995 
1996 static void genpd_free_data(struct generic_pm_domain *genpd)
1997 {
1998 	if (genpd_is_cpu_domain(genpd))
1999 		free_cpumask_var(genpd->cpus);
2000 	if (genpd->free_states)
2001 		genpd->free_states(genpd->states, genpd->state_count);
2002 	kfree(genpd->gd);
2003 }
2004 
2005 static void genpd_lock_init(struct generic_pm_domain *genpd)
2006 {
2007 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2008 		spin_lock_init(&genpd->slock);
2009 		genpd->lock_ops = &genpd_spin_ops;
2010 	} else {
2011 		mutex_init(&genpd->mlock);
2012 		genpd->lock_ops = &genpd_mtx_ops;
2013 	}
2014 }
2015 
2016 /**
2017  * pm_genpd_init - Initialize a generic I/O PM domain object.
2018  * @genpd: PM domain object to initialize.
2019  * @gov: PM domain governor to associate with the domain (may be NULL).
2020  * @is_off: Initial power state of the domain (true if it starts powered off).
2021  *
2022  * Returns 0 on successful initialization, else a negative error code.
2023  */
2024 int pm_genpd_init(struct generic_pm_domain *genpd,
2025 		  struct dev_power_governor *gov, bool is_off)
2026 {
2027 	int ret;
2028 
2029 	if (IS_ERR_OR_NULL(genpd))
2030 		return -EINVAL;
2031 
2032 	INIT_LIST_HEAD(&genpd->parent_links);
2033 	INIT_LIST_HEAD(&genpd->child_links);
2034 	INIT_LIST_HEAD(&genpd->dev_list);
2035 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2036 	genpd_lock_init(genpd);
2037 	genpd->gov = gov;
2038 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2039 	atomic_set(&genpd->sd_count, 0);
2040 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2041 	genpd->device_count = 0;
2042 	genpd->provider = NULL;
2043 	genpd->has_provider = false;
2044 	genpd->accounting_time = ktime_get_mono_fast_ns();
2045 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2046 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2047 	genpd->domain.ops.prepare = genpd_prepare;
2048 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2049 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2050 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2051 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2052 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2053 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2054 	genpd->domain.ops.complete = genpd_complete;
2055 	genpd->domain.start = genpd_dev_pm_start;
2056 
2057 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2058 		genpd->dev_ops.stop = pm_clk_suspend;
2059 		genpd->dev_ops.start = pm_clk_resume;
2060 	}
2061 
2062 	/* The always-on governor works better with the corresponding flag. */
2063 	if (gov == &pm_domain_always_on_gov)
2064 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2065 
2066 	/* Always-on domains must be powered on at initialization. */
2067 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2068 			!genpd_status_on(genpd)) {
2069 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2070 		return -EINVAL;
2071 	}
2072 
2073 	/* Multiple states but no governor doesn't make sense. */
2074 	if (!gov && genpd->state_count > 1)
2075 		pr_warn("%s: no governor for states\n", genpd->name);
2076 
2077 	ret = genpd_alloc_data(genpd);
2078 	if (ret)
2079 		return ret;
2080 
2081 	device_initialize(&genpd->dev);
2082 	dev_set_name(&genpd->dev, "%s", genpd->name);
2083 
2084 	mutex_lock(&gpd_list_lock);
2085 	list_add(&genpd->gpd_list_node, &gpd_list);
2086 	mutex_unlock(&gpd_list_lock);
2087 	genpd_debug_add(genpd);
2088 
2089 	return 0;
2090 }
2091 EXPORT_SYMBOL_GPL(pm_genpd_init);
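
/*
 * Editorial sketch of a minimal provider-side domain definition. The
 * foo_pmc_set() register helper and FOO_PD_CORE constant are hypothetical
 * stand-ins for a SoC's power-controller accessor.
 */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	return foo_pmc_set(FOO_PD_CORE, true);
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	return foo_pmc_set(FOO_PD_CORE, false);
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-core",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_register(void)
{
	/* Register the domain, initially off and without a governor. */
	return pm_genpd_init(&foo_pd, NULL, true);
}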
2092 
2093 static int genpd_remove(struct generic_pm_domain *genpd)
2094 {
2095 	struct gpd_link *l, *link;
2096 
2097 	if (IS_ERR_OR_NULL(genpd))
2098 		return -EINVAL;
2099 
2100 	genpd_lock(genpd);
2101 
2102 	if (genpd->has_provider) {
2103 		genpd_unlock(genpd);
2104 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2105 		return -EBUSY;
2106 	}
2107 
2108 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2109 		genpd_unlock(genpd);
2110 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2111 		return -EBUSY;
2112 	}
2113 
2114 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2115 		list_del(&link->parent_node);
2116 		list_del(&link->child_node);
2117 		kfree(link);
2118 	}
2119 
2120 	list_del(&genpd->gpd_list_node);
2121 	genpd_unlock(genpd);
2122 	genpd_debug_remove(genpd);
2123 	cancel_work_sync(&genpd->power_off_work);
2124 	genpd_free_data(genpd);
2125 
2126 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2127 
2128 	return 0;
2129 }
2130 
2131 /**
2132  * pm_genpd_remove - Remove a generic I/O PM domain
2133  * @genpd: Pointer to PM domain that is to be removed.
2134  *
2135  * To remove the PM domain, this function:
2136  *  - Removes the PM domain as a subdomain to any parent domains,
2137  *    if it was added.
2138  *  - Removes the PM domain from the list of registered PM domains.
2139  *
2140  * The PM domain will only be removed if the associated provider has
2141  * been removed, it is not a parent of any other PM domain, and it has
2142  * no devices associated with it.
2143  */
2144 int pm_genpd_remove(struct generic_pm_domain *genpd)
2145 {
2146 	int ret;
2147 
2148 	mutex_lock(&gpd_list_lock);
2149 	ret = genpd_remove(genpd);
2150 	mutex_unlock(&gpd_list_lock);
2151 
2152 	return ret;
2153 }
2154 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2155 
2156 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2157 
2158 /*
2159  * Device Tree based PM domain providers.
2160  *
2161  * The code below implements generic device tree based PM domain providers that
2162  * bind device tree nodes with generic PM domains registered in the system.
2163  *
2164  * Any driver that registers generic PM domains and needs to support binding of
2165  * devices to these domains is supposed to register a PM domain provider, which
2166  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2167  *
2168  * Two simple mapping functions have been provided for convenience:
2169  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2170  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2171  *    index.
2172  */
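
/*
 * Illustrative DT fragment (editorial; node names and compatibles are made
 * up) showing the binding shape these providers implement: a provider node
 * publishes #power-domain-cells and consumers reference it via
 * power-domains:
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	serial@12350000 {
 *		compatible = "foo,uart";
 *		reg = <0x12350000 0x100>;
 *		power-domains = <&power 0>;
 *	};
 */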
2173 
2174 /**
2175  * struct of_genpd_provider - PM domain provider registration structure
2176  * @link: Entry in global list of PM domain providers
2177  * @node: Pointer to device tree node of PM domain provider
2178  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2179  *         into a PM domain.
2180  * @data: context pointer to be passed into @xlate callback
2181  */
2182 struct of_genpd_provider {
2183 	struct list_head link;
2184 	struct device_node *node;
2185 	genpd_xlate_t xlate;
2186 	void *data;
2187 };
2188 
2189 /* List of registered PM domain providers. */
2190 static LIST_HEAD(of_genpd_providers);
2191 /* Mutex to protect the list above. */
2192 static DEFINE_MUTEX(of_genpd_mutex);
2193 
2194 /**
2195  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2196  * @genpdspec: OF phandle args to map into a PM domain
2197  * @data: xlate function private data - pointer to struct generic_pm_domain
2198  *
2199  * This is a generic xlate function that can be used to model PM domains that
2200  * have their own device tree nodes. The private data of the xlate function
2201  * must be a valid pointer to struct generic_pm_domain.
2202  */
2203 static struct generic_pm_domain *genpd_xlate_simple(
2204 					struct of_phandle_args *genpdspec,
2205 					void *data)
2206 {
2207 	return data;
2208 }
2209 
2210 /**
2211  * genpd_xlate_onecell() - Xlate function using a single index.
2212  * @genpdspec: OF phandle args to map into a PM domain
2213  * @data: xlate function private data - pointer to struct genpd_onecell_data
2214  *
2215  * This is a generic xlate function that can be used to model simple PM domain
2216  * controllers that have one device tree node and provide multiple PM domains.
2217  * A single cell is used as an index into an array of PM domains specified in
2218  * the genpd_onecell_data struct when registering the provider.
2219  */
2220 static struct generic_pm_domain *genpd_xlate_onecell(
2221 					struct of_phandle_args *genpdspec,
2222 					void *data)
2223 {
2224 	struct genpd_onecell_data *genpd_data = data;
2225 	unsigned int idx = genpdspec->args[0];
2226 
2227 	if (genpdspec->args_count != 1)
2228 		return ERR_PTR(-EINVAL);
2229 
2230 	if (idx >= genpd_data->num_domains) {
2231 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2232 		return ERR_PTR(-EINVAL);
2233 	}
2234 
2235 	if (!genpd_data->domains[idx])
2236 		return ERR_PTR(-ENOENT);
2237 
2238 	return genpd_data->domains[idx];
2239 }
2240 
2241 /**
2242  * genpd_add_provider() - Register a PM domain provider for a node
2243  * @np: Device node pointer associated with the PM domain provider.
2244  * @xlate: Callback for decoding PM domain from phandle arguments.
2245  * @data: Context pointer for @xlate callback.
2246  */
2247 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2248 			      void *data)
2249 {
2250 	struct of_genpd_provider *cp;
2251 
2252 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2253 	if (!cp)
2254 		return -ENOMEM;
2255 
2256 	cp->node = of_node_get(np);
2257 	cp->data = data;
2258 	cp->xlate = xlate;
2259 	fwnode_dev_initialized(&np->fwnode, true);
2260 
2261 	mutex_lock(&of_genpd_mutex);
2262 	list_add(&cp->link, &of_genpd_providers);
2263 	mutex_unlock(&of_genpd_mutex);
2264 	pr_debug("Added domain provider from %pOF\n", np);
2265 
2266 	return 0;
2267 }
2268 
2269 static bool genpd_present(const struct generic_pm_domain *genpd)
2270 {
2271 	bool ret = false;
2272 	const struct generic_pm_domain *gpd;
2273 
2274 	mutex_lock(&gpd_list_lock);
2275 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2276 		if (gpd == genpd) {
2277 			ret = true;
2278 			break;
2279 		}
2280 	}
2281 	mutex_unlock(&gpd_list_lock);
2282 
2283 	return ret;
2284 }
2285 
2286 /**
2287  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2288  * @np: Device node pointer associated with the PM domain provider.
2289  * @genpd: Pointer to PM domain associated with the PM domain provider.
2290  */
2291 int of_genpd_add_provider_simple(struct device_node *np,
2292 				 struct generic_pm_domain *genpd)
2293 {
2294 	int ret;
2295 
2296 	if (!np || !genpd)
2297 		return -EINVAL;
2298 
2299 	if (!genpd_present(genpd))
2300 		return -EINVAL;
2301 
2302 	genpd->dev.of_node = np;
2303 
2304 	/* Parse genpd OPP table */
2305 	if (genpd->set_performance_state) {
2306 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2307 		if (ret)
2308 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2309 
2310 		/*
2311 		 * Save table for faster processing while setting performance
2312 		 * state.
2313 		 */
2314 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2315 		WARN_ON(IS_ERR(genpd->opp_table));
2316 	}
2317 
2318 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2319 	if (ret) {
2320 		if (genpd->set_performance_state) {
2321 			dev_pm_opp_put_opp_table(genpd->opp_table);
2322 			dev_pm_opp_of_remove_table(&genpd->dev);
2323 		}
2324 
2325 		return ret;
2326 	}
2327 
2328 	genpd->provider = &np->fwnode;
2329 	genpd->has_provider = true;
2330 
2331 	return 0;
2332 }
2333 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
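
/*
 * Editorial sketch: registering a single domain from a provider driver's
 * probe path; foo_pd is the hypothetical domain from the pm_genpd_init()
 * example above.
 */
static int foo_pd_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&foo_pd, NULL, true);
	if (ret)
		return ret;

	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
	if (ret)
		pm_genpd_remove(&foo_pd);

	return ret;
}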
2334 
2335 /**
2336  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2337  * @np: Device node pointer associated with the PM domain provider.
2338  * @data: Pointer to the data associated with the PM domain provider.
2339  */
2340 int of_genpd_add_provider_onecell(struct device_node *np,
2341 				  struct genpd_onecell_data *data)
2342 {
2343 	struct generic_pm_domain *genpd;
2344 	unsigned int i;
2345 	int ret = -EINVAL;
2346 
2347 	if (!np || !data)
2348 		return -EINVAL;
2349 
2350 	if (!data->xlate)
2351 		data->xlate = genpd_xlate_onecell;
2352 
2353 	for (i = 0; i < data->num_domains; i++) {
2354 		genpd = data->domains[i];
2355 
2356 		if (!genpd)
2357 			continue;
2358 		if (!genpd_present(genpd))
2359 			goto error;
2360 
2361 		genpd->dev.of_node = np;
2362 
2363 		/* Parse genpd OPP table */
2364 		if (genpd->set_performance_state) {
2365 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2366 			if (ret) {
2367 				dev_err_probe(&genpd->dev, ret,
2368 					      "Failed to add OPP table for index %d\n", i);
2369 				goto error;
2370 			}
2371 
2372 			/*
2373 			 * Save table for faster processing while setting
2374 			 * performance state.
2375 			 */
2376 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2377 			WARN_ON(IS_ERR(genpd->opp_table));
2378 		}
2379 
2380 		genpd->provider = &np->fwnode;
2381 		genpd->has_provider = true;
2382 	}
2383 
2384 	ret = genpd_add_provider(np, data->xlate, data);
2385 	if (ret < 0)
2386 		goto error;
2387 
2388 	return 0;
2389 
2390 error:
2391 	while (i--) {
2392 		genpd = data->domains[i];
2393 
2394 		if (!genpd)
2395 			continue;
2396 
2397 		genpd->provider = NULL;
2398 		genpd->has_provider = false;
2399 
2400 		if (genpd->set_performance_state) {
2401 			dev_pm_opp_put_opp_table(genpd->opp_table);
2402 			dev_pm_opp_of_remove_table(&genpd->dev);
2403 		}
2404 	}
2405 
2406 	return ret;
2407 }
2408 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
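
/*
 * Editorial sketch of a onecell provider: the single cell in a consumer's
 * power-domains specifier indexes this array. foo_pd_a and foo_pd_b are
 * hypothetical domains, already initialized with pm_genpd_init().
 */
static struct generic_pm_domain *foo_domains[] = {
	&foo_pd_a,	/* power-domains = <&provider 0> */
	&foo_pd_b,	/* power-domains = <&provider 1> */
};

static struct genpd_onecell_data foo_pd_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
};

static int foo_onecell_probe(struct platform_device *pdev)
{
	/* ... pm_genpd_init() each domain first ... */
	return of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
}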
2409 
2410 /**
2411  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2412  * @np: Device node pointer associated with the PM domain provider
2413  */
2414 void of_genpd_del_provider(struct device_node *np)
2415 {
2416 	struct of_genpd_provider *cp, *tmp;
2417 	struct generic_pm_domain *gpd;
2418 
2419 	mutex_lock(&gpd_list_lock);
2420 	mutex_lock(&of_genpd_mutex);
2421 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2422 		if (cp->node == np) {
2423 			/*
2424 			 * For each PM domain associated with the
2425 			 * provider, set the 'has_provider' to false
2426 			 * so that the PM domain can be safely removed.
2427 			 */
2428 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2429 				if (gpd->provider == &np->fwnode) {
2430 					gpd->has_provider = false;
2431 
2432 					if (!gpd->set_performance_state)
2433 						continue;
2434 
2435 					dev_pm_opp_put_opp_table(gpd->opp_table);
2436 					dev_pm_opp_of_remove_table(&gpd->dev);
2437 				}
2438 			}
2439 
2440 			fwnode_dev_initialized(&cp->node->fwnode, false);
2441 			list_del(&cp->link);
2442 			of_node_put(cp->node);
2443 			kfree(cp);
2444 			break;
2445 		}
2446 	}
2447 	mutex_unlock(&of_genpd_mutex);
2448 	mutex_unlock(&gpd_list_lock);
2449 }
2450 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
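
/*
 * Editorial sketch of teardown ordering: the provider must be deleted
 * first, since genpd_remove() refuses to remove a domain while
 * has_provider is still set.
 */
static int foo_pd_teardown(struct platform_device *pdev)
{
	of_genpd_del_provider(pdev->dev.of_node);
	return pm_genpd_remove(&foo_pd);
}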
2451 
2452 /**
2453  * genpd_get_from_provider() - Look-up PM domain
2454  * @genpdspec: OF phandle args to use for look-up
2455  *
2456  * Looks for a PM domain provider under the node specified by @genpdspec and if
2457  * found, uses the provider's xlate function to map phandle args to a PM
2458  * domain.
2459  *
2460  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2461  * on failure.
2462  */
2463 static struct generic_pm_domain *genpd_get_from_provider(
2464 					struct of_phandle_args *genpdspec)
2465 {
2466 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2467 	struct of_genpd_provider *provider;
2468 
2469 	if (!genpdspec)
2470 		return ERR_PTR(-EINVAL);
2471 
2472 	mutex_lock(&of_genpd_mutex);
2473 
2474 	/* Check if we have such a provider in our array */
2475 	list_for_each_entry(provider, &of_genpd_providers, link) {
2476 		if (provider->node == genpdspec->np)
2477 			genpd = provider->xlate(genpdspec, provider->data);
2478 		if (!IS_ERR(genpd))
2479 			break;
2480 	}
2481 
2482 	mutex_unlock(&of_genpd_mutex);
2483 
2484 	return genpd;
2485 }
2486 
2487 /**
2488  * of_genpd_add_device() - Add a device to an I/O PM domain
2489  * @genpdspec: OF phandle args to use for look-up PM domain
2490  * @dev: Device to be added.
2491  *
2492  * Looks-up an I/O PM domain based upon phandle args provided and adds
2493  * the device to the PM domain. Returns a negative error code on failure.
2494  */
2495 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2496 {
2497 	struct generic_pm_domain *genpd;
2498 	int ret;
2499 
2500 	mutex_lock(&gpd_list_lock);
2501 
2502 	genpd = genpd_get_from_provider(genpdspec);
2503 	if (IS_ERR(genpd)) {
2504 		ret = PTR_ERR(genpd);
2505 		goto out;
2506 	}
2507 
2508 	ret = genpd_add_device(genpd, dev, dev);
2509 
2510 out:
2511 	mutex_unlock(&gpd_list_lock);
2512 
2513 	return ret;
2514 }
2515 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2516 
2517 /**
2518  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2519  * @parent_spec: OF phandle args to use for parent PM domain look-up
2520  * @subdomain_spec: OF phandle args to use for subdomain look-up
2521  *
2522  * Looks-up a parent PM domain and subdomain based upon phandle args
2523  * provided and adds the subdomain to the parent PM domain. Returns a
2524  * negative error code on failure.
2525  */
2526 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2527 			   struct of_phandle_args *subdomain_spec)
2528 {
2529 	struct generic_pm_domain *parent, *subdomain;
2530 	int ret;
2531 
2532 	mutex_lock(&gpd_list_lock);
2533 
2534 	parent = genpd_get_from_provider(parent_spec);
2535 	if (IS_ERR(parent)) {
2536 		ret = PTR_ERR(parent);
2537 		goto out;
2538 	}
2539 
2540 	subdomain = genpd_get_from_provider(subdomain_spec);
2541 	if (IS_ERR(subdomain)) {
2542 		ret = PTR_ERR(subdomain);
2543 		goto out;
2544 	}
2545 
2546 	ret = genpd_add_subdomain(parent, subdomain);
2547 
2548 out:
2549 	mutex_unlock(&gpd_list_lock);
2550 
2551 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2552 }
2553 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2554 
2555 /**
2556  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2557  * @parent_spec: OF phandle args to use for parent PM domain look-up
2558  * @subdomain_spec: OF phandle args to use for subdomain look-up
2559  *
2560  * Looks-up a parent PM domain and subdomain based upon phandle args
2561  * provided and removes the subdomain from the parent PM domain. Returns a
2562  * negative error code on failure.
2563  */
2564 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2565 			      struct of_phandle_args *subdomain_spec)
2566 {
2567 	struct generic_pm_domain *parent, *subdomain;
2568 	int ret;
2569 
2570 	mutex_lock(&gpd_list_lock);
2571 
2572 	parent = genpd_get_from_provider(parent_spec);
2573 	if (IS_ERR(parent)) {
2574 		ret = PTR_ERR(parent);
2575 		goto out;
2576 	}
2577 
2578 	subdomain = genpd_get_from_provider(subdomain_spec);
2579 	if (IS_ERR(subdomain)) {
2580 		ret = PTR_ERR(subdomain);
2581 		goto out;
2582 	}
2583 
2584 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2585 
2586 out:
2587 	mutex_unlock(&gpd_list_lock);
2588 
2589 	return ret;
2590 }
2591 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2592 
2593 /**
2594  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2595  * @np: Pointer to device node associated with provider
2596  *
2597  * Find the last PM domain that was added by a particular provider and
2598  * remove this PM domain from the list of PM domains. The provider is
2599  * identified by the device node @np that is passed. The PM domain will
2600  * only be removed if the provider associated with the domain has been
2601  * removed.
2602  *
2603  * Returns a valid pointer to struct generic_pm_domain on success or
2604  * ERR_PTR() on failure.
2605  */
2606 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2607 {
2608 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2609 	int ret;
2610 
2611 	if (IS_ERR_OR_NULL(np))
2612 		return ERR_PTR(-EINVAL);
2613 
2614 	mutex_lock(&gpd_list_lock);
2615 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2616 		if (gpd->provider == &np->fwnode) {
2617 			ret = genpd_remove(gpd);
2618 			genpd = ret ? ERR_PTR(ret) : gpd;
2619 			break;
2620 		}
2621 	}
2622 	mutex_unlock(&gpd_list_lock);
2623 
2624 	return genpd;
2625 }
2626 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2627 
2628 static void genpd_release_dev(struct device *dev)
2629 {
2630 	of_node_put(dev->of_node);
2631 	kfree(dev);
2632 }
2633 
2634 static struct bus_type genpd_bus_type = {
2635 	.name		= "genpd",
2636 };
2637 
2638 /**
2639  * genpd_dev_pm_detach - Detach a device from its PM domain.
2640  * @dev: Device to detach.
2641  * @power_off: Currently not used
2642  *
2643  * Try to locate a corresponding generic PM domain, which the device was
2644  * attached to previously. If such is found, the device is detached from it.
2645  */
2646 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2647 {
2648 	struct generic_pm_domain *pd;
2649 	unsigned int i;
2650 	int ret = 0;
2651 
2652 	pd = dev_to_genpd(dev);
2653 	if (IS_ERR(pd))
2654 		return;
2655 
2656 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2657 
2658 	/* Drop the default performance state */
2659 	if (dev_gpd_data(dev)->default_pstate) {
2660 		dev_pm_genpd_set_performance_state(dev, 0);
2661 		dev_gpd_data(dev)->default_pstate = 0;
2662 	}
2663 
2664 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2665 		ret = genpd_remove_device(pd, dev);
2666 		if (ret != -EAGAIN)
2667 			break;
2668 
2669 		mdelay(i);
2670 		cond_resched();
2671 	}
2672 
2673 	if (ret < 0) {
2674 		dev_err(dev, "failed to remove from PM domain %s: %d",
2675 			pd->name, ret);
2676 		return;
2677 	}
2678 
2679 	/* Check if PM domain can be powered off after removing this device. */
2680 	genpd_queue_power_off_work(pd);
2681 
2682 	/* Unregister the device if it was created by genpd. */
2683 	if (dev->bus == &genpd_bus_type)
2684 		device_unregister(dev);
2685 }
2686 
2687 static void genpd_dev_pm_sync(struct device *dev)
2688 {
2689 	struct generic_pm_domain *pd;
2690 
2691 	pd = dev_to_genpd(dev);
2692 	if (IS_ERR(pd))
2693 		return;
2694 
2695 	genpd_queue_power_off_work(pd);
2696 }
2697 
2698 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2699 				 unsigned int index, bool power_on)
2700 {
2701 	struct of_phandle_args pd_args;
2702 	struct generic_pm_domain *pd;
2703 	int pstate;
2704 	int ret;
2705 
2706 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2707 				"#power-domain-cells", index, &pd_args);
2708 	if (ret < 0)
2709 		return ret;
2710 
2711 	mutex_lock(&gpd_list_lock);
2712 	pd = genpd_get_from_provider(&pd_args);
2713 	of_node_put(pd_args.np);
2714 	if (IS_ERR(pd)) {
2715 		mutex_unlock(&gpd_list_lock);
2716 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2717 			__func__, PTR_ERR(pd));
2718 		return driver_deferred_probe_check_state(base_dev);
2719 	}
2720 
2721 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2722 
2723 	ret = genpd_add_device(pd, dev, base_dev);
2724 	mutex_unlock(&gpd_list_lock);
2725 
2726 	if (ret < 0)
2727 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2728 
2729 	dev->pm_domain->detach = genpd_dev_pm_detach;
2730 	dev->pm_domain->sync = genpd_dev_pm_sync;
2731 
2732 	/* Set the default performance state */
2733 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2734 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2735 		ret = pstate;
2736 		goto err;
2737 	} else if (pstate > 0) {
2738 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2739 		if (ret)
2740 			goto err;
2741 		dev_gpd_data(dev)->default_pstate = pstate;
2742 	}
2743 
2744 	if (power_on) {
2745 		genpd_lock(pd);
2746 		ret = genpd_power_on(pd, 0);
2747 		genpd_unlock(pd);
2748 	}
2749 
2750 	if (ret) {
2751 		/* Drop the default performance state */
2752 		if (dev_gpd_data(dev)->default_pstate) {
2753 			dev_pm_genpd_set_performance_state(dev, 0);
2754 			dev_gpd_data(dev)->default_pstate = 0;
2755 		}
2756 
2757 		genpd_remove_device(pd, dev);
2758 		return -EPROBE_DEFER;
2759 	}
2760 
2761 	return 1;
2762 
2763 err:
2764 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2765 		pd->name, ret);
2766 	genpd_remove_device(pd, dev);
2767 	return ret;
2768 }
2769 
2770 /**
2771  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2772  * @dev: Device to attach.
2773  *
2774  * Parse device's OF node to find a PM domain specifier. If such is found,
2775  * attaches the device to the retrieved pm_domain ops.
2776  *
2777  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2778  * need a PM domain or when multiple power-domains exist for it, else a negative
2779  * error code. Note that if a power-domain exists for the device, but it cannot
2780  * be found or turned on, then -EPROBE_DEFER is returned to ensure that the
2781  * device is not probed and the attempt is retried later.
2782  */
2783 int genpd_dev_pm_attach(struct device *dev)
2784 {
2785 	if (!dev->of_node)
2786 		return 0;
2787 
2788 	/*
2789 	 * Devices with multiple PM domains must be attached separately, as we
2790 	 * can only attach one PM domain per device.
2791 	 */
2792 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2793 				       "#power-domain-cells") != 1)
2794 		return 0;
2795 
2796 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2797 }
2798 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2799 
2800 /**
2801  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2802  * @dev: The device used to lookup the PM domain.
2803  * @index: The index of the PM domain.
2804  *
2805  * Parse device's OF node to find a PM domain specifier at the provided @index.
2806  * If such is found, creates a virtual device and attaches it to the retrieved
2807  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2808  * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
2809  *
2810  * Returns the created virtual device if successfully attached PM domain, NULL
2811  * when the device don't need a PM domain, else an ERR_PTR() in case of
2812  * failures. If a power-domain exists for the device, but cannot be found or
2813  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2814  * is not probed and to re-try again later.
2815  */
2816 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2817 					 unsigned int index)
2818 {
2819 	struct device *virt_dev;
2820 	int num_domains;
2821 	int ret;
2822 
2823 	if (!dev->of_node)
2824 		return NULL;
2825 
2826 	/* Verify that the index is within a valid range. */
2827 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2828 						 "#power-domain-cells");
2829 	if (index >= num_domains)
2830 		return NULL;
2831 
2832 	/* Allocate and register device on the genpd bus. */
2833 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2834 	if (!virt_dev)
2835 		return ERR_PTR(-ENOMEM);
2836 
2837 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2838 	virt_dev->bus = &genpd_bus_type;
2839 	virt_dev->release = genpd_release_dev;
2840 	virt_dev->of_node = of_node_get(dev->of_node);
2841 
2842 	ret = device_register(virt_dev);
2843 	if (ret) {
2844 		put_device(virt_dev);
2845 		return ERR_PTR(ret);
2846 	}
2847 
2848 	/* Try to attach the device to the PM domain at the specified index. */
2849 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2850 	if (ret < 1) {
2851 		device_unregister(virt_dev);
2852 		return ret ? ERR_PTR(ret) : NULL;
2853 	}
2854 
2855 	pm_runtime_enable(virt_dev);
2856 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2857 
2858 	return virt_dev;
2859 }
2860 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2861 
2862 /**
2863  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2864  * @dev: The device used to lookup the PM domain.
2865  * @name: The name of the PM domain.
2866  *
2867  * Parse device's OF node to find a PM domain specifier using the
2868  * power-domain-names DT property. For further description see
2869  * genpd_dev_pm_attach_by_id().
2870  */
2871 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2872 {
2873 	int index;
2874 
2875 	if (!dev->of_node)
2876 		return NULL;
2877 
2878 	index = of_property_match_string(dev->of_node, "power-domain-names",
2879 					 name);
2880 	if (index < 0)
2881 		return NULL;
2882 
2883 	return genpd_dev_pm_attach_by_id(dev, index);
2884 }
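
/*
 * Editorial sketch: a consumer with two named power domains. Since devices
 * with multiple domains are not attached automatically (see
 * genpd_dev_pm_attach() above), the driver attaches each domain through a
 * virtual device; dev_pm_domain_attach_by_name() is the exported wrapper
 * around this helper. The "mx"/"cx" domain names are hypothetical.
 */
static int foo_multi_pd_probe(struct platform_device *pdev)
{
	struct device *pd_mx, *pd_cx;

	pd_mx = dev_pm_domain_attach_by_name(&pdev->dev, "mx");
	if (IS_ERR_OR_NULL(pd_mx))
		return pd_mx ? PTR_ERR(pd_mx) : -ENODEV;

	pd_cx = dev_pm_domain_attach_by_name(&pdev->dev, "cx");
	if (IS_ERR_OR_NULL(pd_cx)) {
		dev_pm_domain_detach(pd_mx, false);
		return pd_cx ? PTR_ERR(pd_cx) : -ENODEV;
	}

	/* Tie the virtual devices' runtime PM to this device's. */
	device_link_add(&pdev->dev, pd_mx, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	device_link_add(&pdev->dev, pd_cx, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);

	return 0;
}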
2885 
2886 static const struct of_device_id idle_state_match[] = {
2887 	{ .compatible = "domain-idle-state", },
2888 	{ }
2889 };
2890 
2891 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2892 				    struct device_node *state_node)
2893 {
2894 	int err;
2895 	u32 residency;
2896 	u32 entry_latency, exit_latency;
2897 
2898 	err = of_property_read_u32(state_node, "entry-latency-us",
2899 						&entry_latency);
2900 	if (err) {
2901 		pr_debug(" * %pOF missing entry-latency-us property\n",
2902 			 state_node);
2903 		return -EINVAL;
2904 	}
2905 
2906 	err = of_property_read_u32(state_node, "exit-latency-us",
2907 						&exit_latency);
2908 	if (err) {
2909 		pr_debug(" * %pOF missing exit-latency-us property\n",
2910 			 state_node);
2911 		return -EINVAL;
2912 	}
2913 
2914 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2915 	if (!err)
2916 		genpd_state->residency_ns = 1000 * residency;
2917 
2918 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2919 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2920 	genpd_state->fwnode = &state_node->fwnode;
2921 
2922 	return 0;
2923 }
2924 
2925 static int genpd_iterate_idle_states(struct device_node *dn,
2926 				     struct genpd_power_state *states)
2927 {
2928 	int ret;
2929 	struct of_phandle_iterator it;
2930 	struct device_node *np;
2931 	int i = 0;
2932 
2933 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2934 	if (ret <= 0)
2935 		return ret == -ENOENT ? 0 : ret;
2936 
2937 	/* Loop over the phandles until all the requested entries are found */
2938 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2939 		np = it.node;
2940 		if (!of_match_node(idle_state_match, np))
2941 			continue;
2942 
2943 		if (!of_device_is_available(np))
2944 			continue;
2945 
2946 		if (states) {
2947 			ret = genpd_parse_state(&states[i], np);
2948 			if (ret) {
2949 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2950 				       np, ret);
2951 				of_node_put(np);
2952 				return ret;
2953 			}
2954 		}
2955 		i++;
2956 	}
2957 
2958 	return i;
2959 }
2960 
2961 /**
2962  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2963  *
2964  * @dn: The genpd device node
2965  * @states: The pointer to which the state array will be saved.
2966  * @n: The count of elements in the array returned from this function.
2967  *
2968  * Returns the device states parsed from the OF node. The memory for the states
2969  * is allocated by this function and it is the caller's responsibility to free
2970  * it after use. If zero or more compatible domain idle states are found, 0 is
2971  * returned; in case of errors, a negative error code is returned.
2972  */
2973 int of_genpd_parse_idle_states(struct device_node *dn,
2974 			struct genpd_power_state **states, int *n)
2975 {
2976 	struct genpd_power_state *st;
2977 	int ret;
2978 
2979 	ret = genpd_iterate_idle_states(dn, NULL);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	if (!ret) {
2984 		*states = NULL;
2985 		*n = 0;
2986 		return 0;
2987 	}
2988 
2989 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2990 	if (!st)
2991 		return -ENOMEM;
2992 
2993 	ret = genpd_iterate_idle_states(dn, st);
2994 	if (ret <= 0) {
2995 		kfree(st);
2996 		return ret < 0 ? ret : -EINVAL;
2997 	}
2998 
2999 	*states = st;
3000 	*n = ret;
3001 
3002 	return 0;
3003 }
3004 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
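
/*
 * Editorial sketch: feeding parsed idle states into a domain before
 * registering it. np and foo_pd are hypothetical; a governor is needed
 * when more than one state is available.
 */
static int foo_pd_setup_states(struct device_node *np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		foo_pd.states = states;
		foo_pd.state_count = nr_states;
	}

	return pm_genpd_init(&foo_pd, &simple_qos_governor, false);
}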
3005 
3006 /**
3007  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3008  *
3009  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3010  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3011  *	state.
3012  *
3013  * Returns the performance state encoded in the OPP of the genpd. This calls
3014  * the platform-specific genpd->opp_to_performance_state() callback to
3015  * translate a power domain OPP to a performance state.
3016  *
3017  * Returns the performance state on success and 0 on failure.
3018  */
3019 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3020 					       struct dev_pm_opp *opp)
3021 {
3022 	struct generic_pm_domain *genpd = NULL;
3023 	int state;
3024 
3025 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3026 
3027 	if (unlikely(!genpd->opp_to_performance_state))
3028 		return 0;
3029 
3030 	genpd_lock(genpd);
3031 	state = genpd->opp_to_performance_state(genpd, opp);
3032 	genpd_unlock(genpd);
3033 
3034 	return state;
3035 }
3036 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
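
/*
 * Editorial sketch of the provider-side callback this helper invokes: a
 * common pattern is to encode the performance level in the OPP table's
 * opp-level property and return it directly.
 */
static unsigned int foo_opp_to_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}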
3037 
3038 static int __init genpd_bus_init(void)
3039 {
3040 	return bus_register(&genpd_bus_type);
3041 }
3042 core_initcall(genpd_bus_init);
3043 
3044 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3045 
3046 
3047 /***        debugfs support        ***/
3048 
3049 #ifdef CONFIG_DEBUG_FS
3050 /*
3051  * TODO: This function is a slightly modified version of rtpm_status_show
3052  * from sysfs.c, so generalize it.
3053  */
3054 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3055 {
3056 	static const char * const status_lookup[] = {
3057 		[RPM_ACTIVE] = "active",
3058 		[RPM_RESUMING] = "resuming",
3059 		[RPM_SUSPENDED] = "suspended",
3060 		[RPM_SUSPENDING] = "suspending"
3061 	};
3062 	const char *p = "";
3063 
3064 	if (dev->power.runtime_error)
3065 		p = "error";
3066 	else if (dev->power.disable_depth)
3067 		p = "unsupported";
3068 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3069 		p = status_lookup[dev->power.runtime_status];
3070 	else
3071 		WARN_ON(1);
3072 
3073 	seq_printf(s, "%-25s  ", p);
3074 }
3075 
3076 static void perf_status_str(struct seq_file *s, struct device *dev)
3077 {
3078 	struct generic_pm_domain_data *gpd_data;
3079 
3080 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3081 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3082 }
3083 
3084 static int genpd_summary_one(struct seq_file *s,
3085 			struct generic_pm_domain *genpd)
3086 {
3087 	static const char * const status_lookup[] = {
3088 		[GENPD_STATE_ON] = "on",
3089 		[GENPD_STATE_OFF] = "off"
3090 	};
3091 	struct pm_domain_data *pm_data;
3092 	const char *kobj_path;
3093 	struct gpd_link *link;
3094 	char state[16];
3095 	int ret;
3096 
3097 	ret = genpd_lock_interruptible(genpd);
3098 	if (ret)
3099 		return -ERESTARTSYS;
3100 
3101 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3102 		goto exit;
3103 	if (!genpd_status_on(genpd))
3104 		snprintf(state, sizeof(state), "%s-%u",
3105 			 status_lookup[genpd->status], genpd->state_idx);
3106 	else
3107 		snprintf(state, sizeof(state), "%s",
3108 			 status_lookup[genpd->status]);
3109 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3110 
3111 	/*
3112 	 * Modifications on the list require holding locks on both
3113 	 * parent and child, so we are safe.
3114 	 * Also genpd->name is immutable.
3115 	 */
3116 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3117 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3118 			seq_printf(s, "\n%48s", " ");
3119 		seq_printf(s, "%s", link->child->name);
3120 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3121 			seq_puts(s, ", ");
3122 	}
3123 
3124 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3125 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3126 				genpd_is_irq_safe(genpd) ?
3127 				GFP_ATOMIC : GFP_KERNEL);
3128 		if (kobj_path == NULL)
3129 			continue;
3130 
3131 		seq_printf(s, "\n    %-50s  ", kobj_path);
3132 		rtpm_status_str(s, pm_data->dev);
3133 		perf_status_str(s, pm_data->dev);
3134 		kfree(kobj_path);
3135 	}
3136 
3137 	seq_puts(s, "\n");
3138 exit:
3139 	genpd_unlock(genpd);
3140 
3141 	return 0;
3142 }
3143 
3144 static int summary_show(struct seq_file *s, void *data)
3145 {
3146 	struct generic_pm_domain *genpd;
3147 	int ret = 0;
3148 
3149 	seq_puts(s, "domain                          status          children                           performance\n");
3150 	seq_puts(s, "    /device                                             runtime status\n");
3151 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3152 
3153 	ret = mutex_lock_interruptible(&gpd_list_lock);
3154 	if (ret)
3155 		return -ERESTARTSYS;
3156 
3157 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3158 		ret = genpd_summary_one(s, genpd);
3159 		if (ret)
3160 			break;
3161 	}
3162 	mutex_unlock(&gpd_list_lock);
3163 
3164 	return ret;
3165 }
3166 
3167 static int status_show(struct seq_file *s, void *data)
3168 {
3169 	static const char * const status_lookup[] = {
3170 		[GENPD_STATE_ON] = "on",
3171 		[GENPD_STATE_OFF] = "off"
3172 	};
3173 
3174 	struct generic_pm_domain *genpd = s->private;
3175 	int ret = 0;
3176 
3177 	ret = genpd_lock_interruptible(genpd);
3178 	if (ret)
3179 		return -ERESTARTSYS;
3180 
3181 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3182 		goto exit;
3183 
3184 	if (genpd->status == GENPD_STATE_OFF)
3185 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3186 			genpd->state_idx);
3187 	else
3188 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3189 exit:
3190 	genpd_unlock(genpd);
3191 	return ret;
3192 }
3193 
3194 static int sub_domains_show(struct seq_file *s, void *data)
3195 {
3196 	struct generic_pm_domain *genpd = s->private;
3197 	struct gpd_link *link;
3198 	int ret = 0;
3199 
3200 	ret = genpd_lock_interruptible(genpd);
3201 	if (ret)
3202 		return -ERESTARTSYS;
3203 
3204 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3205 		seq_printf(s, "%s\n", link->child->name);
3206 
3207 	genpd_unlock(genpd);
3208 	return ret;
3209 }
3210 
3211 static int idle_states_show(struct seq_file *s, void *data)
3212 {
3213 	struct generic_pm_domain *genpd = s->private;
3214 	u64 now, delta, idle_time = 0;
3215 	unsigned int i;
3216 	int ret = 0;
3217 
3218 	ret = genpd_lock_interruptible(genpd);
3219 	if (ret)
3220 		return -ERESTARTSYS;
3221 
3222 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3223 
3224 	for (i = 0; i < genpd->state_count; i++) {
3225 		idle_time = genpd->states[i].idle_time;
3226 
3227 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3228 			now = ktime_get_mono_fast_ns();
3229 			if (now > genpd->accounting_time) {
3230 				delta = now - genpd->accounting_time;
3231 				idle_time += delta;
3232 			}
3233 		}
3234 
3235 		do_div(idle_time, NSEC_PER_MSEC);
3236 		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3237 			   genpd->states[i].usage, genpd->states[i].rejected);
3238 	}
3239 
3240 	genpd_unlock(genpd);
3241 	return ret;
3242 }
3243 
3244 static int active_time_show(struct seq_file *s, void *data)
3245 {
3246 	struct generic_pm_domain *genpd = s->private;
3247 	u64 now, on_time, delta = 0;
3248 	int ret = 0;
3249 
3250 	ret = genpd_lock_interruptible(genpd);
3251 	if (ret)
3252 		return -ERESTARTSYS;
3253 
3254 	if (genpd->status == GENPD_STATE_ON) {
3255 		now = ktime_get_mono_fast_ns();
3256 		if (now > genpd->accounting_time)
3257 			delta = now - genpd->accounting_time;
3258 	}
3259 
3260 	on_time = genpd->on_time + delta;
3261 	do_div(on_time, NSEC_PER_MSEC);
3262 	seq_printf(s, "%llu ms\n", on_time);
3263 
3264 	genpd_unlock(genpd);
3265 	return ret;
3266 }
3267 
3268 static int total_idle_time_show(struct seq_file *s, void *data)
3269 {
3270 	struct generic_pm_domain *genpd = s->private;
3271 	u64 now, delta, total = 0;
3272 	unsigned int i;
3273 	int ret = 0;
3274 
3275 	ret = genpd_lock_interruptible(genpd);
3276 	if (ret)
3277 		return -ERESTARTSYS;
3278 
3279 	for (i = 0; i < genpd->state_count; i++) {
3280 		total += genpd->states[i].idle_time;
3281 
3282 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3283 			now = ktime_get_mono_fast_ns();
3284 			if (now > genpd->accounting_time) {
3285 				delta = now - genpd->accounting_time;
3286 				total += delta;
3287 			}
3288 		}
3289 	}
3290 
3291 	do_div(total, NSEC_PER_MSEC);
3292 	seq_printf(s, "%llu ms\n", total);
3293 
3294 	genpd_unlock(genpd);
3295 	return ret;
3296 }
3297 
3298 
3299 static int devices_show(struct seq_file *s, void *data)
3300 {
3301 	struct generic_pm_domain *genpd = s->private;
3302 	struct pm_domain_data *pm_data;
3303 	const char *kobj_path;
3304 	int ret = 0;
3305 
3306 	ret = genpd_lock_interruptible(genpd);
3307 	if (ret)
3308 		return -ERESTARTSYS;
3309 
3310 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3311 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3312 				genpd_is_irq_safe(genpd) ?
3313 				GFP_ATOMIC : GFP_KERNEL);
3314 		if (kobj_path == NULL)
3315 			continue;
3316 
3317 		seq_printf(s, "%s\n", kobj_path);
3318 		kfree(kobj_path);
3319 	}
3320 
3321 	genpd_unlock(genpd);
3322 	return ret;
3323 }
3324 
3325 static int perf_state_show(struct seq_file *s, void *data)
3326 {
3327 	struct generic_pm_domain *genpd = s->private;
3328 
3329 	if (genpd_lock_interruptible(genpd))
3330 		return -ERESTARTSYS;
3331 
3332 	seq_printf(s, "%u\n", genpd->performance_state);
3333 
3334 	genpd_unlock(genpd);
3335 	return 0;
3336 }
3337 
3338 DEFINE_SHOW_ATTRIBUTE(summary);
3339 DEFINE_SHOW_ATTRIBUTE(status);
3340 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3341 DEFINE_SHOW_ATTRIBUTE(idle_states);
3342 DEFINE_SHOW_ATTRIBUTE(active_time);
3343 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3344 DEFINE_SHOW_ATTRIBUTE(devices);
3345 DEFINE_SHOW_ATTRIBUTE(perf_state);
3346 
3347 static void genpd_debug_add(struct generic_pm_domain *genpd)
3348 {
3349 	struct dentry *d;
3350 
3351 	if (!genpd_debugfs_dir)
3352 		return;
3353 
3354 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3355 
3356 	debugfs_create_file("current_state", 0444,
3357 			    d, genpd, &status_fops);
3358 	debugfs_create_file("sub_domains", 0444,
3359 			    d, genpd, &sub_domains_fops);
3360 	debugfs_create_file("idle_states", 0444,
3361 			    d, genpd, &idle_states_fops);
3362 	debugfs_create_file("active_time", 0444,
3363 			    d, genpd, &active_time_fops);
3364 	debugfs_create_file("total_idle_time", 0444,
3365 			    d, genpd, &total_idle_time_fops);
3366 	debugfs_create_file("devices", 0444,
3367 			    d, genpd, &devices_fops);
3368 	if (genpd->set_performance_state)
3369 		debugfs_create_file("perf_state", 0444,
3370 				    d, genpd, &perf_state_fops);
3371 }
3372 
3373 static int __init genpd_debug_init(void)
3374 {
3375 	struct generic_pm_domain *genpd;
3376 
3377 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3378 
3379 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3380 			    NULL, &summary_fops);
3381 
3382 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3383 		genpd_debug_add(genpd);
3384 
3385 	return 0;
3386 }
3387 late_initcall(genpd_debug_init);
3388 
3389 static void __exit genpd_debug_exit(void)
3390 {
3391 	debugfs_remove_recursive(genpd_debugfs_dir);
3392 }
3393 __exitcall(genpd_debug_exit);
3394 #endif /* CONFIG_DEBUG_FS */
3395