xref: /openbmc/linux/drivers/base/power/domain.c (revision 31e67366)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 #include <linux/cpu.h>
24 #include <linux/debugfs.h>
25 
26 #include "power.h"
27 
28 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
29 
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
31 ({								\
32 	type (*__routine)(struct device *__d); 			\
33 	type __ret = (type)0;					\
34 								\
35 	__routine = genpd->dev_ops.callback; 			\
36 	if (__routine) {					\
37 		__ret = __routine(dev); 			\
38 	}							\
39 	__ret;							\
40 })
41 
42 static LIST_HEAD(gpd_list);
43 static DEFINE_MUTEX(gpd_list_lock);
44 
45 struct genpd_lock_ops {
46 	void (*lock)(struct generic_pm_domain *genpd);
47 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 	void (*unlock)(struct generic_pm_domain *genpd);
50 };
51 
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53 {
54 	mutex_lock(&genpd->mlock);
55 }
56 
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
58 					int depth)
59 {
60 	mutex_lock_nested(&genpd->mlock, depth);
61 }
62 
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64 {
65 	return mutex_lock_interruptible(&genpd->mlock);
66 }
67 
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69 {
70 	return mutex_unlock(&genpd->mlock);
71 }
72 
73 static const struct genpd_lock_ops genpd_mtx_ops = {
74 	.lock = genpd_lock_mtx,
75 	.lock_nested = genpd_lock_nested_mtx,
76 	.lock_interruptible = genpd_lock_interruptible_mtx,
77 	.unlock = genpd_unlock_mtx,
78 };
79 
80 static void genpd_lock_spin(struct generic_pm_domain *genpd)
81 	__acquires(&genpd->slock)
82 {
83 	unsigned long flags;
84 
85 	spin_lock_irqsave(&genpd->slock, flags);
86 	genpd->lock_flags = flags;
87 }
88 
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90 					int depth)
91 	__acquires(&genpd->slock)
92 {
93 	unsigned long flags;
94 
95 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96 	genpd->lock_flags = flags;
97 }
98 
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100 	__acquires(&genpd->slock)
101 {
102 	unsigned long flags;
103 
104 	spin_lock_irqsave(&genpd->slock, flags);
105 	genpd->lock_flags = flags;
106 	return 0;
107 }
108 
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110 	__releases(&genpd->slock)
111 {
112 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
113 }
114 
115 static const struct genpd_lock_ops genpd_spin_ops = {
116 	.lock = genpd_lock_spin,
117 	.lock_nested = genpd_lock_nested_spin,
118 	.lock_interruptible = genpd_lock_interruptible_spin,
119 	.unlock = genpd_unlock_spin,
120 };
121 
122 #define genpd_lock(p)			p->lock_ops->lock(p)
123 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
124 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
125 #define genpd_unlock(p)			p->lock_ops->unlock(p)
126 
127 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
128 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
129 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
130 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
133 
134 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
135 		const struct generic_pm_domain *genpd)
136 {
137 	bool ret;
138 
139 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
140 
141 	/*
142 	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
143 	 * to indicate a suboptimal configuration for PM. For an always on
144 	 * domain this isn't case, thus don't warn.
145 	 */
146 	if (ret && !genpd_is_always_on(genpd))
147 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
148 				genpd->name);
149 
150 	return ret;
151 }
152 
153 static int genpd_runtime_suspend(struct device *dev);
154 
155 /*
156  * Get the generic PM domain for a particular struct device.
157  * This validates the struct device pointer, the PM domain pointer,
158  * and checks that the PM domain pointer is a real generic PM domain.
159  * Any failure results in NULL being returned.
160  */
161 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
162 {
163 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
164 		return NULL;
165 
166 	/* A genpd's always have its ->runtime_suspend() callback assigned. */
167 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
168 		return pd_to_genpd(dev->pm_domain);
169 
170 	return NULL;
171 }
172 
173 /*
174  * This should only be used where we are certain that the pm_domain
175  * attached to the device is a genpd domain.
176  */
177 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
178 {
179 	if (IS_ERR_OR_NULL(dev->pm_domain))
180 		return ERR_PTR(-EINVAL);
181 
182 	return pd_to_genpd(dev->pm_domain);
183 }
184 
185 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
186 			  struct device *dev)
187 {
188 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
189 }
190 
191 static int genpd_start_dev(const struct generic_pm_domain *genpd,
192 			   struct device *dev)
193 {
194 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
195 }
196 
197 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
198 {
199 	bool ret = false;
200 
201 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
202 		ret = !!atomic_dec_and_test(&genpd->sd_count);
203 
204 	return ret;
205 }
206 
207 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
208 {
209 	atomic_inc(&genpd->sd_count);
210 	smp_mb__after_atomic();
211 }
212 
213 #ifdef CONFIG_DEBUG_FS
214 static struct dentry *genpd_debugfs_dir;
215 
216 static void genpd_debug_add(struct generic_pm_domain *genpd);
217 
218 static void genpd_debug_remove(struct generic_pm_domain *genpd)
219 {
220 	struct dentry *d;
221 
222 	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
223 	debugfs_remove(d);
224 }
225 
226 static void genpd_update_accounting(struct generic_pm_domain *genpd)
227 {
228 	ktime_t delta, now;
229 
230 	now = ktime_get();
231 	delta = ktime_sub(now, genpd->accounting_time);
232 
233 	/*
234 	 * If genpd->status is active, it means we are just
235 	 * out of off and so update the idle time and vice
236 	 * versa.
237 	 */
238 	if (genpd->status == GENPD_STATE_ON) {
239 		int state_idx = genpd->state_idx;
240 
241 		genpd->states[state_idx].idle_time =
242 			ktime_add(genpd->states[state_idx].idle_time, delta);
243 	} else {
244 		genpd->on_time = ktime_add(genpd->on_time, delta);
245 	}
246 
247 	genpd->accounting_time = now;
248 }
249 #else
250 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
251 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
252 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
253 #endif
254 
255 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
256 					   unsigned int state)
257 {
258 	struct generic_pm_domain_data *pd_data;
259 	struct pm_domain_data *pdd;
260 	struct gpd_link *link;
261 
262 	/* New requested state is same as Max requested state */
263 	if (state == genpd->performance_state)
264 		return state;
265 
266 	/* New requested state is higher than Max requested state */
267 	if (state > genpd->performance_state)
268 		return state;
269 
270 	/* Traverse all devices within the domain */
271 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
272 		pd_data = to_gpd_data(pdd);
273 
274 		if (pd_data->performance_state > state)
275 			state = pd_data->performance_state;
276 	}
277 
278 	/*
279 	 * Traverse all sub-domains within the domain. This can be
280 	 * done without any additional locking as the link->performance_state
281 	 * field is protected by the parent genpd->lock, which is already taken.
282 	 *
283 	 * Also note that link->performance_state (subdomain's performance state
284 	 * requirement to parent domain) is different from
285 	 * link->child->performance_state (current performance state requirement
286 	 * of the devices/sub-domains of the subdomain) and so can have a
287 	 * different value.
288 	 *
289 	 * Note that we also take vote from powered-off sub-domains into account
290 	 * as the same is done for devices right now.
291 	 */
292 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
293 		if (link->performance_state > state)
294 			state = link->performance_state;
295 	}
296 
297 	return state;
298 }
299 
300 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
301 					 struct generic_pm_domain *parent,
302 					 unsigned int pstate)
303 {
304 	if (!parent->set_performance_state)
305 		return pstate;
306 
307 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
308 						  parent->opp_table,
309 						  pstate);
310 }
311 
312 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
313 					unsigned int state, int depth)
314 {
315 	struct generic_pm_domain *parent;
316 	struct gpd_link *link;
317 	int parent_state, ret;
318 
319 	if (state == genpd->performance_state)
320 		return 0;
321 
322 	/* Propagate to parents of genpd */
323 	list_for_each_entry(link, &genpd->child_links, child_node) {
324 		parent = link->parent;
325 
326 		/* Find parent's performance state */
327 		ret = genpd_xlate_performance_state(genpd, parent, state);
328 		if (unlikely(ret < 0))
329 			goto err;
330 
331 		parent_state = ret;
332 
333 		genpd_lock_nested(parent, depth + 1);
334 
335 		link->prev_performance_state = link->performance_state;
336 		link->performance_state = parent_state;
337 		parent_state = _genpd_reeval_performance_state(parent,
338 						parent_state);
339 		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
340 		if (ret)
341 			link->performance_state = link->prev_performance_state;
342 
343 		genpd_unlock(parent);
344 
345 		if (ret)
346 			goto err;
347 	}
348 
349 	if (genpd->set_performance_state) {
350 		ret = genpd->set_performance_state(genpd, state);
351 		if (ret)
352 			goto err;
353 	}
354 
355 	genpd->performance_state = state;
356 	return 0;
357 
358 err:
359 	/* Encountered an error, lets rollback */
360 	list_for_each_entry_continue_reverse(link, &genpd->child_links,
361 					     child_node) {
362 		parent = link->parent;
363 
364 		genpd_lock_nested(parent, depth + 1);
365 
366 		parent_state = link->prev_performance_state;
367 		link->performance_state = parent_state;
368 
369 		parent_state = _genpd_reeval_performance_state(parent,
370 						parent_state);
371 		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
372 			pr_err("%s: Failed to roll back to %d performance state\n",
373 			       parent->name, parent_state);
374 		}
375 
376 		genpd_unlock(parent);
377 	}
378 
379 	return ret;
380 }
381 
382 /**
383  * dev_pm_genpd_set_performance_state- Set performance state of device's power
384  * domain.
385  *
386  * @dev: Device for which the performance-state needs to be set.
387  * @state: Target performance state of the device. This can be set as 0 when the
388  *	   device doesn't have any performance state constraints left (And so
389  *	   the device wouldn't participate anymore to find the target
390  *	   performance state of the genpd).
391  *
392  * It is assumed that the users guarantee that the genpd wouldn't be detached
393  * while this routine is getting called.
394  *
395  * Returns 0 on success and negative error values on failures.
396  */
397 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
398 {
399 	struct generic_pm_domain *genpd;
400 	struct generic_pm_domain_data *gpd_data;
401 	unsigned int prev;
402 	int ret;
403 
404 	genpd = dev_to_genpd_safe(dev);
405 	if (!genpd)
406 		return -ENODEV;
407 
408 	if (WARN_ON(!dev->power.subsys_data ||
409 		     !dev->power.subsys_data->domain_data))
410 		return -EINVAL;
411 
412 	genpd_lock(genpd);
413 
414 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
415 	prev = gpd_data->performance_state;
416 	gpd_data->performance_state = state;
417 
418 	state = _genpd_reeval_performance_state(genpd, state);
419 	ret = _genpd_set_performance_state(genpd, state, 0);
420 	if (ret)
421 		gpd_data->performance_state = prev;
422 
423 	genpd_unlock(genpd);
424 
425 	return ret;
426 }
427 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
428 
429 /**
430  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
431  *
432  * @dev: Device to handle
433  * @next: impending interrupt/wakeup for the device
434  *
435  *
436  * Allow devices to inform of the next wakeup. It's assumed that the users
437  * guarantee that the genpd wouldn't be detached while this routine is getting
438  * called. Additionally, it's also assumed that @dev isn't runtime suspended
439  * (RPM_SUSPENDED)."
440  * Although devices are expected to update the next_wakeup after the end of
441  * their usecase as well, it is possible the devices themselves may not know
442  * about that, so stale @next will be ignored when powering off the domain.
443  */
444 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
445 {
446 	struct generic_pm_domain_data *gpd_data;
447 	struct generic_pm_domain *genpd;
448 
449 	genpd = dev_to_genpd_safe(dev);
450 	if (!genpd)
451 		return;
452 
453 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
454 	gpd_data->next_wakeup = next;
455 }
456 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
457 
458 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
459 {
460 	unsigned int state_idx = genpd->state_idx;
461 	ktime_t time_start;
462 	s64 elapsed_ns;
463 	int ret;
464 
465 	/* Notify consumers that we are about to power on. */
466 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
467 					     GENPD_NOTIFY_PRE_ON,
468 					     GENPD_NOTIFY_OFF, NULL);
469 	ret = notifier_to_errno(ret);
470 	if (ret)
471 		return ret;
472 
473 	if (!genpd->power_on)
474 		goto out;
475 
476 	if (!timed) {
477 		ret = genpd->power_on(genpd);
478 		if (ret)
479 			goto err;
480 
481 		goto out;
482 	}
483 
484 	time_start = ktime_get();
485 	ret = genpd->power_on(genpd);
486 	if (ret)
487 		goto err;
488 
489 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
490 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
491 		goto out;
492 
493 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
494 	genpd->max_off_time_changed = true;
495 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
496 		 genpd->name, "on", elapsed_ns);
497 
498 out:
499 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
500 	return 0;
501 err:
502 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
503 				NULL);
504 	return ret;
505 }
506 
507 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
508 {
509 	unsigned int state_idx = genpd->state_idx;
510 	ktime_t time_start;
511 	s64 elapsed_ns;
512 	int ret;
513 
514 	/* Notify consumers that we are about to power off. */
515 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
516 					     GENPD_NOTIFY_PRE_OFF,
517 					     GENPD_NOTIFY_ON, NULL);
518 	ret = notifier_to_errno(ret);
519 	if (ret)
520 		return ret;
521 
522 	if (!genpd->power_off)
523 		goto out;
524 
525 	if (!timed) {
526 		ret = genpd->power_off(genpd);
527 		if (ret)
528 			goto busy;
529 
530 		goto out;
531 	}
532 
533 	time_start = ktime_get();
534 	ret = genpd->power_off(genpd);
535 	if (ret)
536 		goto busy;
537 
538 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
539 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
540 		goto out;
541 
542 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
543 	genpd->max_off_time_changed = true;
544 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
545 		 genpd->name, "off", elapsed_ns);
546 
547 out:
548 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
549 				NULL);
550 	return 0;
551 busy:
552 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
553 	return ret;
554 }
555 
556 /**
557  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
558  * @genpd: PM domain to power off.
559  *
560  * Queue up the execution of genpd_power_off() unless it's already been done
561  * before.
562  */
563 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
564 {
565 	queue_work(pm_wq, &genpd->power_off_work);
566 }
567 
568 /**
569  * genpd_power_off - Remove power from a given PM domain.
570  * @genpd: PM domain to power down.
571  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
572  * RPM status of the releated device is in an intermediate state, not yet turned
573  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
574  * be RPM_SUSPENDED, while it tries to power off the PM domain.
575  *
576  * If all of the @genpd's devices have been suspended and all of its subdomains
577  * have been powered down, remove power from @genpd.
578  */
579 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
580 			   unsigned int depth)
581 {
582 	struct pm_domain_data *pdd;
583 	struct gpd_link *link;
584 	unsigned int not_suspended = 0;
585 	int ret;
586 
587 	/*
588 	 * Do not try to power off the domain in the following situations:
589 	 * (1) The domain is already in the "power off" state.
590 	 * (2) System suspend is in progress.
591 	 */
592 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
593 		return 0;
594 
595 	/*
596 	 * Abort power off for the PM domain in the following situations:
597 	 * (1) The domain is configured as always on.
598 	 * (2) When the domain has a subdomain being powered on.
599 	 */
600 	if (genpd_is_always_on(genpd) ||
601 			genpd_is_rpm_always_on(genpd) ||
602 			atomic_read(&genpd->sd_count) > 0)
603 		return -EBUSY;
604 
605 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
606 		enum pm_qos_flags_status stat;
607 
608 		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
609 		if (stat > PM_QOS_FLAGS_NONE)
610 			return -EBUSY;
611 
612 		/*
613 		 * Do not allow PM domain to be powered off, when an IRQ safe
614 		 * device is part of a non-IRQ safe domain.
615 		 */
616 		if (!pm_runtime_suspended(pdd->dev) ||
617 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
618 			not_suspended++;
619 	}
620 
621 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
622 		return -EBUSY;
623 
624 	if (genpd->gov && genpd->gov->power_down_ok) {
625 		if (!genpd->gov->power_down_ok(&genpd->domain))
626 			return -EAGAIN;
627 	}
628 
629 	/* Default to shallowest state. */
630 	if (!genpd->gov)
631 		genpd->state_idx = 0;
632 
633 	/* Don't power off, if a child domain is waiting to power on. */
634 	if (atomic_read(&genpd->sd_count) > 0)
635 		return -EBUSY;
636 
637 	ret = _genpd_power_off(genpd, true);
638 	if (ret) {
639 		genpd->states[genpd->state_idx].rejected++;
640 		return ret;
641 	}
642 
643 	genpd->status = GENPD_STATE_OFF;
644 	genpd_update_accounting(genpd);
645 	genpd->states[genpd->state_idx].usage++;
646 
647 	list_for_each_entry(link, &genpd->child_links, child_node) {
648 		genpd_sd_counter_dec(link->parent);
649 		genpd_lock_nested(link->parent, depth + 1);
650 		genpd_power_off(link->parent, false, depth + 1);
651 		genpd_unlock(link->parent);
652 	}
653 
654 	return 0;
655 }
656 
657 /**
658  * genpd_power_on - Restore power to a given PM domain and its parents.
659  * @genpd: PM domain to power up.
660  * @depth: nesting count for lockdep.
661  *
662  * Restore power to @genpd and all of its parents so that it is possible to
663  * resume a device belonging to it.
664  */
665 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
666 {
667 	struct gpd_link *link;
668 	int ret = 0;
669 
670 	if (genpd_status_on(genpd))
671 		return 0;
672 
673 	/*
674 	 * The list is guaranteed not to change while the loop below is being
675 	 * executed, unless one of the parents' .power_on() callbacks fiddles
676 	 * with it.
677 	 */
678 	list_for_each_entry(link, &genpd->child_links, child_node) {
679 		struct generic_pm_domain *parent = link->parent;
680 
681 		genpd_sd_counter_inc(parent);
682 
683 		genpd_lock_nested(parent, depth + 1);
684 		ret = genpd_power_on(parent, depth + 1);
685 		genpd_unlock(parent);
686 
687 		if (ret) {
688 			genpd_sd_counter_dec(parent);
689 			goto err;
690 		}
691 	}
692 
693 	ret = _genpd_power_on(genpd, true);
694 	if (ret)
695 		goto err;
696 
697 	genpd->status = GENPD_STATE_ON;
698 	genpd_update_accounting(genpd);
699 
700 	return 0;
701 
702  err:
703 	list_for_each_entry_continue_reverse(link,
704 					&genpd->child_links,
705 					child_node) {
706 		genpd_sd_counter_dec(link->parent);
707 		genpd_lock_nested(link->parent, depth + 1);
708 		genpd_power_off(link->parent, false, depth + 1);
709 		genpd_unlock(link->parent);
710 	}
711 
712 	return ret;
713 }
714 
715 static int genpd_dev_pm_start(struct device *dev)
716 {
717 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
718 
719 	return genpd_start_dev(genpd, dev);
720 }
721 
722 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
723 				     unsigned long val, void *ptr)
724 {
725 	struct generic_pm_domain_data *gpd_data;
726 	struct device *dev;
727 
728 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
729 	dev = gpd_data->base.dev;
730 
731 	for (;;) {
732 		struct generic_pm_domain *genpd;
733 		struct pm_domain_data *pdd;
734 
735 		spin_lock_irq(&dev->power.lock);
736 
737 		pdd = dev->power.subsys_data ?
738 				dev->power.subsys_data->domain_data : NULL;
739 		if (pdd) {
740 			to_gpd_data(pdd)->td.constraint_changed = true;
741 			genpd = dev_to_genpd(dev);
742 		} else {
743 			genpd = ERR_PTR(-ENODATA);
744 		}
745 
746 		spin_unlock_irq(&dev->power.lock);
747 
748 		if (!IS_ERR(genpd)) {
749 			genpd_lock(genpd);
750 			genpd->max_off_time_changed = true;
751 			genpd_unlock(genpd);
752 		}
753 
754 		dev = dev->parent;
755 		if (!dev || dev->power.ignore_children)
756 			break;
757 	}
758 
759 	return NOTIFY_DONE;
760 }
761 
762 /**
763  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
764  * @work: Work structure used for scheduling the execution of this function.
765  */
766 static void genpd_power_off_work_fn(struct work_struct *work)
767 {
768 	struct generic_pm_domain *genpd;
769 
770 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
771 
772 	genpd_lock(genpd);
773 	genpd_power_off(genpd, false, 0);
774 	genpd_unlock(genpd);
775 }
776 
777 /**
778  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
779  * @dev: Device to handle.
780  */
781 static int __genpd_runtime_suspend(struct device *dev)
782 {
783 	int (*cb)(struct device *__dev);
784 
785 	if (dev->type && dev->type->pm)
786 		cb = dev->type->pm->runtime_suspend;
787 	else if (dev->class && dev->class->pm)
788 		cb = dev->class->pm->runtime_suspend;
789 	else if (dev->bus && dev->bus->pm)
790 		cb = dev->bus->pm->runtime_suspend;
791 	else
792 		cb = NULL;
793 
794 	if (!cb && dev->driver && dev->driver->pm)
795 		cb = dev->driver->pm->runtime_suspend;
796 
797 	return cb ? cb(dev) : 0;
798 }
799 
800 /**
801  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
802  * @dev: Device to handle.
803  */
804 static int __genpd_runtime_resume(struct device *dev)
805 {
806 	int (*cb)(struct device *__dev);
807 
808 	if (dev->type && dev->type->pm)
809 		cb = dev->type->pm->runtime_resume;
810 	else if (dev->class && dev->class->pm)
811 		cb = dev->class->pm->runtime_resume;
812 	else if (dev->bus && dev->bus->pm)
813 		cb = dev->bus->pm->runtime_resume;
814 	else
815 		cb = NULL;
816 
817 	if (!cb && dev->driver && dev->driver->pm)
818 		cb = dev->driver->pm->runtime_resume;
819 
820 	return cb ? cb(dev) : 0;
821 }
822 
823 /**
824  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
825  * @dev: Device to suspend.
826  *
827  * Carry out a runtime suspend of a device under the assumption that its
828  * pm_domain field points to the domain member of an object of type
829  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
830  */
831 static int genpd_runtime_suspend(struct device *dev)
832 {
833 	struct generic_pm_domain *genpd;
834 	bool (*suspend_ok)(struct device *__dev);
835 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
836 	bool runtime_pm = pm_runtime_enabled(dev);
837 	ktime_t time_start;
838 	s64 elapsed_ns;
839 	int ret;
840 
841 	dev_dbg(dev, "%s()\n", __func__);
842 
843 	genpd = dev_to_genpd(dev);
844 	if (IS_ERR(genpd))
845 		return -EINVAL;
846 
847 	/*
848 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
849 	 * callbacks for other purposes than runtime PM. In those scenarios
850 	 * runtime PM is disabled. Under these circumstances, we shall skip
851 	 * validating/measuring the PM QoS latency.
852 	 */
853 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
854 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
855 		return -EBUSY;
856 
857 	/* Measure suspend latency. */
858 	time_start = 0;
859 	if (runtime_pm)
860 		time_start = ktime_get();
861 
862 	ret = __genpd_runtime_suspend(dev);
863 	if (ret)
864 		return ret;
865 
866 	ret = genpd_stop_dev(genpd, dev);
867 	if (ret) {
868 		__genpd_runtime_resume(dev);
869 		return ret;
870 	}
871 
872 	/* Update suspend latency value if the measured time exceeds it. */
873 	if (runtime_pm) {
874 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
875 		if (elapsed_ns > td->suspend_latency_ns) {
876 			td->suspend_latency_ns = elapsed_ns;
877 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
878 				elapsed_ns);
879 			genpd->max_off_time_changed = true;
880 			td->constraint_changed = true;
881 		}
882 	}
883 
884 	/*
885 	 * If power.irq_safe is set, this routine may be run with
886 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
887 	 */
888 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
889 		return 0;
890 
891 	genpd_lock(genpd);
892 	genpd_power_off(genpd, true, 0);
893 	genpd_unlock(genpd);
894 
895 	return 0;
896 }
897 
898 /**
899  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
900  * @dev: Device to resume.
901  *
902  * Carry out a runtime resume of a device under the assumption that its
903  * pm_domain field points to the domain member of an object of type
904  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
905  */
906 static int genpd_runtime_resume(struct device *dev)
907 {
908 	struct generic_pm_domain *genpd;
909 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
910 	bool runtime_pm = pm_runtime_enabled(dev);
911 	ktime_t time_start;
912 	s64 elapsed_ns;
913 	int ret;
914 	bool timed = true;
915 
916 	dev_dbg(dev, "%s()\n", __func__);
917 
918 	genpd = dev_to_genpd(dev);
919 	if (IS_ERR(genpd))
920 		return -EINVAL;
921 
922 	/*
923 	 * As we don't power off a non IRQ safe domain, which holds
924 	 * an IRQ safe device, we don't need to restore power to it.
925 	 */
926 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
927 		timed = false;
928 		goto out;
929 	}
930 
931 	genpd_lock(genpd);
932 	ret = genpd_power_on(genpd, 0);
933 	genpd_unlock(genpd);
934 
935 	if (ret)
936 		return ret;
937 
938  out:
939 	/* Measure resume latency. */
940 	time_start = 0;
941 	if (timed && runtime_pm)
942 		time_start = ktime_get();
943 
944 	ret = genpd_start_dev(genpd, dev);
945 	if (ret)
946 		goto err_poweroff;
947 
948 	ret = __genpd_runtime_resume(dev);
949 	if (ret)
950 		goto err_stop;
951 
952 	/* Update resume latency value if the measured time exceeds it. */
953 	if (timed && runtime_pm) {
954 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
955 		if (elapsed_ns > td->resume_latency_ns) {
956 			td->resume_latency_ns = elapsed_ns;
957 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
958 				elapsed_ns);
959 			genpd->max_off_time_changed = true;
960 			td->constraint_changed = true;
961 		}
962 	}
963 
964 	return 0;
965 
966 err_stop:
967 	genpd_stop_dev(genpd, dev);
968 err_poweroff:
969 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
970 		genpd_lock(genpd);
971 		genpd_power_off(genpd, true, 0);
972 		genpd_unlock(genpd);
973 	}
974 
975 	return ret;
976 }
977 
978 static bool pd_ignore_unused;
979 static int __init pd_ignore_unused_setup(char *__unused)
980 {
981 	pd_ignore_unused = true;
982 	return 1;
983 }
984 __setup("pd_ignore_unused", pd_ignore_unused_setup);
985 
986 /**
987  * genpd_power_off_unused - Power off all PM domains with no devices in use.
988  */
989 static int __init genpd_power_off_unused(void)
990 {
991 	struct generic_pm_domain *genpd;
992 
993 	if (pd_ignore_unused) {
994 		pr_warn("genpd: Not disabling unused power domains\n");
995 		return 0;
996 	}
997 
998 	mutex_lock(&gpd_list_lock);
999 
1000 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1001 		genpd_queue_power_off_work(genpd);
1002 
1003 	mutex_unlock(&gpd_list_lock);
1004 
1005 	return 0;
1006 }
1007 late_initcall(genpd_power_off_unused);
1008 
1009 #ifdef CONFIG_PM_SLEEP
1010 
1011 /**
1012  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1013  * @genpd: PM domain to power off, if possible.
1014  * @use_lock: use the lock.
1015  * @depth: nesting count for lockdep.
1016  *
1017  * Check if the given PM domain can be powered off (during system suspend or
1018  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1019  *
1020  * This function is only called in "noirq" and "syscore" stages of system power
1021  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1022  * these cases the lock must be held.
1023  */
1024 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1025 				 unsigned int depth)
1026 {
1027 	struct gpd_link *link;
1028 
1029 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1030 		return;
1031 
1032 	if (genpd->suspended_count != genpd->device_count
1033 	    || atomic_read(&genpd->sd_count) > 0)
1034 		return;
1035 
1036 	/* Choose the deepest state when suspending */
1037 	genpd->state_idx = genpd->state_count - 1;
1038 	if (_genpd_power_off(genpd, false))
1039 		return;
1040 
1041 	genpd->status = GENPD_STATE_OFF;
1042 
1043 	list_for_each_entry(link, &genpd->child_links, child_node) {
1044 		genpd_sd_counter_dec(link->parent);
1045 
1046 		if (use_lock)
1047 			genpd_lock_nested(link->parent, depth + 1);
1048 
1049 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1050 
1051 		if (use_lock)
1052 			genpd_unlock(link->parent);
1053 	}
1054 }
1055 
1056 /**
1057  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1058  * @genpd: PM domain to power on.
1059  * @use_lock: use the lock.
1060  * @depth: nesting count for lockdep.
1061  *
1062  * This function is only called in "noirq" and "syscore" stages of system power
1063  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1064  * these cases the lock must be held.
1065  */
1066 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1067 				unsigned int depth)
1068 {
1069 	struct gpd_link *link;
1070 
1071 	if (genpd_status_on(genpd))
1072 		return;
1073 
1074 	list_for_each_entry(link, &genpd->child_links, child_node) {
1075 		genpd_sd_counter_inc(link->parent);
1076 
1077 		if (use_lock)
1078 			genpd_lock_nested(link->parent, depth + 1);
1079 
1080 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1081 
1082 		if (use_lock)
1083 			genpd_unlock(link->parent);
1084 	}
1085 
1086 	_genpd_power_on(genpd, false);
1087 	genpd->status = GENPD_STATE_ON;
1088 }
1089 
1090 /**
1091  * resume_needed - Check whether to resume a device before system suspend.
1092  * @dev: Device to check.
1093  * @genpd: PM domain the device belongs to.
1094  *
1095  * There are two cases in which a device that can wake up the system from sleep
1096  * states should be resumed by genpd_prepare(): (1) if the device is enabled
1097  * to wake up the system and it has to remain active for this purpose while the
1098  * system is in the sleep state and (2) if the device is not enabled to wake up
1099  * the system from sleep states and it generally doesn't generate wakeup signals
1100  * by itself (those signals are generated on its behalf by other parts of the
1101  * system).  In the latter case it may be necessary to reconfigure the device's
1102  * wakeup settings during system suspend, because it may have been set up to
1103  * signal remote wakeup from the system's working state as needed by runtime PM.
1104  * Return 'true' in either of the above cases.
1105  */
1106 static bool resume_needed(struct device *dev,
1107 			  const struct generic_pm_domain *genpd)
1108 {
1109 	bool active_wakeup;
1110 
1111 	if (!device_can_wakeup(dev))
1112 		return false;
1113 
1114 	active_wakeup = genpd_is_active_wakeup(genpd);
1115 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1116 }
1117 
1118 /**
1119  * genpd_prepare - Start power transition of a device in a PM domain.
1120  * @dev: Device to start the transition of.
1121  *
1122  * Start a power transition of a device (during a system-wide power transition)
1123  * under the assumption that its pm_domain field points to the domain member of
1124  * an object of type struct generic_pm_domain representing a PM domain
1125  * consisting of I/O devices.
1126  */
1127 static int genpd_prepare(struct device *dev)
1128 {
1129 	struct generic_pm_domain *genpd;
1130 	int ret;
1131 
1132 	dev_dbg(dev, "%s()\n", __func__);
1133 
1134 	genpd = dev_to_genpd(dev);
1135 	if (IS_ERR(genpd))
1136 		return -EINVAL;
1137 
1138 	/*
1139 	 * If a wakeup request is pending for the device, it should be woken up
1140 	 * at this point and a system wakeup event should be reported if it's
1141 	 * set up to wake up the system from sleep states.
1142 	 */
1143 	if (resume_needed(dev, genpd))
1144 		pm_runtime_resume(dev);
1145 
1146 	genpd_lock(genpd);
1147 
1148 	if (genpd->prepared_count++ == 0)
1149 		genpd->suspended_count = 0;
1150 
1151 	genpd_unlock(genpd);
1152 
1153 	ret = pm_generic_prepare(dev);
1154 	if (ret < 0) {
1155 		genpd_lock(genpd);
1156 
1157 		genpd->prepared_count--;
1158 
1159 		genpd_unlock(genpd);
1160 	}
1161 
1162 	/* Never return 1, as genpd don't cope with the direct_complete path. */
1163 	return ret >= 0 ? 0 : ret;
1164 }
1165 
1166 /**
1167  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1168  *   I/O pm domain.
1169  * @dev: Device to suspend.
1170  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1171  *
1172  * Stop the device and remove power from the domain if all devices in it have
1173  * been stopped.
1174  */
1175 static int genpd_finish_suspend(struct device *dev, bool poweroff)
1176 {
1177 	struct generic_pm_domain *genpd;
1178 	int ret = 0;
1179 
1180 	genpd = dev_to_genpd(dev);
1181 	if (IS_ERR(genpd))
1182 		return -EINVAL;
1183 
1184 	if (poweroff)
1185 		ret = pm_generic_poweroff_noirq(dev);
1186 	else
1187 		ret = pm_generic_suspend_noirq(dev);
1188 	if (ret)
1189 		return ret;
1190 
1191 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1192 		return 0;
1193 
1194 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1195 	    !pm_runtime_status_suspended(dev)) {
1196 		ret = genpd_stop_dev(genpd, dev);
1197 		if (ret) {
1198 			if (poweroff)
1199 				pm_generic_restore_noirq(dev);
1200 			else
1201 				pm_generic_resume_noirq(dev);
1202 			return ret;
1203 		}
1204 	}
1205 
1206 	genpd_lock(genpd);
1207 	genpd->suspended_count++;
1208 	genpd_sync_power_off(genpd, true, 0);
1209 	genpd_unlock(genpd);
1210 
1211 	return 0;
1212 }
1213 
1214 /**
1215  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1216  * @dev: Device to suspend.
1217  *
1218  * Stop the device and remove power from the domain if all devices in it have
1219  * been stopped.
1220  */
1221 static int genpd_suspend_noirq(struct device *dev)
1222 {
1223 	dev_dbg(dev, "%s()\n", __func__);
1224 
1225 	return genpd_finish_suspend(dev, false);
1226 }
1227 
1228 /**
1229  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1230  * @dev: Device to resume.
1231  *
1232  * Restore power to the device's PM domain, if necessary, and start the device.
1233  */
1234 static int genpd_resume_noirq(struct device *dev)
1235 {
1236 	struct generic_pm_domain *genpd;
1237 	int ret;
1238 
1239 	dev_dbg(dev, "%s()\n", __func__);
1240 
1241 	genpd = dev_to_genpd(dev);
1242 	if (IS_ERR(genpd))
1243 		return -EINVAL;
1244 
1245 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1246 		return pm_generic_resume_noirq(dev);
1247 
1248 	genpd_lock(genpd);
1249 	genpd_sync_power_on(genpd, true, 0);
1250 	genpd->suspended_count--;
1251 	genpd_unlock(genpd);
1252 
1253 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1254 	    !pm_runtime_status_suspended(dev)) {
1255 		ret = genpd_start_dev(genpd, dev);
1256 		if (ret)
1257 			return ret;
1258 	}
1259 
1260 	return pm_generic_resume_noirq(dev);
1261 }
1262 
1263 /**
1264  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1265  * @dev: Device to freeze.
1266  *
1267  * Carry out a late freeze of a device under the assumption that its
1268  * pm_domain field points to the domain member of an object of type
1269  * struct generic_pm_domain representing a power domain consisting of I/O
1270  * devices.
1271  */
1272 static int genpd_freeze_noirq(struct device *dev)
1273 {
1274 	const struct generic_pm_domain *genpd;
1275 	int ret = 0;
1276 
1277 	dev_dbg(dev, "%s()\n", __func__);
1278 
1279 	genpd = dev_to_genpd(dev);
1280 	if (IS_ERR(genpd))
1281 		return -EINVAL;
1282 
1283 	ret = pm_generic_freeze_noirq(dev);
1284 	if (ret)
1285 		return ret;
1286 
1287 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1288 	    !pm_runtime_status_suspended(dev))
1289 		ret = genpd_stop_dev(genpd, dev);
1290 
1291 	return ret;
1292 }
1293 
1294 /**
1295  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1296  * @dev: Device to thaw.
1297  *
1298  * Start the device, unless power has been removed from the domain already
1299  * before the system transition.
1300  */
1301 static int genpd_thaw_noirq(struct device *dev)
1302 {
1303 	const struct generic_pm_domain *genpd;
1304 	int ret = 0;
1305 
1306 	dev_dbg(dev, "%s()\n", __func__);
1307 
1308 	genpd = dev_to_genpd(dev);
1309 	if (IS_ERR(genpd))
1310 		return -EINVAL;
1311 
1312 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1313 	    !pm_runtime_status_suspended(dev)) {
1314 		ret = genpd_start_dev(genpd, dev);
1315 		if (ret)
1316 			return ret;
1317 	}
1318 
1319 	return pm_generic_thaw_noirq(dev);
1320 }
1321 
1322 /**
1323  * genpd_poweroff_noirq - Completion of hibernation of device in an
1324  *   I/O PM domain.
1325  * @dev: Device to poweroff.
1326  *
1327  * Stop the device and remove power from the domain if all devices in it have
1328  * been stopped.
1329  */
1330 static int genpd_poweroff_noirq(struct device *dev)
1331 {
1332 	dev_dbg(dev, "%s()\n", __func__);
1333 
1334 	return genpd_finish_suspend(dev, true);
1335 }
1336 
1337 /**
1338  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1339  * @dev: Device to resume.
1340  *
1341  * Make sure the domain will be in the same power state as before the
1342  * hibernation the system is resuming from and start the device if necessary.
1343  */
1344 static int genpd_restore_noirq(struct device *dev)
1345 {
1346 	struct generic_pm_domain *genpd;
1347 	int ret = 0;
1348 
1349 	dev_dbg(dev, "%s()\n", __func__);
1350 
1351 	genpd = dev_to_genpd(dev);
1352 	if (IS_ERR(genpd))
1353 		return -EINVAL;
1354 
1355 	/*
1356 	 * At this point suspended_count == 0 means we are being run for the
1357 	 * first time for the given domain in the present cycle.
1358 	 */
1359 	genpd_lock(genpd);
1360 	if (genpd->suspended_count++ == 0) {
1361 		/*
1362 		 * The boot kernel might put the domain into arbitrary state,
1363 		 * so make it appear as powered off to genpd_sync_power_on(),
1364 		 * so that it tries to power it on in case it was really off.
1365 		 */
1366 		genpd->status = GENPD_STATE_OFF;
1367 	}
1368 
1369 	genpd_sync_power_on(genpd, true, 0);
1370 	genpd_unlock(genpd);
1371 
1372 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1373 	    !pm_runtime_status_suspended(dev)) {
1374 		ret = genpd_start_dev(genpd, dev);
1375 		if (ret)
1376 			return ret;
1377 	}
1378 
1379 	return pm_generic_restore_noirq(dev);
1380 }
1381 
1382 /**
1383  * genpd_complete - Complete power transition of a device in a power domain.
1384  * @dev: Device to complete the transition of.
1385  *
1386  * Complete a power transition of a device (during a system-wide power
1387  * transition) under the assumption that its pm_domain field points to the
1388  * domain member of an object of type struct generic_pm_domain representing
1389  * a power domain consisting of I/O devices.
1390  */
1391 static void genpd_complete(struct device *dev)
1392 {
1393 	struct generic_pm_domain *genpd;
1394 
1395 	dev_dbg(dev, "%s()\n", __func__);
1396 
1397 	genpd = dev_to_genpd(dev);
1398 	if (IS_ERR(genpd))
1399 		return;
1400 
1401 	pm_generic_complete(dev);
1402 
1403 	genpd_lock(genpd);
1404 
1405 	genpd->prepared_count--;
1406 	if (!genpd->prepared_count)
1407 		genpd_queue_power_off_work(genpd);
1408 
1409 	genpd_unlock(genpd);
1410 }
1411 
1412 static void genpd_switch_state(struct device *dev, bool suspend)
1413 {
1414 	struct generic_pm_domain *genpd;
1415 	bool use_lock;
1416 
1417 	genpd = dev_to_genpd_safe(dev);
1418 	if (!genpd)
1419 		return;
1420 
1421 	use_lock = genpd_is_irq_safe(genpd);
1422 
1423 	if (use_lock)
1424 		genpd_lock(genpd);
1425 
1426 	if (suspend) {
1427 		genpd->suspended_count++;
1428 		genpd_sync_power_off(genpd, use_lock, 0);
1429 	} else {
1430 		genpd_sync_power_on(genpd, use_lock, 0);
1431 		genpd->suspended_count--;
1432 	}
1433 
1434 	if (use_lock)
1435 		genpd_unlock(genpd);
1436 }
1437 
1438 /**
1439  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1440  * @dev: The device that is attached to the genpd, that can be suspended.
1441  *
1442  * This routine should typically be called for a device that needs to be
1443  * suspended during the syscore suspend phase. It may also be called during
1444  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1445  * genpd.
1446  */
1447 void dev_pm_genpd_suspend(struct device *dev)
1448 {
1449 	genpd_switch_state(dev, true);
1450 }
1451 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1452 
1453 /**
1454  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1455  * @dev: The device that is attached to the genpd, which needs to be resumed.
1456  *
1457  * This routine should typically be called for a device that needs to be resumed
1458  * during the syscore resume phase. It may also be called during suspend-to-idle
1459  * to resume a corresponding CPU device that is attached to a genpd.
1460  */
1461 void dev_pm_genpd_resume(struct device *dev)
1462 {
1463 	genpd_switch_state(dev, false);
1464 }
1465 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
1466 
1467 #else /* !CONFIG_PM_SLEEP */
1468 
1469 #define genpd_prepare		NULL
1470 #define genpd_suspend_noirq	NULL
1471 #define genpd_resume_noirq	NULL
1472 #define genpd_freeze_noirq	NULL
1473 #define genpd_thaw_noirq	NULL
1474 #define genpd_poweroff_noirq	NULL
1475 #define genpd_restore_noirq	NULL
1476 #define genpd_complete		NULL
1477 
1478 #endif /* CONFIG_PM_SLEEP */
1479 
1480 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
1481 {
1482 	struct generic_pm_domain_data *gpd_data;
1483 	int ret;
1484 
1485 	ret = dev_pm_get_subsys_data(dev);
1486 	if (ret)
1487 		return ERR_PTR(ret);
1488 
1489 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1490 	if (!gpd_data) {
1491 		ret = -ENOMEM;
1492 		goto err_put;
1493 	}
1494 
1495 	gpd_data->base.dev = dev;
1496 	gpd_data->td.constraint_changed = true;
1497 	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1498 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1499 	gpd_data->next_wakeup = KTIME_MAX;
1500 
1501 	spin_lock_irq(&dev->power.lock);
1502 
1503 	if (dev->power.subsys_data->domain_data) {
1504 		ret = -EINVAL;
1505 		goto err_free;
1506 	}
1507 
1508 	dev->power.subsys_data->domain_data = &gpd_data->base;
1509 
1510 	spin_unlock_irq(&dev->power.lock);
1511 
1512 	return gpd_data;
1513 
1514  err_free:
1515 	spin_unlock_irq(&dev->power.lock);
1516 	kfree(gpd_data);
1517  err_put:
1518 	dev_pm_put_subsys_data(dev);
1519 	return ERR_PTR(ret);
1520 }
1521 
1522 static void genpd_free_dev_data(struct device *dev,
1523 				struct generic_pm_domain_data *gpd_data)
1524 {
1525 	spin_lock_irq(&dev->power.lock);
1526 
1527 	dev->power.subsys_data->domain_data = NULL;
1528 
1529 	spin_unlock_irq(&dev->power.lock);
1530 
1531 	kfree(gpd_data);
1532 	dev_pm_put_subsys_data(dev);
1533 }
1534 
1535 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1536 				 int cpu, bool set, unsigned int depth)
1537 {
1538 	struct gpd_link *link;
1539 
1540 	if (!genpd_is_cpu_domain(genpd))
1541 		return;
1542 
1543 	list_for_each_entry(link, &genpd->child_links, child_node) {
1544 		struct generic_pm_domain *parent = link->parent;
1545 
1546 		genpd_lock_nested(parent, depth + 1);
1547 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1548 		genpd_unlock(parent);
1549 	}
1550 
1551 	if (set)
1552 		cpumask_set_cpu(cpu, genpd->cpus);
1553 	else
1554 		cpumask_clear_cpu(cpu, genpd->cpus);
1555 }
1556 
1557 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1558 {
1559 	if (cpu >= 0)
1560 		genpd_update_cpumask(genpd, cpu, true, 0);
1561 }
1562 
1563 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1564 {
1565 	if (cpu >= 0)
1566 		genpd_update_cpumask(genpd, cpu, false, 0);
1567 }
1568 
1569 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1570 {
1571 	int cpu;
1572 
1573 	if (!genpd_is_cpu_domain(genpd))
1574 		return -1;
1575 
1576 	for_each_possible_cpu(cpu) {
1577 		if (get_cpu_device(cpu) == dev)
1578 			return cpu;
1579 	}
1580 
1581 	return -1;
1582 }
1583 
1584 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1585 			    struct device *base_dev)
1586 {
1587 	struct generic_pm_domain_data *gpd_data;
1588 	int ret;
1589 
1590 	dev_dbg(dev, "%s()\n", __func__);
1591 
1592 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1593 		return -EINVAL;
1594 
1595 	gpd_data = genpd_alloc_dev_data(dev);
1596 	if (IS_ERR(gpd_data))
1597 		return PTR_ERR(gpd_data);
1598 
1599 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1600 
1601 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1602 	if (ret)
1603 		goto out;
1604 
1605 	genpd_lock(genpd);
1606 
1607 	genpd_set_cpumask(genpd, gpd_data->cpu);
1608 	dev_pm_domain_set(dev, &genpd->domain);
1609 
1610 	genpd->device_count++;
1611 	genpd->max_off_time_changed = true;
1612 
1613 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1614 
1615 	genpd_unlock(genpd);
1616  out:
1617 	if (ret)
1618 		genpd_free_dev_data(dev, gpd_data);
1619 	else
1620 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1621 					DEV_PM_QOS_RESUME_LATENCY);
1622 
1623 	return ret;
1624 }
1625 
1626 /**
1627  * pm_genpd_add_device - Add a device to an I/O PM domain.
1628  * @genpd: PM domain to add the device to.
1629  * @dev: Device to be added.
1630  */
1631 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1632 {
1633 	int ret;
1634 
1635 	mutex_lock(&gpd_list_lock);
1636 	ret = genpd_add_device(genpd, dev, dev);
1637 	mutex_unlock(&gpd_list_lock);
1638 
1639 	return ret;
1640 }
1641 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1642 
1643 static int genpd_remove_device(struct generic_pm_domain *genpd,
1644 			       struct device *dev)
1645 {
1646 	struct generic_pm_domain_data *gpd_data;
1647 	struct pm_domain_data *pdd;
1648 	int ret = 0;
1649 
1650 	dev_dbg(dev, "%s()\n", __func__);
1651 
1652 	pdd = dev->power.subsys_data->domain_data;
1653 	gpd_data = to_gpd_data(pdd);
1654 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1655 				   DEV_PM_QOS_RESUME_LATENCY);
1656 
1657 	genpd_lock(genpd);
1658 
1659 	if (genpd->prepared_count > 0) {
1660 		ret = -EAGAIN;
1661 		goto out;
1662 	}
1663 
1664 	genpd->device_count--;
1665 	genpd->max_off_time_changed = true;
1666 
1667 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1668 	dev_pm_domain_set(dev, NULL);
1669 
1670 	list_del_init(&pdd->list_node);
1671 
1672 	genpd_unlock(genpd);
1673 
1674 	if (genpd->detach_dev)
1675 		genpd->detach_dev(genpd, dev);
1676 
1677 	genpd_free_dev_data(dev, gpd_data);
1678 
1679 	return 0;
1680 
1681  out:
1682 	genpd_unlock(genpd);
1683 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1684 
1685 	return ret;
1686 }
1687 
1688 /**
1689  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1690  * @dev: Device to be removed.
1691  */
1692 int pm_genpd_remove_device(struct device *dev)
1693 {
1694 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1695 
1696 	if (!genpd)
1697 		return -EINVAL;
1698 
1699 	return genpd_remove_device(genpd, dev);
1700 }
1701 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1702 
1703 /**
1704  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1705  *
1706  * @dev: Device that should be associated with the notifier
1707  * @nb: The notifier block to register
1708  *
1709  * Users may call this function to add a genpd power on/off notifier for an
1710  * attached @dev. Only one notifier per device is allowed. The notifier is
1711  * sent when genpd is powering on/off the PM domain.
1712  *
1713  * It is assumed that the user guarantee that the genpd wouldn't be detached
1714  * while this routine is getting called.
1715  *
1716  * Returns 0 on success and negative error values on failures.
1717  */
1718 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1719 {
1720 	struct generic_pm_domain *genpd;
1721 	struct generic_pm_domain_data *gpd_data;
1722 	int ret;
1723 
1724 	genpd = dev_to_genpd_safe(dev);
1725 	if (!genpd)
1726 		return -ENODEV;
1727 
1728 	if (WARN_ON(!dev->power.subsys_data ||
1729 		     !dev->power.subsys_data->domain_data))
1730 		return -EINVAL;
1731 
1732 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1733 	if (gpd_data->power_nb)
1734 		return -EEXIST;
1735 
1736 	genpd_lock(genpd);
1737 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1738 	genpd_unlock(genpd);
1739 
1740 	if (ret) {
1741 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1742 			 genpd->name);
1743 		return ret;
1744 	}
1745 
1746 	gpd_data->power_nb = nb;
1747 	return 0;
1748 }
1749 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
1750 
1751 /**
1752  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1753  *
1754  * @dev: Device that is associated with the notifier
1755  *
1756  * Users may call this function to remove a genpd power on/off notifier for an
1757  * attached @dev.
1758  *
1759  * It is assumed that the user guarantee that the genpd wouldn't be detached
1760  * while this routine is getting called.
1761  *
1762  * Returns 0 on success and negative error values on failures.
1763  */
1764 int dev_pm_genpd_remove_notifier(struct device *dev)
1765 {
1766 	struct generic_pm_domain *genpd;
1767 	struct generic_pm_domain_data *gpd_data;
1768 	int ret;
1769 
1770 	genpd = dev_to_genpd_safe(dev);
1771 	if (!genpd)
1772 		return -ENODEV;
1773 
1774 	if (WARN_ON(!dev->power.subsys_data ||
1775 		     !dev->power.subsys_data->domain_data))
1776 		return -EINVAL;
1777 
1778 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1779 	if (!gpd_data->power_nb)
1780 		return -ENODEV;
1781 
1782 	genpd_lock(genpd);
1783 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1784 					    gpd_data->power_nb);
1785 	genpd_unlock(genpd);
1786 
1787 	if (ret) {
1788 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1789 			 genpd->name);
1790 		return ret;
1791 	}
1792 
1793 	gpd_data->power_nb = NULL;
1794 	return 0;
1795 }
1796 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1797 
1798 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1799 			       struct generic_pm_domain *subdomain)
1800 {
1801 	struct gpd_link *link, *itr;
1802 	int ret = 0;
1803 
1804 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1805 	    || genpd == subdomain)
1806 		return -EINVAL;
1807 
1808 	/*
1809 	 * If the domain can be powered on/off in an IRQ safe
1810 	 * context, ensure that the subdomain can also be
1811 	 * powered on/off in that context.
1812 	 */
1813 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1814 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1815 				genpd->name, subdomain->name);
1816 		return -EINVAL;
1817 	}
1818 
1819 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1820 	if (!link)
1821 		return -ENOMEM;
1822 
1823 	genpd_lock(subdomain);
1824 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1825 
1826 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1827 		ret = -EINVAL;
1828 		goto out;
1829 	}
1830 
1831 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1832 		if (itr->child == subdomain && itr->parent == genpd) {
1833 			ret = -EINVAL;
1834 			goto out;
1835 		}
1836 	}
1837 
1838 	link->parent = genpd;
1839 	list_add_tail(&link->parent_node, &genpd->parent_links);
1840 	link->child = subdomain;
1841 	list_add_tail(&link->child_node, &subdomain->child_links);
1842 	if (genpd_status_on(subdomain))
1843 		genpd_sd_counter_inc(genpd);
1844 
1845  out:
1846 	genpd_unlock(genpd);
1847 	genpd_unlock(subdomain);
1848 	if (ret)
1849 		kfree(link);
1850 	return ret;
1851 }
1852 
1853 /**
1854  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1855  * @genpd: Leader PM domain to add the subdomain to.
1856  * @subdomain: Subdomain to be added.
1857  */
1858 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1859 			   struct generic_pm_domain *subdomain)
1860 {
1861 	int ret;
1862 
1863 	mutex_lock(&gpd_list_lock);
1864 	ret = genpd_add_subdomain(genpd, subdomain);
1865 	mutex_unlock(&gpd_list_lock);
1866 
1867 	return ret;
1868 }
1869 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
1870 
1871 /**
1872  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1873  * @genpd: Leader PM domain to remove the subdomain from.
1874  * @subdomain: Subdomain to be removed.
1875  */
1876 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1877 			      struct generic_pm_domain *subdomain)
1878 {
1879 	struct gpd_link *l, *link;
1880 	int ret = -EINVAL;
1881 
1882 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1883 		return -EINVAL;
1884 
1885 	genpd_lock(subdomain);
1886 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1887 
1888 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1889 		pr_warn("%s: unable to remove subdomain %s\n",
1890 			genpd->name, subdomain->name);
1891 		ret = -EBUSY;
1892 		goto out;
1893 	}
1894 
1895 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1896 		if (link->child != subdomain)
1897 			continue;
1898 
1899 		list_del(&link->parent_node);
1900 		list_del(&link->child_node);
1901 		kfree(link);
1902 		if (genpd_status_on(subdomain))
1903 			genpd_sd_counter_dec(genpd);
1904 
1905 		ret = 0;
1906 		break;
1907 	}
1908 
1909 out:
1910 	genpd_unlock(genpd);
1911 	genpd_unlock(subdomain);
1912 
1913 	return ret;
1914 }
1915 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1916 
1917 static void genpd_free_default_power_state(struct genpd_power_state *states,
1918 					   unsigned int state_count)
1919 {
1920 	kfree(states);
1921 }
1922 
1923 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1924 {
1925 	struct genpd_power_state *state;
1926 
1927 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1928 	if (!state)
1929 		return -ENOMEM;
1930 
1931 	genpd->states = state;
1932 	genpd->state_count = 1;
1933 	genpd->free_states = genpd_free_default_power_state;
1934 
1935 	return 0;
1936 }
1937 
1938 static void genpd_lock_init(struct generic_pm_domain *genpd)
1939 {
1940 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1941 		spin_lock_init(&genpd->slock);
1942 		genpd->lock_ops = &genpd_spin_ops;
1943 	} else {
1944 		mutex_init(&genpd->mlock);
1945 		genpd->lock_ops = &genpd_mtx_ops;
1946 	}
1947 }
1948 
1949 /**
1950  * pm_genpd_init - Initialize a generic I/O PM domain object.
1951  * @genpd: PM domain object to initialize.
1952  * @gov: PM domain governor to associate with the domain (may be NULL).
1953  * @is_off: Initial value of the domain's power_is_off field.
1954  *
1955  * Returns 0 on successful initialization, else a negative error code.
1956  */
1957 int pm_genpd_init(struct generic_pm_domain *genpd,
1958 		  struct dev_power_governor *gov, bool is_off)
1959 {
1960 	int ret;
1961 
1962 	if (IS_ERR_OR_NULL(genpd))
1963 		return -EINVAL;
1964 
1965 	INIT_LIST_HEAD(&genpd->parent_links);
1966 	INIT_LIST_HEAD(&genpd->child_links);
1967 	INIT_LIST_HEAD(&genpd->dev_list);
1968 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
1969 	genpd_lock_init(genpd);
1970 	genpd->gov = gov;
1971 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1972 	atomic_set(&genpd->sd_count, 0);
1973 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
1974 	genpd->device_count = 0;
1975 	genpd->max_off_time_ns = -1;
1976 	genpd->max_off_time_changed = true;
1977 	genpd->provider = NULL;
1978 	genpd->has_provider = false;
1979 	genpd->accounting_time = ktime_get();
1980 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1981 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1982 	genpd->domain.ops.prepare = genpd_prepare;
1983 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1984 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1985 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1986 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1987 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1988 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1989 	genpd->domain.ops.complete = genpd_complete;
1990 	genpd->domain.start = genpd_dev_pm_start;
1991 
1992 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1993 		genpd->dev_ops.stop = pm_clk_suspend;
1994 		genpd->dev_ops.start = pm_clk_resume;
1995 	}
1996 
1997 	/* Always-on domains must be powered on at initialization. */
1998 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
1999 			!genpd_status_on(genpd))
2000 		return -EINVAL;
2001 
2002 	if (genpd_is_cpu_domain(genpd) &&
2003 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2004 		return -ENOMEM;
2005 
2006 	/* Use only one "off" state if there were no states declared */
2007 	if (genpd->state_count == 0) {
2008 		ret = genpd_set_default_power_state(genpd);
2009 		if (ret) {
2010 			if (genpd_is_cpu_domain(genpd))
2011 				free_cpumask_var(genpd->cpus);
2012 			return ret;
2013 		}
2014 	} else if (!gov && genpd->state_count > 1) {
2015 		pr_warn("%s: no governor for states\n", genpd->name);
2016 	}
2017 
2018 	device_initialize(&genpd->dev);
2019 	dev_set_name(&genpd->dev, "%s", genpd->name);
2020 
2021 	mutex_lock(&gpd_list_lock);
2022 	list_add(&genpd->gpd_list_node, &gpd_list);
2023 	genpd_debug_add(genpd);
2024 	mutex_unlock(&gpd_list_lock);
2025 
2026 	return 0;
2027 }
2028 EXPORT_SYMBOL_GPL(pm_genpd_init);
2029 
2030 static int genpd_remove(struct generic_pm_domain *genpd)
2031 {
2032 	struct gpd_link *l, *link;
2033 
2034 	if (IS_ERR_OR_NULL(genpd))
2035 		return -EINVAL;
2036 
2037 	genpd_lock(genpd);
2038 
2039 	if (genpd->has_provider) {
2040 		genpd_unlock(genpd);
2041 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2042 		return -EBUSY;
2043 	}
2044 
2045 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2046 		genpd_unlock(genpd);
2047 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2048 		return -EBUSY;
2049 	}
2050 
2051 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2052 		list_del(&link->parent_node);
2053 		list_del(&link->child_node);
2054 		kfree(link);
2055 	}
2056 
2057 	genpd_debug_remove(genpd);
2058 	list_del(&genpd->gpd_list_node);
2059 	genpd_unlock(genpd);
2060 	cancel_work_sync(&genpd->power_off_work);
2061 	if (genpd_is_cpu_domain(genpd))
2062 		free_cpumask_var(genpd->cpus);
2063 	if (genpd->free_states)
2064 		genpd->free_states(genpd->states, genpd->state_count);
2065 
2066 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2067 
2068 	return 0;
2069 }
2070 
2071 /**
2072  * pm_genpd_remove - Remove a generic I/O PM domain
2073  * @genpd: Pointer to PM domain that is to be removed.
2074  *
2075  * To remove the PM domain, this function:
2076  *  - Removes the PM domain as a subdomain to any parent domains,
2077  *    if it was added.
2078  *  - Removes the PM domain from the list of registered PM domains.
2079  *
2080  * The PM domain will only be removed, if the associated provider has
2081  * been removed, it is not a parent to any other PM domain and has no
2082  * devices associated with it.
2083  */
2084 int pm_genpd_remove(struct generic_pm_domain *genpd)
2085 {
2086 	int ret;
2087 
2088 	mutex_lock(&gpd_list_lock);
2089 	ret = genpd_remove(genpd);
2090 	mutex_unlock(&gpd_list_lock);
2091 
2092 	return ret;
2093 }
2094 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2095 
2096 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2097 
2098 /*
2099  * Device Tree based PM domain providers.
2100  *
2101  * The code below implements generic device tree based PM domain providers that
2102  * bind device tree nodes with generic PM domains registered in the system.
2103  *
2104  * Any driver that registers generic PM domains and needs to support binding of
2105  * devices to these domains is supposed to register a PM domain provider, which
2106  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2107  *
2108  * Two simple mapping functions have been provided for convenience:
2109  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2110  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2111  *    index.
2112  */
2113 
2114 /**
2115  * struct of_genpd_provider - PM domain provider registration structure
2116  * @link: Entry in global list of PM domain providers
2117  * @node: Pointer to device tree node of PM domain provider
2118  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2119  *         into a PM domain.
2120  * @data: context pointer to be passed into @xlate callback
2121  */
2122 struct of_genpd_provider {
2123 	struct list_head link;
2124 	struct device_node *node;
2125 	genpd_xlate_t xlate;
2126 	void *data;
2127 };
2128 
2129 /* List of registered PM domain providers. */
2130 static LIST_HEAD(of_genpd_providers);
2131 /* Mutex to protect the list above. */
2132 static DEFINE_MUTEX(of_genpd_mutex);
2133 
2134 /**
2135  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2136  * @genpdspec: OF phandle args to map into a PM domain
2137  * @data: xlate function private data - pointer to struct generic_pm_domain
2138  *
2139  * This is a generic xlate function that can be used to model PM domains that
2140  * have their own device tree nodes. The private data of xlate function needs
2141  * to be a valid pointer to struct generic_pm_domain.
2142  */
2143 static struct generic_pm_domain *genpd_xlate_simple(
2144 					struct of_phandle_args *genpdspec,
2145 					void *data)
2146 {
2147 	return data;
2148 }
2149 
2150 /**
2151  * genpd_xlate_onecell() - Xlate function using a single index.
2152  * @genpdspec: OF phandle args to map into a PM domain
2153  * @data: xlate function private data - pointer to struct genpd_onecell_data
2154  *
2155  * This is a generic xlate function that can be used to model simple PM domain
2156  * controllers that have one device tree node and provide multiple PM domains.
2157  * A single cell is used as an index into an array of PM domains specified in
2158  * the genpd_onecell_data struct when registering the provider.
2159  */
2160 static struct generic_pm_domain *genpd_xlate_onecell(
2161 					struct of_phandle_args *genpdspec,
2162 					void *data)
2163 {
2164 	struct genpd_onecell_data *genpd_data = data;
2165 	unsigned int idx = genpdspec->args[0];
2166 
2167 	if (genpdspec->args_count != 1)
2168 		return ERR_PTR(-EINVAL);
2169 
2170 	if (idx >= genpd_data->num_domains) {
2171 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2172 		return ERR_PTR(-EINVAL);
2173 	}
2174 
2175 	if (!genpd_data->domains[idx])
2176 		return ERR_PTR(-ENOENT);
2177 
2178 	return genpd_data->domains[idx];
2179 }
2180 
2181 /**
2182  * genpd_add_provider() - Register a PM domain provider for a node
2183  * @np: Device node pointer associated with the PM domain provider.
2184  * @xlate: Callback for decoding PM domain from phandle arguments.
2185  * @data: Context pointer for @xlate callback.
2186  */
2187 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2188 			      void *data)
2189 {
2190 	struct of_genpd_provider *cp;
2191 
2192 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2193 	if (!cp)
2194 		return -ENOMEM;
2195 
2196 	cp->node = of_node_get(np);
2197 	cp->data = data;
2198 	cp->xlate = xlate;
2199 	fwnode_dev_initialized(&np->fwnode, true);
2200 
2201 	mutex_lock(&of_genpd_mutex);
2202 	list_add(&cp->link, &of_genpd_providers);
2203 	mutex_unlock(&of_genpd_mutex);
2204 	pr_debug("Added domain provider from %pOF\n", np);
2205 
2206 	return 0;
2207 }
2208 
2209 static bool genpd_present(const struct generic_pm_domain *genpd)
2210 {
2211 	const struct generic_pm_domain *gpd;
2212 
2213 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
2214 		if (gpd == genpd)
2215 			return true;
2216 	return false;
2217 }
2218 
2219 /**
2220  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2221  * @np: Device node pointer associated with the PM domain provider.
2222  * @genpd: Pointer to PM domain associated with the PM domain provider.
2223  */
2224 int of_genpd_add_provider_simple(struct device_node *np,
2225 				 struct generic_pm_domain *genpd)
2226 {
2227 	int ret = -EINVAL;
2228 
2229 	if (!np || !genpd)
2230 		return -EINVAL;
2231 
2232 	mutex_lock(&gpd_list_lock);
2233 
2234 	if (!genpd_present(genpd))
2235 		goto unlock;
2236 
2237 	genpd->dev.of_node = np;
2238 
2239 	/* Parse genpd OPP table */
2240 	if (genpd->set_performance_state) {
2241 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2242 		if (ret) {
2243 			if (ret != -EPROBE_DEFER)
2244 				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2245 					ret);
2246 			goto unlock;
2247 		}
2248 
2249 		/*
2250 		 * Save table for faster processing while setting performance
2251 		 * state.
2252 		 */
2253 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2254 		WARN_ON(IS_ERR(genpd->opp_table));
2255 	}
2256 
2257 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2258 	if (ret) {
2259 		if (genpd->set_performance_state) {
2260 			dev_pm_opp_put_opp_table(genpd->opp_table);
2261 			dev_pm_opp_of_remove_table(&genpd->dev);
2262 		}
2263 
2264 		goto unlock;
2265 	}
2266 
2267 	genpd->provider = &np->fwnode;
2268 	genpd->has_provider = true;
2269 
2270 unlock:
2271 	mutex_unlock(&gpd_list_lock);
2272 
2273 	return ret;
2274 }
2275 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
2276 
2277 /**
2278  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2279  * @np: Device node pointer associated with the PM domain provider.
2280  * @data: Pointer to the data associated with the PM domain provider.
2281  */
2282 int of_genpd_add_provider_onecell(struct device_node *np,
2283 				  struct genpd_onecell_data *data)
2284 {
2285 	struct generic_pm_domain *genpd;
2286 	unsigned int i;
2287 	int ret = -EINVAL;
2288 
2289 	if (!np || !data)
2290 		return -EINVAL;
2291 
2292 	mutex_lock(&gpd_list_lock);
2293 
2294 	if (!data->xlate)
2295 		data->xlate = genpd_xlate_onecell;
2296 
2297 	for (i = 0; i < data->num_domains; i++) {
2298 		genpd = data->domains[i];
2299 
2300 		if (!genpd)
2301 			continue;
2302 		if (!genpd_present(genpd))
2303 			goto error;
2304 
2305 		genpd->dev.of_node = np;
2306 
2307 		/* Parse genpd OPP table */
2308 		if (genpd->set_performance_state) {
2309 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2310 			if (ret) {
2311 				if (ret != -EPROBE_DEFER)
2312 					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2313 						i, ret);
2314 				goto error;
2315 			}
2316 
2317 			/*
2318 			 * Save table for faster processing while setting
2319 			 * performance state.
2320 			 */
2321 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2322 			WARN_ON(IS_ERR(genpd->opp_table));
2323 		}
2324 
2325 		genpd->provider = &np->fwnode;
2326 		genpd->has_provider = true;
2327 	}
2328 
2329 	ret = genpd_add_provider(np, data->xlate, data);
2330 	if (ret < 0)
2331 		goto error;
2332 
2333 	mutex_unlock(&gpd_list_lock);
2334 
2335 	return 0;
2336 
2337 error:
2338 	while (i--) {
2339 		genpd = data->domains[i];
2340 
2341 		if (!genpd)
2342 			continue;
2343 
2344 		genpd->provider = NULL;
2345 		genpd->has_provider = false;
2346 
2347 		if (genpd->set_performance_state) {
2348 			dev_pm_opp_put_opp_table(genpd->opp_table);
2349 			dev_pm_opp_of_remove_table(&genpd->dev);
2350 		}
2351 	}
2352 
2353 	mutex_unlock(&gpd_list_lock);
2354 
2355 	return ret;
2356 }
2357 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
2358 
2359 /**
2360  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2361  * @np: Device node pointer associated with the PM domain provider
2362  */
2363 void of_genpd_del_provider(struct device_node *np)
2364 {
2365 	struct of_genpd_provider *cp, *tmp;
2366 	struct generic_pm_domain *gpd;
2367 
2368 	mutex_lock(&gpd_list_lock);
2369 	mutex_lock(&of_genpd_mutex);
2370 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2371 		if (cp->node == np) {
2372 			/*
2373 			 * For each PM domain associated with the
2374 			 * provider, set the 'has_provider' to false
2375 			 * so that the PM domain can be safely removed.
2376 			 */
2377 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2378 				if (gpd->provider == &np->fwnode) {
2379 					gpd->has_provider = false;
2380 
2381 					if (!gpd->set_performance_state)
2382 						continue;
2383 
2384 					dev_pm_opp_put_opp_table(gpd->opp_table);
2385 					dev_pm_opp_of_remove_table(&gpd->dev);
2386 				}
2387 			}
2388 
2389 			fwnode_dev_initialized(&cp->node->fwnode, false);
2390 			list_del(&cp->link);
2391 			of_node_put(cp->node);
2392 			kfree(cp);
2393 			break;
2394 		}
2395 	}
2396 	mutex_unlock(&of_genpd_mutex);
2397 	mutex_unlock(&gpd_list_lock);
2398 }
2399 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2400 
2401 /**
2402  * genpd_get_from_provider() - Look-up PM domain
2403  * @genpdspec: OF phandle args to use for look-up
2404  *
2405  * Looks for a PM domain provider under the node specified by @genpdspec and if
2406  * found, uses xlate function of the provider to map phandle args to a PM
2407  * domain.
2408  *
2409  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2410  * on failure.
2411  */
2412 static struct generic_pm_domain *genpd_get_from_provider(
2413 					struct of_phandle_args *genpdspec)
2414 {
2415 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2416 	struct of_genpd_provider *provider;
2417 
2418 	if (!genpdspec)
2419 		return ERR_PTR(-EINVAL);
2420 
2421 	mutex_lock(&of_genpd_mutex);
2422 
2423 	/* Check if we have such a provider in our array */
2424 	list_for_each_entry(provider, &of_genpd_providers, link) {
2425 		if (provider->node == genpdspec->np)
2426 			genpd = provider->xlate(genpdspec, provider->data);
2427 		if (!IS_ERR(genpd))
2428 			break;
2429 	}
2430 
2431 	mutex_unlock(&of_genpd_mutex);
2432 
2433 	return genpd;
2434 }
2435 
2436 /**
2437  * of_genpd_add_device() - Add a device to an I/O PM domain
2438  * @genpdspec: OF phandle args to use for look-up PM domain
2439  * @dev: Device to be added.
2440  *
2441  * Looks-up an I/O PM domain based upon phandle args provided and adds
2442  * the device to the PM domain. Returns a negative error code on failure.
2443  */
2444 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2445 {
2446 	struct generic_pm_domain *genpd;
2447 	int ret;
2448 
2449 	mutex_lock(&gpd_list_lock);
2450 
2451 	genpd = genpd_get_from_provider(genpdspec);
2452 	if (IS_ERR(genpd)) {
2453 		ret = PTR_ERR(genpd);
2454 		goto out;
2455 	}
2456 
2457 	ret = genpd_add_device(genpd, dev, dev);
2458 
2459 out:
2460 	mutex_unlock(&gpd_list_lock);
2461 
2462 	return ret;
2463 }
2464 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2465 
2466 /**
2467  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2468  * @parent_spec: OF phandle args to use for parent PM domain look-up
2469  * @subdomain_spec: OF phandle args to use for subdomain look-up
2470  *
2471  * Looks-up a parent PM domain and subdomain based upon phandle args
2472  * provided and adds the subdomain to the parent PM domain. Returns a
2473  * negative error code on failure.
2474  */
2475 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2476 			   struct of_phandle_args *subdomain_spec)
2477 {
2478 	struct generic_pm_domain *parent, *subdomain;
2479 	int ret;
2480 
2481 	mutex_lock(&gpd_list_lock);
2482 
2483 	parent = genpd_get_from_provider(parent_spec);
2484 	if (IS_ERR(parent)) {
2485 		ret = PTR_ERR(parent);
2486 		goto out;
2487 	}
2488 
2489 	subdomain = genpd_get_from_provider(subdomain_spec);
2490 	if (IS_ERR(subdomain)) {
2491 		ret = PTR_ERR(subdomain);
2492 		goto out;
2493 	}
2494 
2495 	ret = genpd_add_subdomain(parent, subdomain);
2496 
2497 out:
2498 	mutex_unlock(&gpd_list_lock);
2499 
2500 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2501 }
2502 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2503 
2504 /**
2505  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2506  * @parent_spec: OF phandle args to use for parent PM domain look-up
2507  * @subdomain_spec: OF phandle args to use for subdomain look-up
2508  *
2509  * Looks-up a parent PM domain and subdomain based upon phandle args
2510  * provided and removes the subdomain from the parent PM domain. Returns a
2511  * negative error code on failure.
2512  */
2513 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2514 			      struct of_phandle_args *subdomain_spec)
2515 {
2516 	struct generic_pm_domain *parent, *subdomain;
2517 	int ret;
2518 
2519 	mutex_lock(&gpd_list_lock);
2520 
2521 	parent = genpd_get_from_provider(parent_spec);
2522 	if (IS_ERR(parent)) {
2523 		ret = PTR_ERR(parent);
2524 		goto out;
2525 	}
2526 
2527 	subdomain = genpd_get_from_provider(subdomain_spec);
2528 	if (IS_ERR(subdomain)) {
2529 		ret = PTR_ERR(subdomain);
2530 		goto out;
2531 	}
2532 
2533 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2534 
2535 out:
2536 	mutex_unlock(&gpd_list_lock);
2537 
2538 	return ret;
2539 }
2540 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2541 
2542 /**
2543  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2544  * @provider: Pointer to device structure associated with provider
2545  *
2546  * Find the last PM domain that was added by a particular provider and
2547  * remove this PM domain from the list of PM domains. The provider is
2548  * identified by the 'provider' device structure that is passed. The PM
2549  * domain will only be removed, if the provider associated with domain
2550  * has been removed.
2551  *
2552  * Returns a valid pointer to struct generic_pm_domain on success or
2553  * ERR_PTR() on failure.
2554  */
2555 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2556 {
2557 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2558 	int ret;
2559 
2560 	if (IS_ERR_OR_NULL(np))
2561 		return ERR_PTR(-EINVAL);
2562 
2563 	mutex_lock(&gpd_list_lock);
2564 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2565 		if (gpd->provider == &np->fwnode) {
2566 			ret = genpd_remove(gpd);
2567 			genpd = ret ? ERR_PTR(ret) : gpd;
2568 			break;
2569 		}
2570 	}
2571 	mutex_unlock(&gpd_list_lock);
2572 
2573 	return genpd;
2574 }
2575 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2576 
2577 static void genpd_release_dev(struct device *dev)
2578 {
2579 	of_node_put(dev->of_node);
2580 	kfree(dev);
2581 }
2582 
2583 static struct bus_type genpd_bus_type = {
2584 	.name		= "genpd",
2585 };
2586 
2587 /**
2588  * genpd_dev_pm_detach - Detach a device from its PM domain.
2589  * @dev: Device to detach.
2590  * @power_off: Currently not used
2591  *
2592  * Try to locate a corresponding generic PM domain, which the device was
2593  * attached to previously. If such is found, the device is detached from it.
2594  */
2595 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2596 {
2597 	struct generic_pm_domain *pd;
2598 	unsigned int i;
2599 	int ret = 0;
2600 
2601 	pd = dev_to_genpd(dev);
2602 	if (IS_ERR(pd))
2603 		return;
2604 
2605 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2606 
2607 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2608 		ret = genpd_remove_device(pd, dev);
2609 		if (ret != -EAGAIN)
2610 			break;
2611 
2612 		mdelay(i);
2613 		cond_resched();
2614 	}
2615 
2616 	if (ret < 0) {
2617 		dev_err(dev, "failed to remove from PM domain %s: %d",
2618 			pd->name, ret);
2619 		return;
2620 	}
2621 
2622 	/* Check if PM domain can be powered off after removing this device. */
2623 	genpd_queue_power_off_work(pd);
2624 
2625 	/* Unregister the device if it was created by genpd. */
2626 	if (dev->bus == &genpd_bus_type)
2627 		device_unregister(dev);
2628 }
2629 
2630 static void genpd_dev_pm_sync(struct device *dev)
2631 {
2632 	struct generic_pm_domain *pd;
2633 
2634 	pd = dev_to_genpd(dev);
2635 	if (IS_ERR(pd))
2636 		return;
2637 
2638 	genpd_queue_power_off_work(pd);
2639 }
2640 
2641 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2642 				 unsigned int index, bool power_on)
2643 {
2644 	struct of_phandle_args pd_args;
2645 	struct generic_pm_domain *pd;
2646 	int ret;
2647 
2648 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2649 				"#power-domain-cells", index, &pd_args);
2650 	if (ret < 0)
2651 		return ret;
2652 
2653 	mutex_lock(&gpd_list_lock);
2654 	pd = genpd_get_from_provider(&pd_args);
2655 	of_node_put(pd_args.np);
2656 	if (IS_ERR(pd)) {
2657 		mutex_unlock(&gpd_list_lock);
2658 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2659 			__func__, PTR_ERR(pd));
2660 		return driver_deferred_probe_check_state(base_dev);
2661 	}
2662 
2663 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2664 
2665 	ret = genpd_add_device(pd, dev, base_dev);
2666 	mutex_unlock(&gpd_list_lock);
2667 
2668 	if (ret < 0) {
2669 		if (ret != -EPROBE_DEFER)
2670 			dev_err(dev, "failed to add to PM domain %s: %d",
2671 				pd->name, ret);
2672 		return ret;
2673 	}
2674 
2675 	dev->pm_domain->detach = genpd_dev_pm_detach;
2676 	dev->pm_domain->sync = genpd_dev_pm_sync;
2677 
2678 	if (power_on) {
2679 		genpd_lock(pd);
2680 		ret = genpd_power_on(pd, 0);
2681 		genpd_unlock(pd);
2682 	}
2683 
2684 	if (ret)
2685 		genpd_remove_device(pd, dev);
2686 
2687 	return ret ? -EPROBE_DEFER : 1;
2688 }
2689 
2690 /**
2691  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2692  * @dev: Device to attach.
2693  *
2694  * Parse device's OF node to find a PM domain specifier. If such is found,
2695  * attaches the device to retrieved pm_domain ops.
2696  *
2697  * Returns 1 on successfully attached PM domain, 0 when the device don't need a
2698  * PM domain or when multiple power-domains exists for it, else a negative error
2699  * code. Note that if a power-domain exists for the device, but it cannot be
2700  * found or turned on, then return -EPROBE_DEFER to ensure that the device is
2701  * not probed and to re-try again later.
2702  */
2703 int genpd_dev_pm_attach(struct device *dev)
2704 {
2705 	if (!dev->of_node)
2706 		return 0;
2707 
2708 	/*
2709 	 * Devices with multiple PM domains must be attached separately, as we
2710 	 * can only attach one PM domain per device.
2711 	 */
2712 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2713 				       "#power-domain-cells") != 1)
2714 		return 0;
2715 
2716 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2717 }
2718 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2719 
2720 /**
2721  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2722  * @dev: The device used to lookup the PM domain.
2723  * @index: The index of the PM domain.
2724  *
2725  * Parse device's OF node to find a PM domain specifier at the provided @index.
2726  * If such is found, creates a virtual device and attaches it to the retrieved
2727  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2728  * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
2729  *
2730  * Returns the created virtual device if successfully attached PM domain, NULL
2731  * when the device don't need a PM domain, else an ERR_PTR() in case of
2732  * failures. If a power-domain exists for the device, but cannot be found or
2733  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2734  * is not probed and to re-try again later.
2735  */
2736 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2737 					 unsigned int index)
2738 {
2739 	struct device *virt_dev;
2740 	int num_domains;
2741 	int ret;
2742 
2743 	if (!dev->of_node)
2744 		return NULL;
2745 
2746 	/* Verify that the index is within a valid range. */
2747 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2748 						 "#power-domain-cells");
2749 	if (index >= num_domains)
2750 		return NULL;
2751 
2752 	/* Allocate and register device on the genpd bus. */
2753 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2754 	if (!virt_dev)
2755 		return ERR_PTR(-ENOMEM);
2756 
2757 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2758 	virt_dev->bus = &genpd_bus_type;
2759 	virt_dev->release = genpd_release_dev;
2760 	virt_dev->of_node = of_node_get(dev->of_node);
2761 
2762 	ret = device_register(virt_dev);
2763 	if (ret) {
2764 		put_device(virt_dev);
2765 		return ERR_PTR(ret);
2766 	}
2767 
2768 	/* Try to attach the device to the PM domain at the specified index. */
2769 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2770 	if (ret < 1) {
2771 		device_unregister(virt_dev);
2772 		return ret ? ERR_PTR(ret) : NULL;
2773 	}
2774 
2775 	pm_runtime_enable(virt_dev);
2776 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2777 
2778 	return virt_dev;
2779 }
2780 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2781 
2782 /**
2783  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2784  * @dev: The device used to lookup the PM domain.
2785  * @name: The name of the PM domain.
2786  *
2787  * Parse device's OF node to find a PM domain specifier using the
2788  * power-domain-names DT property. For further description see
2789  * genpd_dev_pm_attach_by_id().
2790  */
2791 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2792 {
2793 	int index;
2794 
2795 	if (!dev->of_node)
2796 		return NULL;
2797 
2798 	index = of_property_match_string(dev->of_node, "power-domain-names",
2799 					 name);
2800 	if (index < 0)
2801 		return NULL;
2802 
2803 	return genpd_dev_pm_attach_by_id(dev, index);
2804 }
2805 
2806 static const struct of_device_id idle_state_match[] = {
2807 	{ .compatible = "domain-idle-state", },
2808 	{ }
2809 };
2810 
2811 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2812 				    struct device_node *state_node)
2813 {
2814 	int err;
2815 	u32 residency;
2816 	u32 entry_latency, exit_latency;
2817 
2818 	err = of_property_read_u32(state_node, "entry-latency-us",
2819 						&entry_latency);
2820 	if (err) {
2821 		pr_debug(" * %pOF missing entry-latency-us property\n",
2822 			 state_node);
2823 		return -EINVAL;
2824 	}
2825 
2826 	err = of_property_read_u32(state_node, "exit-latency-us",
2827 						&exit_latency);
2828 	if (err) {
2829 		pr_debug(" * %pOF missing exit-latency-us property\n",
2830 			 state_node);
2831 		return -EINVAL;
2832 	}
2833 
2834 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2835 	if (!err)
2836 		genpd_state->residency_ns = 1000 * residency;
2837 
2838 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2839 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2840 	genpd_state->fwnode = &state_node->fwnode;
2841 
2842 	return 0;
2843 }
2844 
2845 static int genpd_iterate_idle_states(struct device_node *dn,
2846 				     struct genpd_power_state *states)
2847 {
2848 	int ret;
2849 	struct of_phandle_iterator it;
2850 	struct device_node *np;
2851 	int i = 0;
2852 
2853 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2854 	if (ret <= 0)
2855 		return ret == -ENOENT ? 0 : ret;
2856 
2857 	/* Loop over the phandles until all the requested entry is found */
2858 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2859 		np = it.node;
2860 		if (!of_match_node(idle_state_match, np))
2861 			continue;
2862 		if (states) {
2863 			ret = genpd_parse_state(&states[i], np);
2864 			if (ret) {
2865 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2866 				       np, ret);
2867 				of_node_put(np);
2868 				return ret;
2869 			}
2870 		}
2871 		i++;
2872 	}
2873 
2874 	return i;
2875 }
2876 
2877 /**
2878  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2879  *
2880  * @dn: The genpd device node
2881  * @states: The pointer to which the state array will be saved.
2882  * @n: The count of elements in the array returned from this function.
2883  *
2884  * Returns the device states parsed from the OF node. The memory for the states
2885  * is allocated by this function and is the responsibility of the caller to
2886  * free the memory after use. If any or zero compatible domain idle states is
2887  * found it returns 0 and in case of errors, a negative error code is returned.
2888  */
2889 int of_genpd_parse_idle_states(struct device_node *dn,
2890 			struct genpd_power_state **states, int *n)
2891 {
2892 	struct genpd_power_state *st;
2893 	int ret;
2894 
2895 	ret = genpd_iterate_idle_states(dn, NULL);
2896 	if (ret < 0)
2897 		return ret;
2898 
2899 	if (!ret) {
2900 		*states = NULL;
2901 		*n = 0;
2902 		return 0;
2903 	}
2904 
2905 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2906 	if (!st)
2907 		return -ENOMEM;
2908 
2909 	ret = genpd_iterate_idle_states(dn, st);
2910 	if (ret <= 0) {
2911 		kfree(st);
2912 		return ret < 0 ? ret : -EINVAL;
2913 	}
2914 
2915 	*states = st;
2916 	*n = ret;
2917 
2918 	return 0;
2919 }
2920 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
2921 
2922 /**
2923  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2924  *
2925  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2926  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2927  *	state.
2928  *
2929  * Returns performance state encoded in the OPP of the genpd. This calls
2930  * platform specific genpd->opp_to_performance_state() callback to translate
2931  * power domain OPP to performance state.
2932  *
2933  * Returns performance state on success and 0 on failure.
2934  */
2935 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2936 					       struct dev_pm_opp *opp)
2937 {
2938 	struct generic_pm_domain *genpd = NULL;
2939 	int state;
2940 
2941 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2942 
2943 	if (unlikely(!genpd->opp_to_performance_state))
2944 		return 0;
2945 
2946 	genpd_lock(genpd);
2947 	state = genpd->opp_to_performance_state(genpd, opp);
2948 	genpd_unlock(genpd);
2949 
2950 	return state;
2951 }
2952 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
2953 
2954 static int __init genpd_bus_init(void)
2955 {
2956 	return bus_register(&genpd_bus_type);
2957 }
2958 core_initcall(genpd_bus_init);
2959 
2960 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2961 
2962 
2963 /***        debugfs support        ***/
2964 
2965 #ifdef CONFIG_DEBUG_FS
2966 /*
2967  * TODO: This function is a slightly modified version of rtpm_status_show
2968  * from sysfs.c, so generalize it.
2969  */
2970 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2971 {
2972 	static const char * const status_lookup[] = {
2973 		[RPM_ACTIVE] = "active",
2974 		[RPM_RESUMING] = "resuming",
2975 		[RPM_SUSPENDED] = "suspended",
2976 		[RPM_SUSPENDING] = "suspending"
2977 	};
2978 	const char *p = "";
2979 
2980 	if (dev->power.runtime_error)
2981 		p = "error";
2982 	else if (dev->power.disable_depth)
2983 		p = "unsupported";
2984 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2985 		p = status_lookup[dev->power.runtime_status];
2986 	else
2987 		WARN_ON(1);
2988 
2989 	seq_printf(s, "%-25s  ", p);
2990 }
2991 
2992 static void perf_status_str(struct seq_file *s, struct device *dev)
2993 {
2994 	struct generic_pm_domain_data *gpd_data;
2995 
2996 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2997 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
2998 }
2999 
3000 static int genpd_summary_one(struct seq_file *s,
3001 			struct generic_pm_domain *genpd)
3002 {
3003 	static const char * const status_lookup[] = {
3004 		[GENPD_STATE_ON] = "on",
3005 		[GENPD_STATE_OFF] = "off"
3006 	};
3007 	struct pm_domain_data *pm_data;
3008 	const char *kobj_path;
3009 	struct gpd_link *link;
3010 	char state[16];
3011 	int ret;
3012 
3013 	ret = genpd_lock_interruptible(genpd);
3014 	if (ret)
3015 		return -ERESTARTSYS;
3016 
3017 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3018 		goto exit;
3019 	if (!genpd_status_on(genpd))
3020 		snprintf(state, sizeof(state), "%s-%u",
3021 			 status_lookup[genpd->status], genpd->state_idx);
3022 	else
3023 		snprintf(state, sizeof(state), "%s",
3024 			 status_lookup[genpd->status]);
3025 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3026 
3027 	/*
3028 	 * Modifications on the list require holding locks on both
3029 	 * parent and child, so we are safe.
3030 	 * Also genpd->name is immutable.
3031 	 */
3032 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3033 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3034 			seq_printf(s, "\n%48s", " ");
3035 		seq_printf(s, "%s", link->child->name);
3036 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3037 			seq_puts(s, ", ");
3038 	}
3039 
3040 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3041 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3042 				genpd_is_irq_safe(genpd) ?
3043 				GFP_ATOMIC : GFP_KERNEL);
3044 		if (kobj_path == NULL)
3045 			continue;
3046 
3047 		seq_printf(s, "\n    %-50s  ", kobj_path);
3048 		rtpm_status_str(s, pm_data->dev);
3049 		perf_status_str(s, pm_data->dev);
3050 		kfree(kobj_path);
3051 	}
3052 
3053 	seq_puts(s, "\n");
3054 exit:
3055 	genpd_unlock(genpd);
3056 
3057 	return 0;
3058 }
3059 
3060 static int summary_show(struct seq_file *s, void *data)
3061 {
3062 	struct generic_pm_domain *genpd;
3063 	int ret = 0;
3064 
3065 	seq_puts(s, "domain                          status          children                           performance\n");
3066 	seq_puts(s, "    /device                                             runtime status\n");
3067 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3068 
3069 	ret = mutex_lock_interruptible(&gpd_list_lock);
3070 	if (ret)
3071 		return -ERESTARTSYS;
3072 
3073 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3074 		ret = genpd_summary_one(s, genpd);
3075 		if (ret)
3076 			break;
3077 	}
3078 	mutex_unlock(&gpd_list_lock);
3079 
3080 	return ret;
3081 }
3082 
3083 static int status_show(struct seq_file *s, void *data)
3084 {
3085 	static const char * const status_lookup[] = {
3086 		[GENPD_STATE_ON] = "on",
3087 		[GENPD_STATE_OFF] = "off"
3088 	};
3089 
3090 	struct generic_pm_domain *genpd = s->private;
3091 	int ret = 0;
3092 
3093 	ret = genpd_lock_interruptible(genpd);
3094 	if (ret)
3095 		return -ERESTARTSYS;
3096 
3097 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3098 		goto exit;
3099 
3100 	if (genpd->status == GENPD_STATE_OFF)
3101 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3102 			genpd->state_idx);
3103 	else
3104 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3105 exit:
3106 	genpd_unlock(genpd);
3107 	return ret;
3108 }
3109 
3110 static int sub_domains_show(struct seq_file *s, void *data)
3111 {
3112 	struct generic_pm_domain *genpd = s->private;
3113 	struct gpd_link *link;
3114 	int ret = 0;
3115 
3116 	ret = genpd_lock_interruptible(genpd);
3117 	if (ret)
3118 		return -ERESTARTSYS;
3119 
3120 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3121 		seq_printf(s, "%s\n", link->child->name);
3122 
3123 	genpd_unlock(genpd);
3124 	return ret;
3125 }
3126 
3127 static int idle_states_show(struct seq_file *s, void *data)
3128 {
3129 	struct generic_pm_domain *genpd = s->private;
3130 	unsigned int i;
3131 	int ret = 0;
3132 
3133 	ret = genpd_lock_interruptible(genpd);
3134 	if (ret)
3135 		return -ERESTARTSYS;
3136 
3137 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3138 
3139 	for (i = 0; i < genpd->state_count; i++) {
3140 		ktime_t delta = 0;
3141 		s64 msecs;
3142 
3143 		if ((genpd->status == GENPD_STATE_OFF) &&
3144 				(genpd->state_idx == i))
3145 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3146 
3147 		msecs = ktime_to_ms(
3148 			ktime_add(genpd->states[i].idle_time, delta));
3149 		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
3150 			      genpd->states[i].usage, genpd->states[i].rejected);
3151 	}
3152 
3153 	genpd_unlock(genpd);
3154 	return ret;
3155 }
3156 
3157 static int active_time_show(struct seq_file *s, void *data)
3158 {
3159 	struct generic_pm_domain *genpd = s->private;
3160 	ktime_t delta = 0;
3161 	int ret = 0;
3162 
3163 	ret = genpd_lock_interruptible(genpd);
3164 	if (ret)
3165 		return -ERESTARTSYS;
3166 
3167 	if (genpd->status == GENPD_STATE_ON)
3168 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
3169 
3170 	seq_printf(s, "%lld ms\n", ktime_to_ms(
3171 				ktime_add(genpd->on_time, delta)));
3172 
3173 	genpd_unlock(genpd);
3174 	return ret;
3175 }
3176 
3177 static int total_idle_time_show(struct seq_file *s, void *data)
3178 {
3179 	struct generic_pm_domain *genpd = s->private;
3180 	ktime_t delta = 0, total = 0;
3181 	unsigned int i;
3182 	int ret = 0;
3183 
3184 	ret = genpd_lock_interruptible(genpd);
3185 	if (ret)
3186 		return -ERESTARTSYS;
3187 
3188 	for (i = 0; i < genpd->state_count; i++) {
3189 
3190 		if ((genpd->status == GENPD_STATE_OFF) &&
3191 				(genpd->state_idx == i))
3192 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3193 
3194 		total = ktime_add(total, genpd->states[i].idle_time);
3195 	}
3196 	total = ktime_add(total, delta);
3197 
3198 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
3199 
3200 	genpd_unlock(genpd);
3201 	return ret;
3202 }
3203 
3204 
3205 static int devices_show(struct seq_file *s, void *data)
3206 {
3207 	struct generic_pm_domain *genpd = s->private;
3208 	struct pm_domain_data *pm_data;
3209 	const char *kobj_path;
3210 	int ret = 0;
3211 
3212 	ret = genpd_lock_interruptible(genpd);
3213 	if (ret)
3214 		return -ERESTARTSYS;
3215 
3216 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3217 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3218 				genpd_is_irq_safe(genpd) ?
3219 				GFP_ATOMIC : GFP_KERNEL);
3220 		if (kobj_path == NULL)
3221 			continue;
3222 
3223 		seq_printf(s, "%s\n", kobj_path);
3224 		kfree(kobj_path);
3225 	}
3226 
3227 	genpd_unlock(genpd);
3228 	return ret;
3229 }
3230 
3231 static int perf_state_show(struct seq_file *s, void *data)
3232 {
3233 	struct generic_pm_domain *genpd = s->private;
3234 
3235 	if (genpd_lock_interruptible(genpd))
3236 		return -ERESTARTSYS;
3237 
3238 	seq_printf(s, "%u\n", genpd->performance_state);
3239 
3240 	genpd_unlock(genpd);
3241 	return 0;
3242 }
3243 
3244 DEFINE_SHOW_ATTRIBUTE(summary);
3245 DEFINE_SHOW_ATTRIBUTE(status);
3246 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3247 DEFINE_SHOW_ATTRIBUTE(idle_states);
3248 DEFINE_SHOW_ATTRIBUTE(active_time);
3249 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3250 DEFINE_SHOW_ATTRIBUTE(devices);
3251 DEFINE_SHOW_ATTRIBUTE(perf_state);
3252 
3253 static void genpd_debug_add(struct generic_pm_domain *genpd)
3254 {
3255 	struct dentry *d;
3256 
3257 	if (!genpd_debugfs_dir)
3258 		return;
3259 
3260 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3261 
3262 	debugfs_create_file("current_state", 0444,
3263 			    d, genpd, &status_fops);
3264 	debugfs_create_file("sub_domains", 0444,
3265 			    d, genpd, &sub_domains_fops);
3266 	debugfs_create_file("idle_states", 0444,
3267 			    d, genpd, &idle_states_fops);
3268 	debugfs_create_file("active_time", 0444,
3269 			    d, genpd, &active_time_fops);
3270 	debugfs_create_file("total_idle_time", 0444,
3271 			    d, genpd, &total_idle_time_fops);
3272 	debugfs_create_file("devices", 0444,
3273 			    d, genpd, &devices_fops);
3274 	if (genpd->set_performance_state)
3275 		debugfs_create_file("perf_state", 0444,
3276 				    d, genpd, &perf_state_fops);
3277 }
3278 
3279 static int __init genpd_debug_init(void)
3280 {
3281 	struct generic_pm_domain *genpd;
3282 
3283 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3284 
3285 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3286 			    NULL, &summary_fops);
3287 
3288 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3289 		genpd_debug_add(genpd);
3290 
3291 	return 0;
3292 }
3293 late_initcall(genpd_debug_init);
3294 
3295 static void __exit genpd_debug_exit(void)
3296 {
3297 	debugfs_remove_recursive(genpd_debugfs_dir);
3298 }
3299 __exitcall(genpd_debug_exit);
3300 #endif /* CONFIG_DEBUG_FS */
3301