xref: /openbmc/linux/drivers/base/power/domain.c (revision 75f25bd3)
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/slab.h>
15 #include <linux/err.h>
16 #include <linux/sched.h>
17 #include <linux/suspend.h>
18 
19 static LIST_HEAD(gpd_list);
20 static DEFINE_MUTEX(gpd_list_lock);
21 
22 #ifdef CONFIG_PM
23 
24 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25 {
26 	if (IS_ERR_OR_NULL(dev->pm_domain))
27 		return ERR_PTR(-EINVAL);
28 
29 	return pd_to_genpd(dev->pm_domain);
30 }
31 
32 static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33 {
34 	if (!WARN_ON(genpd->sd_count == 0))
35 		genpd->sd_count--;
36 }
37 
38 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
39 {
40 	DEFINE_WAIT(wait);
41 
42 	mutex_lock(&genpd->lock);
43 	/*
44 	 * Wait for the domain to transition into either the active or the
45 	 * power off state.
46 	 */
47 	for (;;) {
48 		prepare_to_wait(&genpd->status_wait_queue, &wait,
49 				TASK_UNINTERRUPTIBLE);
50 		if (genpd->status == GPD_STATE_ACTIVE
51 		    || genpd->status == GPD_STATE_POWER_OFF)
52 			break;
53 		mutex_unlock(&genpd->lock);
54 
55 		schedule();
56 
57 		mutex_lock(&genpd->lock);
58 	}
59 	finish_wait(&genpd->status_wait_queue, &wait);
60 }
61 
62 static void genpd_release_lock(struct generic_pm_domain *genpd)
63 {
64 	mutex_unlock(&genpd->lock);
65 }
66 
67 static void genpd_set_active(struct generic_pm_domain *genpd)
68 {
69 	if (genpd->resume_count == 0)
70 		genpd->status = GPD_STATE_ACTIVE;
71 }
72 
73 /**
74  * pm_genpd_poweron - Restore power to a given PM domain and its parents.
75  * @genpd: PM domain to power up.
76  *
77  * Restore power to @genpd and all of its parents so that it is possible to
78  * resume a device belonging to it.
79  */
80 int pm_genpd_poweron(struct generic_pm_domain *genpd)
81 {
82 	struct generic_pm_domain *parent = genpd->parent;
83 	int ret = 0;
84 
85  start:
86 	if (parent) {
87 		genpd_acquire_lock(parent);
88 		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
89 	} else {
90 		mutex_lock(&genpd->lock);
91 	}
92 
93 	if (genpd->status == GPD_STATE_ACTIVE
94 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
95 		goto out;
96 
97 	if (genpd->status != GPD_STATE_POWER_OFF) {
98 		genpd_set_active(genpd);
99 		goto out;
100 	}
101 
102 	if (parent && parent->status != GPD_STATE_ACTIVE) {
103 		mutex_unlock(&genpd->lock);
104 		genpd_release_lock(parent);
105 
106 		ret = pm_genpd_poweron(parent);
107 		if (ret)
108 			return ret;
109 
110 		goto start;
111 	}
112 
113 	if (genpd->power_on) {
114 		ret = genpd->power_on(genpd);
115 		if (ret)
116 			goto out;
117 	}
118 
119 	genpd_set_active(genpd);
120 	if (parent)
121 		parent->sd_count++;
122 
123  out:
124 	mutex_unlock(&genpd->lock);
125 	if (parent)
126 		genpd_release_lock(parent);
127 
128 	return ret;
129 }
130 
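/*
 * Example (illustrative sketch, not part of the original file): platform code
 * that needs a domain powered up before runtime PM can manage it could call
 * pm_genpd_poweron() directly.  "my_pm_domain" is a hypothetical
 * struct generic_pm_domain owned by the platform.
 *
 *	int ret = pm_genpd_poweron(&my_pm_domain);
 *	if (ret)
 *		pr_err("failed to power on PM domain: %d\n", ret);
 */
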
131 #endif /* CONFIG_PM */
132 
133 #ifdef CONFIG_PM_RUNTIME
134 
135 /**
136  * __pm_genpd_save_device - Save the pre-suspend state of a device.
137  * @dle: Device list entry of the device to save the state of.
138  * @genpd: PM domain the device belongs to.
139  */
140 static int __pm_genpd_save_device(struct dev_list_entry *dle,
141 				  struct generic_pm_domain *genpd)
142 	__releases(&genpd->lock) __acquires(&genpd->lock)
143 {
144 	struct device *dev = dle->dev;
145 	struct device_driver *drv = dev->driver;
146 	int ret = 0;
147 
148 	if (dle->need_restore)
149 		return 0;
150 
151 	mutex_unlock(&genpd->lock);
152 
153 	if (drv && drv->pm && drv->pm->runtime_suspend) {
154 		if (genpd->start_device)
155 			genpd->start_device(dev);
156 
157 		ret = drv->pm->runtime_suspend(dev);
158 
159 		if (genpd->stop_device)
160 			genpd->stop_device(dev);
161 	}
162 
163 	mutex_lock(&genpd->lock);
164 
165 	if (!ret)
166 		dle->need_restore = true;
167 
168 	return ret;
169 }
170 
171 /**
172  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
173  * @dle: Device list entry of the device to restore the state of.
174  * @genpd: PM domain the device belongs to.
175  */
176 static void __pm_genpd_restore_device(struct dev_list_entry *dle,
177 				      struct generic_pm_domain *genpd)
178 	__releases(&genpd->lock) __acquires(&genpd->lock)
179 {
180 	struct device *dev = dle->dev;
181 	struct device_driver *drv = dev->driver;
182 
183 	if (!dle->need_restore)
184 		return;
185 
186 	mutex_unlock(&genpd->lock);
187 
188 	if (drv && drv->pm && drv->pm->runtime_resume) {
189 		if (genpd->start_device)
190 			genpd->start_device(dev);
191 
192 		drv->pm->runtime_resume(dev);
193 
194 		if (genpd->stop_device)
195 			genpd->stop_device(dev);
196 	}
197 
198 	mutex_lock(&genpd->lock);
199 
200 	dle->need_restore = false;
201 }
202 
203 /**
204  * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
205  * @genpd: PM domain to check.
206  *
207  * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
208  * a "power off" operation, which means that a "power on" has occurred in the
209  * meantime, or if its resume_count field is different from zero, which means
210  * that one of its devices has been resumed in the meantime.
211  */
212 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
213 {
214 	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
215 }
216 
217 /**
218  * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
219  * @genpd: PM domain to power off.
220  *
221  * Queue up the execution of pm_genpd_poweroff() unless it's already been done
222  * before.
223  */
224 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
225 {
226 	if (!work_pending(&genpd->power_off_work))
227 		queue_work(pm_wq, &genpd->power_off_work);
228 }
229 
230 /**
231  * pm_genpd_poweroff - Remove power from a given PM domain.
232  * @genpd: PM domain to power down.
233  *
234  * If all of the @genpd's devices have been suspended and all of its subdomains
235  * have been powered down, run the runtime suspend callbacks provided by all of
236  * the @genpd's devices' drivers and remove power from @genpd.
237  */
238 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
239 	__releases(&genpd->lock) __acquires(&genpd->lock)
240 {
241 	struct generic_pm_domain *parent;
242 	struct dev_list_entry *dle;
243 	unsigned int not_suspended;
244 	int ret = 0;
245 
246  start:
247 	/*
248 	 * Do not try to power off the domain in the following situations:
249 	 * (1) The domain is already in the "power off" state.
250 	 * (2) System suspend is in progress.
251 	 * (3) One of the domain's devices is being resumed right now.
252 	 */
253 	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
254 	    || genpd->resume_count > 0)
255 		return 0;
256 
257 	if (genpd->sd_count > 0)
258 		return -EBUSY;
259 
260 	not_suspended = 0;
261 	list_for_each_entry(dle, &genpd->dev_list, node)
262 		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
263 			not_suspended++;
264 
265 	if (not_suspended > genpd->in_progress)
266 		return -EBUSY;
267 
268 	if (genpd->poweroff_task) {
269 		/*
270 		 * Another instance of pm_genpd_poweroff() is executing
271 		 * callbacks, so tell it to start over and return.
272 		 */
273 		genpd->status = GPD_STATE_REPEAT;
274 		return 0;
275 	}
276 
277 	if (genpd->gov && genpd->gov->power_down_ok) {
278 		if (!genpd->gov->power_down_ok(&genpd->domain))
279 			return -EAGAIN;
280 	}
281 
282 	genpd->status = GPD_STATE_BUSY;
283 	genpd->poweroff_task = current;
284 
285 	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
286 		ret = __pm_genpd_save_device(dle, genpd);
287 		if (ret) {
288 			genpd_set_active(genpd);
289 			goto out;
290 		}
291 
292 		if (genpd_abort_poweroff(genpd))
293 			goto out;
294 
295 		if (genpd->status == GPD_STATE_REPEAT) {
296 			genpd->poweroff_task = NULL;
297 			goto start;
298 		}
299 	}
300 
301 	parent = genpd->parent;
302 	if (parent) {
303 		mutex_unlock(&genpd->lock);
304 
305 		genpd_acquire_lock(parent);
306 		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
307 
308 		if (genpd_abort_poweroff(genpd)) {
309 			genpd_release_lock(parent);
310 			goto out;
311 		}
312 	}
313 
314 	if (genpd->power_off) {
315 		ret = genpd->power_off(genpd);
316 		if (ret == -EBUSY) {
317 			genpd_set_active(genpd);
318 			if (parent)
319 				genpd_release_lock(parent);
320 
321 			goto out;
322 		}
323 	}
324 
325 	genpd->status = GPD_STATE_POWER_OFF;
326 
327 	if (parent) {
328 		genpd_sd_counter_dec(parent);
329 		if (parent->sd_count == 0)
330 			genpd_queue_power_off_work(parent);
331 
332 		genpd_release_lock(parent);
333 	}
334 
335  out:
336 	genpd->poweroff_task = NULL;
337 	wake_up_all(&genpd->status_wait_queue);
338 	return ret;
339 }
340 
341 /**
342  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
343  * @work: Work structure used for scheduling the execution of this function.
344  */
345 static void genpd_power_off_work_fn(struct work_struct *work)
346 {
347 	struct generic_pm_domain *genpd;
348 
349 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
350 
351 	genpd_acquire_lock(genpd);
352 	pm_genpd_poweroff(genpd);
353 	genpd_release_lock(genpd);
354 }
355 
356 /**
357  * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
358  * @dev: Device to suspend.
359  *
360  * Carry out a runtime suspend of a device under the assumption that its
361  * pm_domain field points to the domain member of an object of type
362  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
363  */
364 static int pm_genpd_runtime_suspend(struct device *dev)
365 {
366 	struct generic_pm_domain *genpd;
367 
368 	dev_dbg(dev, "%s()\n", __func__);
369 
370 	genpd = dev_to_genpd(dev);
371 	if (IS_ERR(genpd))
372 		return -EINVAL;
373 
374 	if (genpd->stop_device) {
375 		int ret = genpd->stop_device(dev);
376 		if (ret)
377 			return ret;
378 	}
379 
380 	mutex_lock(&genpd->lock);
381 	genpd->in_progress++;
382 	pm_genpd_poweroff(genpd);
383 	genpd->in_progress--;
384 	mutex_unlock(&genpd->lock);
385 
386 	return 0;
387 }
388 
389 /**
390  * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
391  * @dev: Device to resume.
392  * @genpd: PM domain the device belongs to.
393  */
394 static void __pm_genpd_runtime_resume(struct device *dev,
395 				      struct generic_pm_domain *genpd)
396 {
397 	struct dev_list_entry *dle;
398 
399 	list_for_each_entry(dle, &genpd->dev_list, node) {
400 		if (dle->dev == dev) {
401 			__pm_genpd_restore_device(dle, genpd);
402 			break;
403 		}
404 	}
405 }
406 
407 /**
408  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
409  * @dev: Device to resume.
410  *
411  * Carry out a runtime resume of a device under the assumption that its
412  * pm_domain field points to the domain member of an object of type
413  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
414  */
415 static int pm_genpd_runtime_resume(struct device *dev)
416 {
417 	struct generic_pm_domain *genpd;
418 	DEFINE_WAIT(wait);
419 	int ret;
420 
421 	dev_dbg(dev, "%s()\n", __func__);
422 
423 	genpd = dev_to_genpd(dev);
424 	if (IS_ERR(genpd))
425 		return -EINVAL;
426 
427 	ret = pm_genpd_poweron(genpd);
428 	if (ret)
429 		return ret;
430 
431 	mutex_lock(&genpd->lock);
432 	genpd->status = GPD_STATE_BUSY;
433 	genpd->resume_count++;
434 	for (;;) {
435 		prepare_to_wait(&genpd->status_wait_queue, &wait,
436 				TASK_UNINTERRUPTIBLE);
437 		/*
438 		 * If current is the powering off task, we have been called
439 		 * reentrantly from one of the device callbacks, so we should
440 		 * not wait.
441 		 */
442 		if (!genpd->poweroff_task || genpd->poweroff_task == current)
443 			break;
444 		mutex_unlock(&genpd->lock);
445 
446 		schedule();
447 
448 		mutex_lock(&genpd->lock);
449 	}
450 	finish_wait(&genpd->status_wait_queue, &wait);
451 	__pm_genpd_runtime_resume(dev, genpd);
452 	genpd->resume_count--;
453 	genpd_set_active(genpd);
454 	wake_up_all(&genpd->status_wait_queue);
455 	mutex_unlock(&genpd->lock);
456 
457 	if (genpd->start_device)
458 		genpd->start_device(dev);
459 
460 	return 0;
461 }
462 
463 #else
464 
465 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
466 static inline void __pm_genpd_runtime_resume(struct device *dev,
467 					     struct generic_pm_domain *genpd) {}
468 
469 #define pm_genpd_runtime_suspend	NULL
470 #define pm_genpd_runtime_resume		NULL
471 
472 #endif /* CONFIG_PM_RUNTIME */
473 
474 #ifdef CONFIG_PM_SLEEP
475 
476 /**
477  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
478  * @genpd: PM domain to power off, if possible.
479  *
480  * Check if the given PM domain can be powered off (during system suspend or
481  * hibernation) and power it off if so, also propagating to its parent.
482  *
483  * This function is only called in "noirq" stages of system power transitions,
484  * so it need not acquire locks (all of the "noirq" callbacks are executed
485  * sequentially, so it is guaranteed that it will never run twice in parallel).
486  */
487 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
488 {
489 	struct generic_pm_domain *parent = genpd->parent;
490 
491 	if (genpd->status == GPD_STATE_POWER_OFF)
492 		return;
493 
494 	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
495 		return;
496 
497 	if (genpd->power_off)
498 		genpd->power_off(genpd);
499 
500 	genpd->status = GPD_STATE_POWER_OFF;
501 	if (parent) {
502 		genpd_sd_counter_dec(parent);
503 		pm_genpd_sync_poweroff(parent);
504 	}
505 }
506 
507 /**
508  * resume_needed - Check whether to resume a device before system suspend.
509  * @dev: Device to check.
510  * @genpd: PM domain the device belongs to.
511  *
512  * There are two cases in which a device that can wake up the system from sleep
513  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
514  * to wake up the system and it has to remain active for this purpose while the
515  * system is in the sleep state and (2) if the device is not enabled to wake up
516  * the system from sleep states and it generally doesn't generate wakeup signals
517  * by itself (those signals are generated on its behalf by other parts of the
518  * system).  In the latter case it may be necessary to reconfigure the device's
519  * wakeup settings during system suspend, because it may have been set up to
520  * signal remote wakeup from the system's working state as needed by runtime PM.
521  * Return 'true' in either of the above cases.
522  */
523 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
524 {
525 	bool active_wakeup;
526 
527 	if (!device_can_wakeup(dev))
528 		return false;
529 
530 	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
531 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
532 }
533 
534 /**
535  * pm_genpd_prepare - Start power transition of a device in a PM domain.
536  * @dev: Device to start the transition of.
537  *
538  * Start a power transition of a device (during a system-wide power transition)
539  * under the assumption that its pm_domain field points to the domain member of
540  * an object of type struct generic_pm_domain representing a PM domain
541  * consisting of I/O devices.
542  */
543 static int pm_genpd_prepare(struct device *dev)
544 {
545 	struct generic_pm_domain *genpd;
546 	int ret;
547 
548 	dev_dbg(dev, "%s()\n", __func__);
549 
550 	genpd = dev_to_genpd(dev);
551 	if (IS_ERR(genpd))
552 		return -EINVAL;
553 
554 	/*
555 	 * If a wakeup request is pending for the device, it should be woken up
556 	 * at this point and a system wakeup event should be reported if it's
557 	 * set up to wake up the system from sleep states.
558 	 */
559 	pm_runtime_get_noresume(dev);
560 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
561 		pm_wakeup_event(dev, 0);
562 
563 	if (pm_wakeup_pending()) {
564 		pm_runtime_put_sync(dev);
565 		return -EBUSY;
566 	}
567 
568 	if (resume_needed(dev, genpd))
569 		pm_runtime_resume(dev);
570 
571 	genpd_acquire_lock(genpd);
572 
573 	if (genpd->prepared_count++ == 0)
574 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
575 
576 	genpd_release_lock(genpd);
577 
578 	if (genpd->suspend_power_off) {
579 		pm_runtime_put_noidle(dev);
580 		return 0;
581 	}
582 
583 	/*
584 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
585 	 * so pm_genpd_poweron() will return immediately, but if the device
586 	 * is suspended (e.g. it's been stopped by .stop_device()), we need
587 	 * to make it operational.
588 	 */
589 	pm_runtime_resume(dev);
590 	__pm_runtime_disable(dev, false);
591 
592 	ret = pm_generic_prepare(dev);
593 	if (ret) {
594 		mutex_lock(&genpd->lock);
595 
596 		if (--genpd->prepared_count == 0)
597 			genpd->suspend_power_off = false;
598 
599 		mutex_unlock(&genpd->lock);
600 		pm_runtime_enable(dev);
601 	}
602 
603 	pm_runtime_put_sync(dev);
604 	return ret;
605 }
606 
607 /**
608  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
609  * @dev: Device to suspend.
610  *
611  * Suspend a device under the assumption that its pm_domain field points to the
612  * domain member of an object of type struct generic_pm_domain representing
613  * a PM domain consisting of I/O devices.
614  */
615 static int pm_genpd_suspend(struct device *dev)
616 {
617 	struct generic_pm_domain *genpd;
618 
619 	dev_dbg(dev, "%s()\n", __func__);
620 
621 	genpd = dev_to_genpd(dev);
622 	if (IS_ERR(genpd))
623 		return -EINVAL;
624 
625 	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
626 }
627 
628 /**
629  * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
630  * @dev: Device to suspend.
631  *
632  * Carry out a late suspend of a device under the assumption that its
633  * pm_domain field points to the domain member of an object of type
634  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
635  */
636 static int pm_genpd_suspend_noirq(struct device *dev)
637 {
638 	struct generic_pm_domain *genpd;
639 	int ret;
640 
641 	dev_dbg(dev, "%s()\n", __func__);
642 
643 	genpd = dev_to_genpd(dev);
644 	if (IS_ERR(genpd))
645 		return -EINVAL;
646 
647 	if (genpd->suspend_power_off)
648 		return 0;
649 
650 	ret = pm_generic_suspend_noirq(dev);
651 	if (ret)
652 		return ret;
653 
654 	if (device_may_wakeup(dev)
655 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
656 		return 0;
657 
658 	if (genpd->stop_device)
659 		genpd->stop_device(dev);
660 
661 	/*
662 	 * Since all of the "noirq" callbacks are executed sequentially, it is
663 	 * guaranteed that this function will never run twice in parallel for
664 	 * the same PM domain, so it is not necessary to use locking here.
665 	 */
666 	genpd->suspended_count++;
667 	pm_genpd_sync_poweroff(genpd);
668 
669 	return 0;
670 }
671 
672 /**
673  * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
674  * @dev: Device to resume.
675  *
676  * Carry out an early resume of a device under the assumption that its
677  * pm_domain field points to the domain member of an object of type
678  * struct generic_pm_domain representing a power domain consisting of I/O
679  * devices.
680  */
681 static int pm_genpd_resume_noirq(struct device *dev)
682 {
683 	struct generic_pm_domain *genpd;
684 
685 	dev_dbg(dev, "%s()\n", __func__);
686 
687 	genpd = dev_to_genpd(dev);
688 	if (IS_ERR(genpd))
689 		return -EINVAL;
690 
691 	if (genpd->suspend_power_off)
692 		return 0;
693 
694 	/*
695 	 * Since all of the "noirq" callbacks are executed sequentially, it is
696 	 * guaranteed that this function will never run twice in parallel for
697 	 * the same PM domain, so it is not necessary to use locking here.
698 	 */
699 	pm_genpd_poweron(genpd);
700 	genpd->suspended_count--;
701 	if (genpd->start_device)
702 		genpd->start_device(dev);
703 
704 	return pm_generic_resume_noirq(dev);
705 }
706 
707 /**
708  * pm_genpd_resume - Resume a device belonging to an I/O power domain.
709  * @dev: Device to resume.
710  *
711  * Resume a device under the assumption that its pm_domain field points to the
712  * domain member of an object of type struct generic_pm_domain representing
713  * a power domain consisting of I/O devices.
714  */
715 static int pm_genpd_resume(struct device *dev)
716 {
717 	struct generic_pm_domain *genpd;
718 
719 	dev_dbg(dev, "%s()\n", __func__);
720 
721 	genpd = dev_to_genpd(dev);
722 	if (IS_ERR(genpd))
723 		return -EINVAL;
724 
725 	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
726 }
727 
728 /**
729  * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
730  * @dev: Device to freeze.
731  *
732  * Freeze a device under the assumption that its pm_domain field points to the
733  * domain member of an object of type struct generic_pm_domain representing
734  * a power domain consisting of I/O devices.
735  */
736 static int pm_genpd_freeze(struct device *dev)
737 {
738 	struct generic_pm_domain *genpd;
739 
740 	dev_dbg(dev, "%s()\n", __func__);
741 
742 	genpd = dev_to_genpd(dev);
743 	if (IS_ERR(genpd))
744 		return -EINVAL;
745 
746 	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
747 }
748 
749 /**
750  * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
751  * @dev: Device to freeze.
752  *
753  * Carry out a late freeze of a device under the assumption that its
754  * pm_domain field points to the domain member of an object of type
755  * struct generic_pm_domain representing a power domain consisting of I/O
756  * devices.
757  */
758 static int pm_genpd_freeze_noirq(struct device *dev)
759 {
760 	struct generic_pm_domain *genpd;
761 	int ret;
762 
763 	dev_dbg(dev, "%s()\n", __func__);
764 
765 	genpd = dev_to_genpd(dev);
766 	if (IS_ERR(genpd))
767 		return -EINVAL;
768 
769 	if (genpd->suspend_power_off)
770 		return 0;
771 
772 	ret = pm_generic_freeze_noirq(dev);
773 	if (ret)
774 		return ret;
775 
776 	if (genpd->stop_device)
777 		genpd->stop_device(dev);
778 
779 	return 0;
780 }
781 
782 /**
783  * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
784  * @dev: Device to thaw.
785  *
786  * Carry out an early thaw of a device under the assumption that its
787  * pm_domain field points to the domain member of an object of type
788  * struct generic_pm_domain representing a power domain consisting of I/O
789  * devices.
790  */
791 static int pm_genpd_thaw_noirq(struct device *dev)
792 {
793 	struct generic_pm_domain *genpd;
794 
795 	dev_dbg(dev, "%s()\n", __func__);
796 
797 	genpd = dev_to_genpd(dev);
798 	if (IS_ERR(genpd))
799 		return -EINVAL;
800 
801 	if (genpd->suspend_power_off)
802 		return 0;
803 
804 	if (genpd->start_device)
805 		genpd->start_device(dev);
806 
807 	return pm_generic_thaw_noirq(dev);
808 }
809 
810 /**
811  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
812  * @dev: Device to thaw.
813  *
814  * Thaw a device under the assumption that its pm_domain field points to the
815  * domain member of an object of type struct generic_pm_domain representing
816  * a power domain consisting of I/O devices.
817  */
818 static int pm_genpd_thaw(struct device *dev)
819 {
820 	struct generic_pm_domain *genpd;
821 
822 	dev_dbg(dev, "%s()\n", __func__);
823 
824 	genpd = dev_to_genpd(dev);
825 	if (IS_ERR(genpd))
826 		return -EINVAL;
827 
828 	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
829 }
830 
831 /**
832  * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
833  * @dev: Device to suspend.
834  *
835  * Power off a device under the assumption that its pm_domain field points to
836  * the domain member of an object of type struct generic_pm_domain representing
837  * a PM domain consisting of I/O devices.
838  */
839 static int pm_genpd_dev_poweroff(struct device *dev)
840 {
841 	struct generic_pm_domain *genpd;
842 
843 	dev_dbg(dev, "%s()\n", __func__);
844 
845 	genpd = dev_to_genpd(dev);
846 	if (IS_ERR(genpd))
847 		return -EINVAL;
848 
849 	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
850 }
851 
852 /**
853  * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
854  * @dev: Device to suspend.
855  *
856  * Carry out a late powering off of a device under the assumption that its
857  * pm_domain field points to the domain member of an object of type
858  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
859  */
860 static int pm_genpd_dev_poweroff_noirq(struct device *dev)
861 {
862 	struct generic_pm_domain *genpd;
863 	int ret;
864 
865 	dev_dbg(dev, "%s()\n", __func__);
866 
867 	genpd = dev_to_genpd(dev);
868 	if (IS_ERR(genpd))
869 		return -EINVAL;
870 
871 	if (genpd->suspend_power_off)
872 		return 0;
873 
874 	ret = pm_generic_poweroff_noirq(dev);
875 	if (ret)
876 		return ret;
877 
878 	if (device_may_wakeup(dev)
879 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
880 		return 0;
881 
882 	if (genpd->stop_device)
883 		genpd->stop_device(dev);
884 
885 	/*
886 	 * Since all of the "noirq" callbacks are executed sequentially, it is
887 	 * guaranteed that this function will never run twice in parallel for
888 	 * the same PM domain, so it is not necessary to use locking here.
889 	 */
890 	genpd->suspended_count++;
891 	pm_genpd_sync_poweroff(genpd);
892 
893 	return 0;
894 }
895 
896 /**
897  * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
898  * @dev: Device to resume.
899  *
900  * Carry out an early restore of a device under the assumption that its
901  * pm_domain field points to the domain member of an object of type
902  * struct generic_pm_domain representing a power domain consisting of I/O
903  * devices.
904  */
905 static int pm_genpd_restore_noirq(struct device *dev)
906 {
907 	struct generic_pm_domain *genpd;
908 
909 	dev_dbg(dev, "%s()\n", __func__);
910 
911 	genpd = dev_to_genpd(dev);
912 	if (IS_ERR(genpd))
913 		return -EINVAL;
914 
915 	/*
916 	 * Since all of the "noirq" callbacks are executed sequentially, it is
917 	 * guaranteed that this function will never run twice in parallel for
918 	 * the same PM domain, so it is not necessary to use locking here.
919 	 */
920 	genpd->status = GPD_STATE_POWER_OFF;
921 	if (genpd->suspend_power_off) {
922 		/*
923 		 * The boot kernel might put the domain into the power on state,
924 		 * so make sure it really is powered off.
925 		 */
926 		if (genpd->power_off)
927 			genpd->power_off(genpd);
928 		return 0;
929 	}
930 
931 	pm_genpd_poweron(genpd);
932 	genpd->suspended_count--;
933 	if (genpd->start_device)
934 		genpd->start_device(dev);
935 
936 	return pm_generic_restore_noirq(dev);
937 }
938 
939 /**
940  * pm_genpd_restore - Restore a device belonging to an I/O power domain.
941  * @dev: Device to resume.
942  *
943  * Restore a device under the assumption that its pm_domain field points to the
944  * domain member of an object of type struct generic_pm_domain representing
945  * a power domain consisting of I/O devices.
946  */
947 static int pm_genpd_restore(struct device *dev)
948 {
949 	struct generic_pm_domain *genpd;
950 
951 	dev_dbg(dev, "%s()\n", __func__);
952 
953 	genpd = dev_to_genpd(dev);
954 	if (IS_ERR(genpd))
955 		return -EINVAL;
956 
957 	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
958 }
959 
960 /**
961  * pm_genpd_complete - Complete power transition of a device in a power domain.
962  * @dev: Device to complete the transition of.
963  *
964  * Complete a power transition of a device (during a system-wide power
965  * transition) under the assumption that its pm_domain field points to the
966  * domain member of an object of type struct generic_pm_domain representing
967  * a power domain consisting of I/O devices.
968  */
969 static void pm_genpd_complete(struct device *dev)
970 {
971 	struct generic_pm_domain *genpd;
972 	bool run_complete;
973 
974 	dev_dbg(dev, "%s()\n", __func__);
975 
976 	genpd = dev_to_genpd(dev);
977 	if (IS_ERR(genpd))
978 		return;
979 
980 	mutex_lock(&genpd->lock);
981 
982 	run_complete = !genpd->suspend_power_off;
983 	if (--genpd->prepared_count == 0)
984 		genpd->suspend_power_off = false;
985 
986 	mutex_unlock(&genpd->lock);
987 
988 	if (run_complete) {
989 		pm_generic_complete(dev);
990 		pm_runtime_set_active(dev);
991 		pm_runtime_enable(dev);
992 		pm_runtime_idle(dev);
993 	}
994 }
995 
996 #else
997 
998 #define pm_genpd_prepare		NULL
999 #define pm_genpd_suspend		NULL
1000 #define pm_genpd_suspend_noirq		NULL
1001 #define pm_genpd_resume_noirq		NULL
1002 #define pm_genpd_resume			NULL
1003 #define pm_genpd_freeze			NULL
1004 #define pm_genpd_freeze_noirq		NULL
1005 #define pm_genpd_thaw_noirq		NULL
1006 #define pm_genpd_thaw			NULL
1007 #define pm_genpd_dev_poweroff_noirq	NULL
1008 #define pm_genpd_dev_poweroff		NULL
1009 #define pm_genpd_restore_noirq		NULL
1010 #define pm_genpd_restore		NULL
1011 #define pm_genpd_complete		NULL
1012 
1013 #endif /* CONFIG_PM_SLEEP */
1014 
1015 /**
1016  * pm_genpd_add_device - Add a device to an I/O PM domain.
1017  * @genpd: PM domain to add the device to.
1018  * @dev: Device to be added.
1019  */
1020 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1021 {
1022 	struct dev_list_entry *dle;
1023 	int ret = 0;
1024 
1025 	dev_dbg(dev, "%s()\n", __func__);
1026 
1027 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1028 		return -EINVAL;
1029 
1030 	genpd_acquire_lock(genpd);
1031 
1032 	if (genpd->status == GPD_STATE_POWER_OFF) {
1033 		ret = -EINVAL;
1034 		goto out;
1035 	}
1036 
1037 	if (genpd->prepared_count > 0) {
1038 		ret = -EAGAIN;
1039 		goto out;
1040 	}
1041 
1042 	list_for_each_entry(dle, &genpd->dev_list, node)
1043 		if (dle->dev == dev) {
1044 			ret = -EINVAL;
1045 			goto out;
1046 		}
1047 
1048 	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
1049 	if (!dle) {
1050 		ret = -ENOMEM;
1051 		goto out;
1052 	}
1053 
1054 	dle->dev = dev;
1055 	dle->need_restore = false;
1056 	list_add_tail(&dle->node, &genpd->dev_list);
1057 	genpd->device_count++;
1058 
1059 	spin_lock_irq(&dev->power.lock);
1060 	dev->pm_domain = &genpd->domain;
1061 	spin_unlock_irq(&dev->power.lock);
1062 
1063  out:
1064 	genpd_release_lock(genpd);
1065 
1066 	return ret;
1067 }
1068 
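/*
 * Example (illustrative sketch): attaching a device to a domain, typically
 * from platform setup code.  "my_pm_domain" and "my_pdev" are hypothetical
 * objects owned by the platform.  The call fails with -EINVAL if the domain
 * is currently powered off and with -EAGAIN while a system suspend is in
 * progress.
 *
 *	int ret = pm_genpd_add_device(&my_pm_domain, &my_pdev->dev);
 *	if (ret)
 *		dev_warn(&my_pdev->dev, "failed to join PM domain: %d\n", ret);
 */
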
1069 /**
1070  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1071  * @genpd: PM domain to remove the device from.
1072  * @dev: Device to be removed.
1073  */
1074 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1075 			   struct device *dev)
1076 {
1077 	struct dev_list_entry *dle;
1078 	int ret = -EINVAL;
1079 
1080 	dev_dbg(dev, "%s()\n", __func__);
1081 
1082 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1083 		return -EINVAL;
1084 
1085 	genpd_acquire_lock(genpd);
1086 
1087 	if (genpd->prepared_count > 0) {
1088 		ret = -EAGAIN;
1089 		goto out;
1090 	}
1091 
1092 	list_for_each_entry(dle, &genpd->dev_list, node) {
1093 		if (dle->dev != dev)
1094 			continue;
1095 
1096 		spin_lock_irq(&dev->power.lock);
1097 		dev->pm_domain = NULL;
1098 		spin_unlock_irq(&dev->power.lock);
1099 
1100 		genpd->device_count--;
1101 		list_del(&dle->node);
1102 		kfree(dle);
1103 
1104 		ret = 0;
1105 		break;
1106 	}
1107 
1108  out:
1109 	genpd_release_lock(genpd);
1110 
1111 	return ret;
1112 }
1113 
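/*
 * Example (illustrative sketch): detaching the hypothetical device added
 * above, e.g. on platform teardown.  -EINVAL is returned if the device is
 * not a member of the domain, -EAGAIN while a system suspend is in progress.
 *
 *	if (pm_genpd_remove_device(&my_pm_domain, &my_pdev->dev))
 *		dev_warn(&my_pdev->dev, "failed to leave PM domain\n");
 */
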
1114 /**
1115  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1116  * @genpd: Master PM domain to add the subdomain to.
1117  * @new_subdomain: Subdomain to be added.
1118  */
1119 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1120 			   struct generic_pm_domain *new_subdomain)
1121 {
1122 	struct generic_pm_domain *subdomain;
1123 	int ret = 0;
1124 
1125 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
1126 		return -EINVAL;
1127 
1128  start:
1129 	genpd_acquire_lock(genpd);
1130 	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
1131 
1132 	if (new_subdomain->status != GPD_STATE_POWER_OFF
1133 	    && new_subdomain->status != GPD_STATE_ACTIVE) {
1134 		mutex_unlock(&new_subdomain->lock);
1135 		genpd_release_lock(genpd);
1136 		goto start;
1137 	}
1138 
1139 	if (genpd->status == GPD_STATE_POWER_OFF
1140 	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
1141 		ret = -EINVAL;
1142 		goto out;
1143 	}
1144 
1145 	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1146 		if (subdomain == new_subdomain) {
1147 			ret = -EINVAL;
1148 			goto out;
1149 		}
1150 	}
1151 
1152 	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
1153 	new_subdomain->parent = genpd;
1154 	if (new_subdomain->status != GPD_STATE_POWER_OFF)
1155 		genpd->sd_count++;
1156 
1157  out:
1158 	mutex_unlock(&new_subdomain->lock);
1159 	genpd_release_lock(genpd);
1160 
1161 	return ret;
1162 }
1163 
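/*
 * Example (illustrative sketch): nesting one hypothetical domain inside
 * another, so that the parent is only powered off once the subdomain has
 * been powered off as well.
 *
 *	int ret = pm_genpd_add_subdomain(&my_parent_domain, &my_child_domain);
 *	if (ret)
 *		pr_err("failed to add PM subdomain: %d\n", ret);
 */
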
1164 /**
1165  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1166  * @genpd: Master PM domain to remove the subdomain from.
1167  * @target: Subdomain to be removed.
1168  */
1169 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1170 			      struct generic_pm_domain *target)
1171 {
1172 	struct generic_pm_domain *subdomain;
1173 	int ret = -EINVAL;
1174 
1175 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
1176 		return -EINVAL;
1177 
1178  start:
1179 	genpd_acquire_lock(genpd);
1180 
1181 	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1182 		if (subdomain != target)
1183 			continue;
1184 
1185 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1186 
1187 		if (subdomain->status != GPD_STATE_POWER_OFF
1188 		    && subdomain->status != GPD_STATE_ACTIVE) {
1189 			mutex_unlock(&subdomain->lock);
1190 			genpd_release_lock(genpd);
1191 			goto start;
1192 		}
1193 
1194 		list_del(&subdomain->sd_node);
1195 		subdomain->parent = NULL;
1196 		if (subdomain->status != GPD_STATE_POWER_OFF)
1197 			genpd_sd_counter_dec(genpd);
1198 
1199 		mutex_unlock(&subdomain->lock);
1200 
1201 		ret = 0;
1202 		break;
1203 	}
1204 
1205 	genpd_release_lock(genpd);
1206 
1207 	return ret;
1208 }
1209 
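/*
 * Example (illustrative sketch): undoing the hypothetical nesting above.
 * -EINVAL is returned if @target is not a subdomain of @genpd.
 *
 *	if (pm_genpd_remove_subdomain(&my_parent_domain, &my_child_domain))
 *		pr_err("failed to remove PM subdomain\n");
 */
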
1210 /**
1211  * pm_genpd_init - Initialize a generic I/O PM domain object.
1212  * @genpd: PM domain object to initialize.
1213  * @gov: PM domain governor to associate with the domain (may be NULL).
1214  * @is_off: Initial state of the domain (true if it is initially powered off).
1215  */
1216 void pm_genpd_init(struct generic_pm_domain *genpd,
1217 		   struct dev_power_governor *gov, bool is_off)
1218 {
1219 	if (IS_ERR_OR_NULL(genpd))
1220 		return;
1221 
1222 	INIT_LIST_HEAD(&genpd->sd_node);
1223 	genpd->parent = NULL;
1224 	INIT_LIST_HEAD(&genpd->dev_list);
1225 	INIT_LIST_HEAD(&genpd->sd_list);
1226 	mutex_init(&genpd->lock);
1227 	genpd->gov = gov;
1228 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1229 	genpd->in_progress = 0;
1230 	genpd->sd_count = 0;
1231 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1232 	init_waitqueue_head(&genpd->status_wait_queue);
1233 	genpd->poweroff_task = NULL;
1234 	genpd->resume_count = 0;
1235 	genpd->device_count = 0;
1236 	genpd->suspended_count = 0;
1237 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1238 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1239 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1240 	genpd->domain.ops.prepare = pm_genpd_prepare;
1241 	genpd->domain.ops.suspend = pm_genpd_suspend;
1242 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1243 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1244 	genpd->domain.ops.resume = pm_genpd_resume;
1245 	genpd->domain.ops.freeze = pm_genpd_freeze;
1246 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1247 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1248 	genpd->domain.ops.thaw = pm_genpd_thaw;
1249 	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1250 	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1251 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1252 	genpd->domain.ops.restore = pm_genpd_restore;
1253 	genpd->domain.ops.complete = pm_genpd_complete;
1254 	mutex_lock(&gpd_list_lock);
1255 	list_add(&genpd->gpd_list_node, &gpd_list);
1256 	mutex_unlock(&gpd_list_lock);
1257 }
1258 
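/*
 * Example (illustrative sketch): a minimal, hypothetical platform domain.
 * "my_domain_power_on/off" are platform-provided callbacks; a real platform
 * would also fill in start_device/stop_device, a governor, etc. as needed.
 * pm_genpd_init() does not overwrite these callback fields, so they may be
 * set up statically as below or right after the call.
 *
 *	static struct generic_pm_domain my_pm_domain = {
 *		.power_on	= my_domain_power_on,
 *		.power_off	= my_domain_power_off,
 *	};
 *
 *	static int __init my_pm_domain_setup(void)
 *	{
 *		pm_genpd_init(&my_pm_domain, NULL, true);
 *		return pm_genpd_add_device(&my_pm_domain, &my_pdev->dev);
 *	}
 */
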
1259 /**
1260  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1261  */
1262 void pm_genpd_poweroff_unused(void)
1263 {
1264 	struct generic_pm_domain *genpd;
1265 
1266 	mutex_lock(&gpd_list_lock);
1267 
1268 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1269 		genpd_queue_power_off_work(genpd);
1270 
1271 	mutex_unlock(&gpd_list_lock);
1272 }
1273
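
/*
 * Example (illustrative sketch): platforms typically power off unused
 * domains once drivers have had a chance to probe, e.g. from a hypothetical
 * late initcall:
 *
 *	static int __init my_genpd_poweroff_unused(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_genpd_poweroff_unused);
 */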