1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/suspend.h>
19 #include <linux/export.h>
20 
21 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
22 ({								\
23 	type (*__routine)(struct device *__d); 			\
24 	type __ret = (type)0;					\
25 								\
26 	__routine = genpd->dev_ops.callback; 			\
27 	if (__routine) {					\
28 		__ret = __routine(dev); 			\
29 	} else {						\
30 		__routine = dev_gpd_data(dev)->ops.callback;	\
31 		if (__routine) 					\
32 			__ret = __routine(dev);			\
33 	}							\
34 	__ret;							\
35 })
36 
37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
38 ({										\
39 	ktime_t __start = ktime_get();						\
40 	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
41 	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
42 	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
43 	if (!__retval && __elapsed > __td->field) {				\
44 		__td->field = __elapsed;					\
45 		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
46 			__elapsed);						\
47 		genpd->max_off_time_changed = true;				\
48 		__td->constraint_changed = true;				\
49 	}									\
50 	__retval;								\
51 })
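
/*
 * For illustration only (not compiled here): GENPD_DEV_CALLBACK() gives the
 * domain-wide callback in genpd->dev_ops precedence over the per-device one
 * in dev_gpd_data(dev)->ops.  Roughly, GENPD_DEV_CALLBACK(genpd, int, stop,
 * dev) behaves like:
 *
 *	int (*routine)(struct device *d) = genpd->dev_ops.stop;
 *
 *	if (!routine)
 *		routine = dev_gpd_data(dev)->ops.stop;
 *	return routine ? routine(dev) : 0;
 *
 * GENPD_DEV_TIMED_CALLBACK() additionally times the call and, on success,
 * records a new worst-case latency in the device's gpd_timing_data.
 */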
52 
53 static LIST_HEAD(gpd_list);
54 static DEFINE_MUTEX(gpd_list_lock);
55 
56 #ifdef CONFIG_PM
57 
58 struct generic_pm_domain *dev_to_genpd(struct device *dev)
59 {
60 	if (IS_ERR_OR_NULL(dev->pm_domain))
61 		return ERR_PTR(-EINVAL);
62 
63 	return pd_to_genpd(dev->pm_domain);
64 }
65 
66 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
67 {
68 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
69 					stop_latency_ns, "stop");
70 }
71 
72 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
73 {
74 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
75 					start_latency_ns, "start");
76 }
77 
78 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
79 {
80 	bool ret = false;
81 
82 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
83 		ret = !!atomic_dec_and_test(&genpd->sd_count);
84 
85 	return ret;
86 }
87 
88 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
89 {
90 	atomic_inc(&genpd->sd_count);
91 	smp_mb__after_atomic_inc();
92 }
93 
94 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
95 {
96 	DEFINE_WAIT(wait);
97 
98 	mutex_lock(&genpd->lock);
99 	/*
100 	 * Wait for the domain to transition into either the active
101 	 * or the power off state.
102 	 */
103 	for (;;) {
104 		prepare_to_wait(&genpd->status_wait_queue, &wait,
105 				TASK_UNINTERRUPTIBLE);
106 		if (genpd->status == GPD_STATE_ACTIVE
107 		    || genpd->status == GPD_STATE_POWER_OFF)
108 			break;
109 		mutex_unlock(&genpd->lock);
110 
111 		schedule();
112 
113 		mutex_lock(&genpd->lock);
114 	}
115 	finish_wait(&genpd->status_wait_queue, &wait);
116 }
117 
118 static void genpd_release_lock(struct generic_pm_domain *genpd)
119 {
120 	mutex_unlock(&genpd->lock);
121 }
122 
123 static void genpd_set_active(struct generic_pm_domain *genpd)
124 {
125 	if (genpd->resume_count == 0)
126 		genpd->status = GPD_STATE_ACTIVE;
127 }
128 
129 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
130 {
131 	s64 usecs64;
132 
133 	if (!genpd->cpu_data)
134 		return;
135 
136 	usecs64 = genpd->power_on_latency_ns;
137 	do_div(usecs64, NSEC_PER_USEC);
138 	usecs64 += genpd->cpu_data->saved_exit_latency;
139 	genpd->cpu_data->idle_state->exit_latency = usecs64;
140 }
141 
142 /**
143  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
144  * @genpd: PM domain to power up.
145  *
146  * Restore power to @genpd and all of its masters so that it is possible to
147  * resume a device belonging to it.
148  */
149 static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
150 	__releases(&genpd->lock) __acquires(&genpd->lock)
151 {
152 	struct gpd_link *link;
153 	DEFINE_WAIT(wait);
154 	int ret = 0;
155 
156 	/* If the domain's master is being waited for, we have to wait too. */
157 	for (;;) {
158 		prepare_to_wait(&genpd->status_wait_queue, &wait,
159 				TASK_UNINTERRUPTIBLE);
160 		if (genpd->status != GPD_STATE_WAIT_MASTER)
161 			break;
162 		mutex_unlock(&genpd->lock);
163 
164 		schedule();
165 
166 		mutex_lock(&genpd->lock);
167 	}
168 	finish_wait(&genpd->status_wait_queue, &wait);
169 
170 	if (genpd->status == GPD_STATE_ACTIVE
171 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
172 		return 0;
173 
174 	if (genpd->status != GPD_STATE_POWER_OFF) {
175 		genpd_set_active(genpd);
176 		return 0;
177 	}
178 
179 	if (genpd->cpu_data) {
180 		cpuidle_pause_and_lock();
181 		genpd->cpu_data->idle_state->disabled = true;
182 		cpuidle_resume_and_unlock();
183 		goto out;
184 	}
185 
186 	/*
187 	 * The list is guaranteed not to change while the loop below is being
188 	 * executed, unless one of the masters' .power_on() callbacks fiddles
189 	 * with it.
190 	 */
191 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
192 		genpd_sd_counter_inc(link->master);
193 		genpd->status = GPD_STATE_WAIT_MASTER;
194 
195 		mutex_unlock(&genpd->lock);
196 
197 		ret = pm_genpd_poweron(link->master);
198 
199 		mutex_lock(&genpd->lock);
200 
201 		/*
202 		 * The "wait for master" status is guaranteed not to change
203 		 * while the master is powering on.
204 		 */
205 		genpd->status = GPD_STATE_POWER_OFF;
206 		wake_up_all(&genpd->status_wait_queue);
207 		if (ret) {
208 			genpd_sd_counter_dec(link->master);
209 			goto err;
210 		}
211 	}
212 
213 	if (genpd->power_on) {
214 		ktime_t time_start = ktime_get();
215 		s64 elapsed_ns;
216 
217 		ret = genpd->power_on(genpd);
218 		if (ret)
219 			goto err;
220 
221 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
222 		if (elapsed_ns > genpd->power_on_latency_ns) {
223 			genpd->power_on_latency_ns = elapsed_ns;
224 			genpd->max_off_time_changed = true;
225 			genpd_recalc_cpu_exit_latency(genpd);
226 			if (genpd->name)
227 				pr_warning("%s: Power-on latency exceeded, new value %lld ns\n",
228 					genpd->name,
229 					elapsed_ns);
230 		}
231 	}
232 
233  out:
234 	genpd_set_active(genpd);
235 
236 	return 0;
237 
238  err:
239 	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
240 		genpd_sd_counter_dec(link->master);
241 
242 	return ret;
243 }
244 
245 /**
246  * pm_genpd_poweron - Restore power to a given PM domain and its masters.
247  * @genpd: PM domain to power up.
248  */
249 int pm_genpd_poweron(struct generic_pm_domain *genpd)
250 {
251 	int ret;
252 
253 	mutex_lock(&genpd->lock);
254 	ret = __pm_genpd_poweron(genpd);
255 	mutex_unlock(&genpd->lock);
256 	return ret;
257 }
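
/*
 * Illustrative sketch, not part of this file: a platform supplies the
 * ->power_on() callback invoked by __pm_genpd_poweron() and may call
 * pm_genpd_poweron() directly during bring-up.  All "foo" identifiers
 * (including the MMIO base and register offsets) are hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		writel(1, foo_pd_base + FOO_PD_CTRL);
 *		return readl(foo_pd_base + FOO_PD_STATUS) == 1 ? 0 : -EIO;
 *	}
 *
 *	foo_pd.power_on = foo_pd_power_on;
 *	pm_genpd_init(&foo_pd, NULL, true);
 *	WARN_ON(pm_genpd_poweron(&foo_pd));
 */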
258 
259 #endif /* CONFIG_PM */
260 
261 #ifdef CONFIG_PM_RUNTIME
262 
263 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
264 {
265 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
266 					save_state_latency_ns, "state save");
267 }
268 
269 static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
270 {
271 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
272 					restore_state_latency_ns,
273 					"state restore");
274 }
275 
276 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
277 				     unsigned long val, void *ptr)
278 {
279 	struct generic_pm_domain_data *gpd_data;
280 	struct device *dev;
281 
282 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
283 
284 	mutex_lock(&gpd_data->lock);
285 	dev = gpd_data->base.dev;
286 	if (!dev) {
287 		mutex_unlock(&gpd_data->lock);
288 		return NOTIFY_DONE;
289 	}
290 	mutex_unlock(&gpd_data->lock);
291 
292 	for (;;) {
293 		struct generic_pm_domain *genpd;
294 		struct pm_domain_data *pdd;
295 
296 		spin_lock_irq(&dev->power.lock);
297 
298 		pdd = dev->power.subsys_data ?
299 				dev->power.subsys_data->domain_data : NULL;
300 		if (pdd && pdd->dev) {
301 			to_gpd_data(pdd)->td.constraint_changed = true;
302 			genpd = dev_to_genpd(dev);
303 		} else {
304 			genpd = ERR_PTR(-ENODATA);
305 		}
306 
307 		spin_unlock_irq(&dev->power.lock);
308 
309 		if (!IS_ERR(genpd)) {
310 			mutex_lock(&genpd->lock);
311 			genpd->max_off_time_changed = true;
312 			mutex_unlock(&genpd->lock);
313 		}
314 
315 		dev = dev->parent;
316 		if (!dev || dev->power.ignore_children)
317 			break;
318 	}
319 
320 	return NOTIFY_DONE;
321 }
322 
323 /**
324  * __pm_genpd_save_device - Save the pre-suspend state of a device.
325  * @pdd: Domain data of the device to save the state of.
326  * @genpd: PM domain the device belongs to.
327  */
328 static int __pm_genpd_save_device(struct pm_domain_data *pdd,
329 				  struct generic_pm_domain *genpd)
330 	__releases(&genpd->lock) __acquires(&genpd->lock)
331 {
332 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
333 	struct device *dev = pdd->dev;
334 	int ret = 0;
335 
336 	if (gpd_data->need_restore)
337 		return 0;
338 
339 	mutex_unlock(&genpd->lock);
340 
341 	genpd_start_dev(genpd, dev);
342 	ret = genpd_save_dev(genpd, dev);
343 	genpd_stop_dev(genpd, dev);
344 
345 	mutex_lock(&genpd->lock);
346 
347 	if (!ret)
348 		gpd_data->need_restore = true;
349 
350 	return ret;
351 }
352 
353 /**
354  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
355  * @pdd: Domain data of the device to restore the state of.
356  * @genpd: PM domain the device belongs to.
357  */
358 static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
359 				      struct generic_pm_domain *genpd)
360 	__releases(&genpd->lock) __acquires(&genpd->lock)
361 {
362 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
363 	struct device *dev = pdd->dev;
364 	bool need_restore = gpd_data->need_restore;
365 
366 	gpd_data->need_restore = false;
367 	mutex_unlock(&genpd->lock);
368 
369 	genpd_start_dev(genpd, dev);
370 	if (need_restore)
371 		genpd_restore_dev(genpd, dev);
372 
373 	mutex_lock(&genpd->lock);
374 }
375 
376 /**
377  * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
378  * @genpd: PM domain to check.
379  *
380  * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
381  * a "power off" operation, which means that a "power on" has occurred in the
382  * meantime, or if its resume_count field is different from zero, which means
383  * that one of its devices has been resumed in the meantime.
384  */
385 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
386 {
387 	return genpd->status == GPD_STATE_WAIT_MASTER
388 		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
389 }
390 
391 /**
392  * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
393  * @genpd: PM domain to power off.
394  *
395  * Queue up the execution of pm_genpd_poweroff() unless it's already been done
396  * before.
397  */
398 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
399 {
400 	if (!work_pending(&genpd->power_off_work))
401 		queue_work(pm_wq, &genpd->power_off_work);
402 }
403 
404 /**
405  * pm_genpd_poweroff - Remove power from a given PM domain.
406  * @genpd: PM domain to power down.
407  *
408  * If all of the @genpd's devices have been suspended and all of its subdomains
409  * have been powered down, run the runtime suspend callbacks provided by all of
410  * the @genpd's devices' drivers and remove power from @genpd.
411  */
412 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
413 	__releases(&genpd->lock) __acquires(&genpd->lock)
414 {
415 	struct pm_domain_data *pdd;
416 	struct gpd_link *link;
417 	unsigned int not_suspended;
418 	int ret = 0;
419 
420  start:
421 	/*
422 	 * Do not try to power off the domain in the following situations:
423 	 * (1) The domain is already in the "power off" state.
424 	 * (2) The domain is waiting for its master to power up.
425 	 * (3) One of the domain's devices is being resumed right now.
426 	 * (4) System suspend is in progress.
427 	 */
428 	if (genpd->status == GPD_STATE_POWER_OFF
429 	    || genpd->status == GPD_STATE_WAIT_MASTER
430 	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
431 		return 0;
432 
433 	if (atomic_read(&genpd->sd_count) > 0)
434 		return -EBUSY;
435 
436 	not_suspended = 0;
437 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
438 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
439 		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
440 			not_suspended++;
441 
442 	if (not_suspended > genpd->in_progress)
443 		return -EBUSY;
444 
445 	if (genpd->poweroff_task) {
446 		/*
447 		 * Another instance of pm_genpd_poweroff() is executing
448 		 * callbacks, so tell it to start over and return.
449 		 */
450 		genpd->status = GPD_STATE_REPEAT;
451 		return 0;
452 	}
453 
454 	if (genpd->gov && genpd->gov->power_down_ok) {
455 		if (!genpd->gov->power_down_ok(&genpd->domain))
456 			return -EAGAIN;
457 	}
458 
459 	genpd->status = GPD_STATE_BUSY;
460 	genpd->poweroff_task = current;
461 
462 	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
463 		ret = atomic_read(&genpd->sd_count) == 0 ?
464 			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
465 
466 		if (genpd_abort_poweroff(genpd))
467 			goto out;
468 
469 		if (ret) {
470 			genpd_set_active(genpd);
471 			goto out;
472 		}
473 
474 		if (genpd->status == GPD_STATE_REPEAT) {
475 			genpd->poweroff_task = NULL;
476 			goto start;
477 		}
478 	}
479 
480 	if (genpd->cpu_data) {
481 		/*
482 		 * If cpu_data is set, cpuidle should turn the domain off when
483 		 * the CPU in it is idle.  In that case we don't decrement the
484 		 * subdomain counts of the master domains, so that power is not
485 		 * removed from the current domain prematurely as a result of
486 		 * cutting off the masters' power.
487 		 */
488 		genpd->status = GPD_STATE_POWER_OFF;
489 		cpuidle_pause_and_lock();
490 		genpd->cpu_data->idle_state->disabled = false;
491 		cpuidle_resume_and_unlock();
492 		goto out;
493 	}
494 
495 	if (genpd->power_off) {
496 		ktime_t time_start;
497 		s64 elapsed_ns;
498 
499 		if (atomic_read(&genpd->sd_count) > 0) {
500 			ret = -EBUSY;
501 			goto out;
502 		}
503 
504 		time_start = ktime_get();
505 
506 		/*
507 		 * If sd_count > 0 at this point, one of the subdomains hasn't
508 		 * managed to call pm_genpd_poweron() for the master yet after
509 		 * incrementing it.  In that case pm_genpd_poweron() will wait
510 		 * for us to drop the lock, so we can call .power_off() and let
511 		 * the pm_genpd_poweron() restore power for us (this shouldn't
512 		 * happen very often).
513 		 */
514 		ret = genpd->power_off(genpd);
515 		if (ret == -EBUSY) {
516 			genpd_set_active(genpd);
517 			goto out;
518 		}
519 
520 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
521 		if (elapsed_ns > genpd->power_off_latency_ns) {
522 			genpd->power_off_latency_ns = elapsed_ns;
523 			genpd->max_off_time_changed = true;
524 			if (genpd->name)
525 				pr_warning("%s: Power-off latency exceeded, new value %lld ns\n",
526 					genpd->name,
527 					elapsed_ns);
528 		}
529 	}
530 
531 	genpd->status = GPD_STATE_POWER_OFF;
532 
533 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
534 		genpd_sd_counter_dec(link->master);
535 		genpd_queue_power_off_work(link->master);
536 	}
537 
538  out:
539 	genpd->poweroff_task = NULL;
540 	wake_up_all(&genpd->status_wait_queue);
541 	return ret;
542 }
543 
544 /**
545  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
546  * @work: Work structure used for scheduling the execution of this function.
547  */
548 static void genpd_power_off_work_fn(struct work_struct *work)
549 {
550 	struct generic_pm_domain *genpd;
551 
552 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
553 
554 	genpd_acquire_lock(genpd);
555 	pm_genpd_poweroff(genpd);
556 	genpd_release_lock(genpd);
557 }
558 
559 /**
560  * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
561  * @dev: Device to suspend.
562  *
563  * Carry out a runtime suspend of a device under the assumption that its
564  * pm_domain field points to the domain member of an object of type
565  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
566  */
567 static int pm_genpd_runtime_suspend(struct device *dev)
568 {
569 	struct generic_pm_domain *genpd;
570 	bool (*stop_ok)(struct device *__dev);
571 	int ret;
572 
573 	dev_dbg(dev, "%s()\n", __func__);
574 
575 	genpd = dev_to_genpd(dev);
576 	if (IS_ERR(genpd))
577 		return -EINVAL;
578 
579 	might_sleep_if(!genpd->dev_irq_safe);
580 
581 	if (dev_gpd_data(dev)->always_on)
582 		return -EBUSY;
583 
584 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
585 	if (stop_ok && !stop_ok(dev))
586 		return -EBUSY;
587 
588 	ret = genpd_stop_dev(genpd, dev);
589 	if (ret)
590 		return ret;
591 
592 	/*
593 	 * If power.irq_safe is set, this routine will be run with interrupts
594 	 * off, so it can't use mutexes.
595 	 */
596 	if (dev->power.irq_safe)
597 		return 0;
598 
599 	mutex_lock(&genpd->lock);
600 	genpd->in_progress++;
601 	pm_genpd_poweroff(genpd);
602 	genpd->in_progress--;
603 	mutex_unlock(&genpd->lock);
604 
605 	return 0;
606 }
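
/*
 * Illustrative sketch, not part of this file: drivers never call
 * pm_genpd_runtime_suspend() directly.  A device in a generic PM domain
 * uses the normal runtime PM API, and the PM core dispatches the request
 * through dev->pm_domain->ops, ending up here.  The "foo" driver and its
 * foo_hw_transfer() helper are hypothetical.
 *
 *	static int foo_do_transfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		ret = foo_hw_transfer(dev);
 *		pm_runtime_put(dev);
 *		return ret;
 *	}
 */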
607 
608 /**
609  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
610  * @dev: Device to resume.
611  *
612  * Carry out a runtime resume of a device under the assumption that its
613  * pm_domain field points to the domain member of an object of type
614  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
615  */
616 static int pm_genpd_runtime_resume(struct device *dev)
617 {
618 	struct generic_pm_domain *genpd;
619 	DEFINE_WAIT(wait);
620 	int ret;
621 
622 	dev_dbg(dev, "%s()\n", __func__);
623 
624 	genpd = dev_to_genpd(dev);
625 	if (IS_ERR(genpd))
626 		return -EINVAL;
627 
628 	might_sleep_if(!genpd->dev_irq_safe);
629 
630 	/* If power.irq_safe, the PM domain is never powered off. */
631 	if (dev->power.irq_safe)
632 		return genpd_start_dev(genpd, dev);
633 
634 	mutex_lock(&genpd->lock);
635 	ret = __pm_genpd_poweron(genpd);
636 	if (ret) {
637 		mutex_unlock(&genpd->lock);
638 		return ret;
639 	}
640 	genpd->status = GPD_STATE_BUSY;
641 	genpd->resume_count++;
642 	for (;;) {
643 		prepare_to_wait(&genpd->status_wait_queue, &wait,
644 				TASK_UNINTERRUPTIBLE);
645 		/*
646 		 * If current is the powering off task, we have been called
647 		 * reentrantly from one of the device callbacks, so we should
648 		 * not wait.
649 		 */
650 		if (!genpd->poweroff_task || genpd->poweroff_task == current)
651 			break;
652 		mutex_unlock(&genpd->lock);
653 
654 		schedule();
655 
656 		mutex_lock(&genpd->lock);
657 	}
658 	finish_wait(&genpd->status_wait_queue, &wait);
659 	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
660 	genpd->resume_count--;
661 	genpd_set_active(genpd);
662 	wake_up_all(&genpd->status_wait_queue);
663 	mutex_unlock(&genpd->lock);
664 
665 	return 0;
666 }
667 
668 /**
669  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
670  */
671 void pm_genpd_poweroff_unused(void)
672 {
673 	struct generic_pm_domain *genpd;
674 
675 	mutex_lock(&gpd_list_lock);
676 
677 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
678 		genpd_queue_power_off_work(genpd);
679 
680 	mutex_unlock(&gpd_list_lock);
681 }
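
/*
 * Illustrative sketch, not part of this file: platforms typically call
 * pm_genpd_poweroff_unused() once, after all devices have been added to
 * their domains, e.g. from a late_initcall, so that domains which ended up
 * with no active devices are powered down.  "foo" is hypothetical.
 *
 *	static int __init foo_pd_late_init(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(foo_pd_late_init);
 */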
682 
683 #else
684 
685 static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
686 					    unsigned long val, void *ptr)
687 {
688 	return NOTIFY_DONE;
689 }
690 
691 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
692 
693 #define pm_genpd_runtime_suspend	NULL
694 #define pm_genpd_runtime_resume		NULL
695 
696 #endif /* CONFIG_PM_RUNTIME */
697 
698 #ifdef CONFIG_PM_SLEEP
699 
700 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
701 				    struct device *dev)
702 {
703 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
704 }
705 
706 static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
707 {
708 	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
709 }
710 
711 static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
712 {
713 	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
714 }
715 
716 static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
717 {
718 	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
719 }
720 
721 static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
722 {
723 	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
724 }
725 
726 static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
727 {
728 	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
729 }
730 
731 static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
732 {
733 	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
734 }
735 
736 static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
737 {
738 	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
739 }
740 
741 static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
742 {
743 	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
744 }
745 
746 /**
747  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
748  * @genpd: PM domain to power off, if possible.
749  *
750  * Check if the given PM domain can be powered off (during system suspend or
751  * hibernation) and do that if so.  Also, in that case propagate to its masters.
752  *
753  * This function is only called in "noirq" stages of system power transitions,
754  * so it need not acquire locks (all of the "noirq" callbacks are executed
755  * sequentially, so it is guaranteed that it will never run twice in parallel).
756  */
757 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
758 {
759 	struct gpd_link *link;
760 
761 	if (genpd->status == GPD_STATE_POWER_OFF)
762 		return;
763 
764 	if (genpd->suspended_count != genpd->device_count
765 	    || atomic_read(&genpd->sd_count) > 0)
766 		return;
767 
768 	if (genpd->power_off)
769 		genpd->power_off(genpd);
770 
771 	genpd->status = GPD_STATE_POWER_OFF;
772 
773 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
774 		genpd_sd_counter_dec(link->master);
775 		pm_genpd_sync_poweroff(link->master);
776 	}
777 }
778 
779 /**
780  * resume_needed - Check whether to resume a device before system suspend.
781  * @dev: Device to check.
782  * @genpd: PM domain the device belongs to.
783  *
784  * There are two cases in which a device that can wake up the system from sleep
785  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
786  * to wake up the system and it has to remain active for this purpose while the
787  * system is in the sleep state and (2) if the device is not enabled to wake up
788  * the system from sleep states and it generally doesn't generate wakeup signals
789  * by itself (those signals are generated on its behalf by other parts of the
790  * system).  In the latter case it may be necessary to reconfigure the device's
791  * wakeup settings during system suspend, because it may have been set up to
792  * signal remote wakeup from the system's working state as needed by runtime PM.
793  * Return 'true' in either of the above cases.
794  */
795 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
796 {
797 	bool active_wakeup;
798 
799 	if (!device_can_wakeup(dev))
800 		return false;
801 
802 	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
803 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
804 }
805 
806 /**
807  * pm_genpd_prepare - Start power transition of a device in a PM domain.
808  * @dev: Device to start the transition of.
809  *
810  * Start a power transition of a device (during a system-wide power transition)
811  * under the assumption that its pm_domain field points to the domain member of
812  * an object of type struct generic_pm_domain representing a PM domain
813  * consisting of I/O devices.
814  */
815 static int pm_genpd_prepare(struct device *dev)
816 {
817 	struct generic_pm_domain *genpd;
818 	int ret;
819 
820 	dev_dbg(dev, "%s()\n", __func__);
821 
822 	genpd = dev_to_genpd(dev);
823 	if (IS_ERR(genpd))
824 		return -EINVAL;
825 
826 	/*
827 	 * If a wakeup request is pending for the device, it should be woken up
828 	 * at this point and a system wakeup event should be reported if it's
829 	 * set up to wake up the system from sleep states.
830 	 */
831 	pm_runtime_get_noresume(dev);
832 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
833 		pm_wakeup_event(dev, 0);
834 
835 	if (pm_wakeup_pending()) {
836 		pm_runtime_put_sync(dev);
837 		return -EBUSY;
838 	}
839 
840 	if (resume_needed(dev, genpd))
841 		pm_runtime_resume(dev);
842 
843 	genpd_acquire_lock(genpd);
844 
845 	if (genpd->prepared_count++ == 0) {
846 		genpd->suspended_count = 0;
847 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
848 	}
849 
850 	genpd_release_lock(genpd);
851 
852 	if (genpd->suspend_power_off) {
853 		pm_runtime_put_noidle(dev);
854 		return 0;
855 	}
856 
857 	/*
858 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
859 	 * so pm_genpd_poweron() will return immediately, but if the device
860 	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
861 	 * to make it operational.
862 	 */
863 	pm_runtime_resume(dev);
864 	__pm_runtime_disable(dev, false);
865 
866 	ret = pm_generic_prepare(dev);
867 	if (ret) {
868 		mutex_lock(&genpd->lock);
869 
870 		if (--genpd->prepared_count == 0)
871 			genpd->suspend_power_off = false;
872 
873 		mutex_unlock(&genpd->lock);
874 		pm_runtime_enable(dev);
875 	}
876 
877 	pm_runtime_put_sync(dev);
878 	return ret;
879 }
880 
881 /**
882  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
883  * @dev: Device to suspend.
884  *
885  * Suspend a device under the assumption that its pm_domain field points to the
886  * domain member of an object of type struct generic_pm_domain representing
887  * a PM domain consisting of I/O devices.
888  */
889 static int pm_genpd_suspend(struct device *dev)
890 {
891 	struct generic_pm_domain *genpd;
892 
893 	dev_dbg(dev, "%s()\n", __func__);
894 
895 	genpd = dev_to_genpd(dev);
896 	if (IS_ERR(genpd))
897 		return -EINVAL;
898 
899 	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
900 }
901 
902 /**
903  * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
904  * @dev: Device to suspend.
905  *
906  * Carry out a late suspend of a device under the assumption that its
907  * pm_domain field points to the domain member of an object of type
908  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
909  */
910 static int pm_genpd_suspend_late(struct device *dev)
911 {
912 	struct generic_pm_domain *genpd;
913 
914 	dev_dbg(dev, "%s()\n", __func__);
915 
916 	genpd = dev_to_genpd(dev);
917 	if (IS_ERR(genpd))
918 		return -EINVAL;
919 
920 	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
921 }
922 
923 /**
924  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
925  * @dev: Device to suspend.
926  *
927  * Stop the device and remove power from the domain if all devices in it have
928  * been stopped.
929  */
930 static int pm_genpd_suspend_noirq(struct device *dev)
931 {
932 	struct generic_pm_domain *genpd;
933 
934 	dev_dbg(dev, "%s()\n", __func__);
935 
936 	genpd = dev_to_genpd(dev);
937 	if (IS_ERR(genpd))
938 		return -EINVAL;
939 
940 	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
941 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
942 		return 0;
943 
944 	genpd_stop_dev(genpd, dev);
945 
946 	/*
947 	 * Since all of the "noirq" callbacks are executed sequentially, it is
948 	 * guaranteed that this function will never run twice in parallel for
949 	 * the same PM domain, so it is not necessary to use locking here.
950 	 */
951 	genpd->suspended_count++;
952 	pm_genpd_sync_poweroff(genpd);
953 
954 	return 0;
955 }
956 
957 /**
958  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
959  * @dev: Device to resume.
960  *
961  * Restore power to the device's PM domain, if necessary, and start the device.
962  */
963 static int pm_genpd_resume_noirq(struct device *dev)
964 {
965 	struct generic_pm_domain *genpd;
966 
967 	dev_dbg(dev, "%s()\n", __func__);
968 
969 	genpd = dev_to_genpd(dev);
970 	if (IS_ERR(genpd))
971 		return -EINVAL;
972 
973 	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
974 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
975 		return 0;
976 
977 	/*
978 	 * Since all of the "noirq" callbacks are executed sequentially, it is
979 	 * guaranteed that this function will never run twice in parallel for
980 	 * the same PM domain, so it is not necessary to use locking here.
981 	 */
982 	pm_genpd_poweron(genpd);
983 	genpd->suspended_count--;
984 
985 	return genpd_start_dev(genpd, dev);
986 }
987 
988 /**
989  * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
990  * @dev: Device to resume.
991  *
992  * Carry out an early resume of a device under the assumption that its
993  * pm_domain field points to the domain member of an object of type
994  * struct generic_pm_domain representing a power domain consisting of I/O
995  * devices.
996  */
997 static int pm_genpd_resume_early(struct device *dev)
998 {
999 	struct generic_pm_domain *genpd;
1000 
1001 	dev_dbg(dev, "%s()\n", __func__);
1002 
1003 	genpd = dev_to_genpd(dev);
1004 	if (IS_ERR(genpd))
1005 		return -EINVAL;
1006 
1007 	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
1008 }
1009 
1010 /**
1011  * pm_genpd_resume - Resume of device in an I/O PM domain.
1012  * @dev: Device to resume.
1013  *
1014  * Resume a device under the assumption that its pm_domain field points to the
1015  * domain member of an object of type struct generic_pm_domain representing
1016  * a power domain consisting of I/O devices.
1017  */
1018 static int pm_genpd_resume(struct device *dev)
1019 {
1020 	struct generic_pm_domain *genpd;
1021 
1022 	dev_dbg(dev, "%s()\n", __func__);
1023 
1024 	genpd = dev_to_genpd(dev);
1025 	if (IS_ERR(genpd))
1026 		return -EINVAL;
1027 
1028 	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
1029 }
1030 
1031 /**
1032  * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1033  * @dev: Device to freeze.
1034  *
1035  * Freeze a device under the assumption that its pm_domain field points to the
1036  * domain member of an object of type struct generic_pm_domain representing
1037  * a power domain consisting of I/O devices.
1038  */
1039 static int pm_genpd_freeze(struct device *dev)
1040 {
1041 	struct generic_pm_domain *genpd;
1042 
1043 	dev_dbg(dev, "%s()\n", __func__);
1044 
1045 	genpd = dev_to_genpd(dev);
1046 	if (IS_ERR(genpd))
1047 		return -EINVAL;
1048 
1049 	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1050 }
1051 
1052 /**
1053  * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1054  * @dev: Device to freeze.
1055  *
1056  * Carry out a late freeze of a device under the assumption that its
1057  * pm_domain field points to the domain member of an object of type
1058  * struct generic_pm_domain representing a power domain consisting of I/O
1059  * devices.
1060  */
1061 static int pm_genpd_freeze_late(struct device *dev)
1062 {
1063 	struct generic_pm_domain *genpd;
1064 
1065 	dev_dbg(dev, "%s()\n", __func__);
1066 
1067 	genpd = dev_to_genpd(dev);
1068 	if (IS_ERR(genpd))
1069 		return -EINVAL;
1070 
1071 	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1072 }
1073 
1074 /**
1075  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1076  * @dev: Device to freeze.
1077  *
1078  * Complete the freeze of a device under the assumption that its
1079  * pm_domain field points to the domain member of an object of type
1080  * struct generic_pm_domain representing a power domain consisting of I/O
1081  * devices.
1082  */
1083 static int pm_genpd_freeze_noirq(struct device *dev)
1084 {
1085 	struct generic_pm_domain *genpd;
1086 
1087 	dev_dbg(dev, "%s()\n", __func__);
1088 
1089 	genpd = dev_to_genpd(dev);
1090 	if (IS_ERR(genpd))
1091 		return -EINVAL;
1092 
1093 	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1094 		0 : genpd_stop_dev(genpd, dev);
1095 }
1096 
1097 /**
1098  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1099  * @dev: Device to thaw.
1100  *
1101  * Start the device, unless power was removed from the domain before the
1102  * system transition.
1103  */
1104 static int pm_genpd_thaw_noirq(struct device *dev)
1105 {
1106 	struct generic_pm_domain *genpd;
1107 
1108 	dev_dbg(dev, "%s()\n", __func__);
1109 
1110 	genpd = dev_to_genpd(dev);
1111 	if (IS_ERR(genpd))
1112 		return -EINVAL;
1113 
1114 	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1115 		0 : genpd_start_dev(genpd, dev);
1116 }
1117 
1118 /**
1119  * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1120  * @dev: Device to thaw.
1121  *
1122  * Carry out an early thaw of a device under the assumption that its
1123  * pm_domain field points to the domain member of an object of type
1124  * struct generic_pm_domain representing a power domain consisting of I/O
1125  * devices.
1126  */
1127 static int pm_genpd_thaw_early(struct device *dev)
1128 {
1129 	struct generic_pm_domain *genpd;
1130 
1131 	dev_dbg(dev, "%s()\n", __func__);
1132 
1133 	genpd = dev_to_genpd(dev);
1134 	if (IS_ERR(genpd))
1135 		return -EINVAL;
1136 
1137 	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1138 }
1139 
1140 /**
1141  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1142  * @dev: Device to thaw.
1143  *
1144  * Thaw a device under the assumption that its pm_domain field points to the
1145  * domain member of an object of type struct generic_pm_domain representing
1146  * a power domain consisting of I/O devices.
1147  */
1148 static int pm_genpd_thaw(struct device *dev)
1149 {
1150 	struct generic_pm_domain *genpd;
1151 
1152 	dev_dbg(dev, "%s()\n", __func__);
1153 
1154 	genpd = dev_to_genpd(dev);
1155 	if (IS_ERR(genpd))
1156 		return -EINVAL;
1157 
1158 	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1159 }
1160 
1161 /**
1162  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1163  * @dev: Device to resume.
1164  *
1165  * Make sure the domain will be in the same power state as before the
1166  * hibernation the system is resuming from and start the device if necessary.
1167  */
1168 static int pm_genpd_restore_noirq(struct device *dev)
1169 {
1170 	struct generic_pm_domain *genpd;
1171 
1172 	dev_dbg(dev, "%s()\n", __func__);
1173 
1174 	genpd = dev_to_genpd(dev);
1175 	if (IS_ERR(genpd))
1176 		return -EINVAL;
1177 
1178 	/*
1179 	 * Since all of the "noirq" callbacks are executed sequentially, it is
1180 	 * guaranteed that this function will never run twice in parallel for
1181 	 * the same PM domain, so it is not necessary to use locking here.
1182 	 *
1183 	 * At this point suspended_count == 0 means we are being run for the
1184 	 * first time for the given domain in the present cycle.
1185 	 */
1186 	if (genpd->suspended_count++ == 0) {
1187 		/*
1188 		 * The boot kernel might have put the domain into an arbitrary
1189 		 * state, so make it appear as powered off to pm_genpd_poweron(),
1190 		 * which will then try to power it on in case it was really off.
1191 		 */
1192 		genpd->status = GPD_STATE_POWER_OFF;
1193 		if (genpd->suspend_power_off) {
1194 			/*
1195 			 * If the domain was off before the hibernation, make
1196 			 * sure it will be off going forward.
1197 			 */
1198 			if (genpd->power_off)
1199 				genpd->power_off(genpd);
1200 
1201 			return 0;
1202 		}
1203 	}
1204 
1205 	if (genpd->suspend_power_off)
1206 		return 0;
1207 
1208 	pm_genpd_poweron(genpd);
1209 
1210 	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
1211 }
1212 
1213 /**
1214  * pm_genpd_complete - Complete power transition of a device in a power domain.
1215  * @dev: Device to complete the transition of.
1216  *
1217  * Complete a power transition of a device (during a system-wide power
1218  * transition) under the assumption that its pm_domain field points to the
1219  * domain member of an object of type struct generic_pm_domain representing
1220  * a power domain consisting of I/O devices.
1221  */
1222 static void pm_genpd_complete(struct device *dev)
1223 {
1224 	struct generic_pm_domain *genpd;
1225 	bool run_complete;
1226 
1227 	dev_dbg(dev, "%s()\n", __func__);
1228 
1229 	genpd = dev_to_genpd(dev);
1230 	if (IS_ERR(genpd))
1231 		return;
1232 
1233 	mutex_lock(&genpd->lock);
1234 
1235 	run_complete = !genpd->suspend_power_off;
1236 	if (--genpd->prepared_count == 0)
1237 		genpd->suspend_power_off = false;
1238 
1239 	mutex_unlock(&genpd->lock);
1240 
1241 	if (run_complete) {
1242 		pm_generic_complete(dev);
1243 		pm_runtime_set_active(dev);
1244 		pm_runtime_enable(dev);
1245 		pm_runtime_idle(dev);
1246 	}
1247 }
1248 
1249 #else
1250 
1251 #define pm_genpd_prepare		NULL
1252 #define pm_genpd_suspend		NULL
1253 #define pm_genpd_suspend_late		NULL
1254 #define pm_genpd_suspend_noirq		NULL
1255 #define pm_genpd_resume_early		NULL
1256 #define pm_genpd_resume_noirq		NULL
1257 #define pm_genpd_resume			NULL
1258 #define pm_genpd_freeze			NULL
1259 #define pm_genpd_freeze_late		NULL
1260 #define pm_genpd_freeze_noirq		NULL
1261 #define pm_genpd_thaw_early		NULL
1262 #define pm_genpd_thaw_noirq		NULL
1263 #define pm_genpd_thaw			NULL
1264 #define pm_genpd_restore_noirq		NULL
1265 #define pm_genpd_complete		NULL
1266 
1267 #endif /* CONFIG_PM_SLEEP */
1268 
1269 static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1270 {
1271 	struct generic_pm_domain_data *gpd_data;
1272 
1273 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1274 	if (!gpd_data)
1275 		return NULL;
1276 
1277 	mutex_init(&gpd_data->lock);
1278 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1279 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1280 	return gpd_data;
1281 }
1282 
1283 static void __pm_genpd_free_dev_data(struct device *dev,
1284 				     struct generic_pm_domain_data *gpd_data)
1285 {
1286 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1287 	kfree(gpd_data);
1288 }
1289 
1290 /**
1291  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1292  * @genpd: PM domain to add the device to.
1293  * @dev: Device to be added.
1294  * @td: Set of PM QoS timing parameters to attach to the device.
1295  */
1296 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1297 			  struct gpd_timing_data *td)
1298 {
1299 	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1300 	struct pm_domain_data *pdd;
1301 	int ret = 0;
1302 
1303 	dev_dbg(dev, "%s()\n", __func__);
1304 
1305 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1306 		return -EINVAL;
1307 
1308 	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1309 	if (!gpd_data_new)
1310 		return -ENOMEM;
1311 
1312 	genpd_acquire_lock(genpd);
1313 
1314 	if (genpd->prepared_count > 0) {
1315 		ret = -EAGAIN;
1316 		goto out;
1317 	}
1318 
1319 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
1320 		if (pdd->dev == dev) {
1321 			ret = -EINVAL;
1322 			goto out;
1323 		}
1324 
1325 	ret = dev_pm_get_subsys_data(dev);
1326 	if (ret)
1327 		goto out;
1328 
1329 	genpd->device_count++;
1330 	genpd->max_off_time_changed = true;
1331 
1332 	spin_lock_irq(&dev->power.lock);
1333 
1334 	dev->pm_domain = &genpd->domain;
1335 	if (dev->power.subsys_data->domain_data) {
1336 		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1337 	} else {
1338 		gpd_data = gpd_data_new;
1339 		dev->power.subsys_data->domain_data = &gpd_data->base;
1340 	}
1341 	gpd_data->refcount++;
1342 	if (td)
1343 		gpd_data->td = *td;
1344 
1345 	spin_unlock_irq(&dev->power.lock);
1346 
1347 	mutex_lock(&gpd_data->lock);
1348 	gpd_data->base.dev = dev;
1349 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1350 	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
1351 	gpd_data->td.constraint_changed = true;
1352 	gpd_data->td.effective_constraint_ns = -1;
1353 	mutex_unlock(&gpd_data->lock);
1354 
1355  out:
1356 	genpd_release_lock(genpd);
1357 
1358 	if (gpd_data != gpd_data_new)
1359 		__pm_genpd_free_dev_data(dev, gpd_data_new);
1360 
1361 	return ret;
1362 }
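
/*
 * Illustrative sketch, not part of this file: a platform attaches a device
 * to a domain, optionally seeding the latency bookkeeping used by the
 * governor.  The pm_genpd_add_device() wrapper (which passes td == NULL)
 * may be used when no initial timing data is known.  All "foo" names are
 * hypothetical.
 *
 *	static struct gpd_timing_data foo_td = {
 *		.stop_latency_ns	= 20000,
 *		.start_latency_ns	= 50000,
 *	};
 *
 *	ret = __pm_genpd_add_device(&foo_pd, &pdev->dev, &foo_td);
 */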
1363 
1364 /**
1365  * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1366  * @genpd_node: Device tree node pointer representing the PM domain to which
1367  *   the device is added.
1368  * @dev: Device to be added.
1369  * @td: Set of PM QoS timing parameters to attach to the device.
1370  */
1371 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1372 			     struct gpd_timing_data *td)
1373 {
1374 	struct generic_pm_domain *genpd = NULL, *gpd;
1375 
1376 	dev_dbg(dev, "%s()\n", __func__);
1377 
1378 	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1379 		return -EINVAL;
1380 
1381 	mutex_lock(&gpd_list_lock);
1382 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1383 		if (gpd->of_node == genpd_node) {
1384 			genpd = gpd;
1385 			break;
1386 		}
1387 	}
1388 	mutex_unlock(&gpd_list_lock);
1389 
1390 	if (!genpd)
1391 		return -EINVAL;
1392 
1393 	return __pm_genpd_add_device(genpd, dev, td);
1394 }
1395 
1396 /**
1397  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1398  * @genpd: PM domain to remove the device from.
1399  * @dev: Device to be removed.
1400  */
1401 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1402 			   struct device *dev)
1403 {
1404 	struct generic_pm_domain_data *gpd_data;
1405 	struct pm_domain_data *pdd;
1406 	bool remove = false;
1407 	int ret = 0;
1408 
1409 	dev_dbg(dev, "%s()\n", __func__);
1410 
1411 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1412 	    ||  IS_ERR_OR_NULL(dev->pm_domain)
1413 	    ||  pd_to_genpd(dev->pm_domain) != genpd)
1414 		return -EINVAL;
1415 
1416 	genpd_acquire_lock(genpd);
1417 
1418 	if (genpd->prepared_count > 0) {
1419 		ret = -EAGAIN;
1420 		goto out;
1421 	}
1422 
1423 	genpd->device_count--;
1424 	genpd->max_off_time_changed = true;
1425 
1426 	spin_lock_irq(&dev->power.lock);
1427 
1428 	dev->pm_domain = NULL;
1429 	pdd = dev->power.subsys_data->domain_data;
1430 	list_del_init(&pdd->list_node);
1431 	gpd_data = to_gpd_data(pdd);
1432 	if (--gpd_data->refcount == 0) {
1433 		dev->power.subsys_data->domain_data = NULL;
1434 		remove = true;
1435 	}
1436 
1437 	spin_unlock_irq(&dev->power.lock);
1438 
1439 	mutex_lock(&gpd_data->lock);
1440 	pdd->dev = NULL;
1441 	mutex_unlock(&gpd_data->lock);
1442 
1443 	genpd_release_lock(genpd);
1444 
1445 	dev_pm_put_subsys_data(dev);
1446 	if (remove)
1447 		__pm_genpd_free_dev_data(dev, gpd_data);
1448 
1449 	return 0;
1450 
1451  out:
1452 	genpd_release_lock(genpd);
1453 
1454 	return ret;
1455 }
1456 
1457 /**
1458  * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1459  * @dev: Device to set/unset the flag for.
1460  * @val: The new value of the device's "always on" flag.
1461  */
1462 void pm_genpd_dev_always_on(struct device *dev, bool val)
1463 {
1464 	struct pm_subsys_data *psd;
1465 	unsigned long flags;
1466 
1467 	spin_lock_irqsave(&dev->power.lock, flags);
1468 
1469 	psd = dev_to_psd(dev);
1470 	if (psd && psd->domain_data)
1471 		to_gpd_data(psd->domain_data)->always_on = val;
1472 
1473 	spin_unlock_irqrestore(&dev->power.lock, flags);
1474 }
1475 EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
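
/*
 * Illustrative sketch, not part of this file: a platform can mark a device
 * that must never be stopped (e.g. a system timer) while still keeping it
 * in the domain for accounting purposes.  "foo_timer_pdev" is hypothetical.
 *
 *	pm_genpd_dev_always_on(&foo_timer_pdev->dev, true);
 */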
1476 
1477 /**
1478  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1479  * @dev: Device to set/unset the flag for.
1480  * @val: The new value of the device's "need restore" flag.
1481  */
1482 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1483 {
1484 	struct pm_subsys_data *psd;
1485 	unsigned long flags;
1486 
1487 	spin_lock_irqsave(&dev->power.lock, flags);
1488 
1489 	psd = dev_to_psd(dev);
1490 	if (psd && psd->domain_data)
1491 		to_gpd_data(psd->domain_data)->need_restore = val;
1492 
1493 	spin_unlock_irqrestore(&dev->power.lock, flags);
1494 }
1495 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1496 
1497 /**
1498  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1499  * @genpd: Master PM domain to add the subdomain to.
1500  * @subdomain: Subdomain to be added.
1501  */
1502 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1503 			   struct generic_pm_domain *subdomain)
1504 {
1505 	struct gpd_link *link;
1506 	int ret = 0;
1507 
1508 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1509 		return -EINVAL;
1510 
1511  start:
1512 	genpd_acquire_lock(genpd);
1513 	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1514 
1515 	if (subdomain->status != GPD_STATE_POWER_OFF
1516 	    && subdomain->status != GPD_STATE_ACTIVE) {
1517 		mutex_unlock(&subdomain->lock);
1518 		genpd_release_lock(genpd);
1519 		goto start;
1520 	}
1521 
1522 	if (genpd->status == GPD_STATE_POWER_OFF
1523 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1524 		ret = -EINVAL;
1525 		goto out;
1526 	}
1527 
1528 	list_for_each_entry(link, &genpd->master_links, master_node) {
1529 		if (link->slave == subdomain && link->master == genpd) {
1530 			ret = -EINVAL;
1531 			goto out;
1532 		}
1533 	}
1534 
1535 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1536 	if (!link) {
1537 		ret = -ENOMEM;
1538 		goto out;
1539 	}
1540 	link->master = genpd;
1541 	list_add_tail(&link->master_node, &genpd->master_links);
1542 	link->slave = subdomain;
1543 	list_add_tail(&link->slave_node, &subdomain->slave_links);
1544 	if (subdomain->status != GPD_STATE_POWER_OFF)
1545 		genpd_sd_counter_inc(genpd);
1546 
1547  out:
1548 	mutex_unlock(&subdomain->lock);
1549 	genpd_release_lock(genpd);
1550 
1551 	return ret;
1552 }
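
/*
 * Illustrative sketch, not part of this file: building a two-level
 * hierarchy in which the hypothetical "foo_a3" domain can only be powered
 * while its master "foo_a4" is on.
 *
 *	pm_genpd_init(&foo_a4, NULL, true);
 *	pm_genpd_init(&foo_a3, NULL, true);
 *	ret = pm_genpd_add_subdomain(&foo_a4, &foo_a3);
 */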
1553 
1554 /**
1555  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1556  * @genpd: Master PM domain to remove the subdomain from.
1557  * @subdomain: Subdomain to be removed.
1558  */
1559 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1560 			      struct generic_pm_domain *subdomain)
1561 {
1562 	struct gpd_link *link;
1563 	int ret = -EINVAL;
1564 
1565 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1566 		return -EINVAL;
1567 
1568  start:
1569 	genpd_acquire_lock(genpd);
1570 
1571 	list_for_each_entry(link, &genpd->master_links, master_node) {
1572 		if (link->slave != subdomain)
1573 			continue;
1574 
1575 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1576 
1577 		if (subdomain->status != GPD_STATE_POWER_OFF
1578 		    && subdomain->status != GPD_STATE_ACTIVE) {
1579 			mutex_unlock(&subdomain->lock);
1580 			genpd_release_lock(genpd);
1581 			goto start;
1582 		}
1583 
1584 		list_del(&link->master_node);
1585 		list_del(&link->slave_node);
1586 		kfree(link);
1587 		if (subdomain->status != GPD_STATE_POWER_OFF)
1588 			genpd_sd_counter_dec(genpd);
1589 
1590 		mutex_unlock(&subdomain->lock);
1591 
1592 		ret = 0;
1593 		break;
1594 	}
1595 
1596 	genpd_release_lock(genpd);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1603  * @dev: Device to add the callbacks to.
1604  * @ops: Set of callbacks to add.
1605  * @td: Timing data to add to the device along with the callbacks (optional).
1606  *
1607  * Every call to this routine should be balanced with a call to
1608  * __pm_genpd_remove_callbacks() and they must not be nested.
1609  */
1610 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1611 			   struct gpd_timing_data *td)
1612 {
1613 	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1614 	int ret = 0;
1615 
1616 	if (!(dev && ops))
1617 		return -EINVAL;
1618 
1619 	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1620 	if (!gpd_data_new)
1621 		return -ENOMEM;
1622 
1623 	pm_runtime_disable(dev);
1624 	device_pm_lock();
1625 
1626 	ret = dev_pm_get_subsys_data(dev);
1627 	if (ret)
1628 		goto out;
1629 
1630 	spin_lock_irq(&dev->power.lock);
1631 
1632 	if (dev->power.subsys_data->domain_data) {
1633 		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1634 	} else {
1635 		gpd_data = gpd_data_new;
1636 		dev->power.subsys_data->domain_data = &gpd_data->base;
1637 	}
1638 	gpd_data->refcount++;
1639 	gpd_data->ops = *ops;
1640 	if (td)
1641 		gpd_data->td = *td;
1642 
1643 	spin_unlock_irq(&dev->power.lock);
1644 
1645  out:
1646 	device_pm_unlock();
1647 	pm_runtime_enable(dev);
1648 
1649 	if (gpd_data != gpd_data_new)
1650 		__pm_genpd_free_dev_data(dev, gpd_data_new);
1651 
1652 	return ret;
1653 }
1654 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
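
/*
 * Illustrative sketch, not part of this file: overriding the default
 * save/restore handling for one device with device-specific callbacks.
 * The "foo" callbacks are hypothetical.
 *
 *	static int foo_save_context(struct device *dev);
 *	static int foo_restore_context(struct device *dev);
 *
 *	static struct gpd_dev_ops foo_dev_ops = {
 *		.save_state	= foo_save_context,
 *		.restore_state	= foo_restore_context,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &foo_dev_ops, NULL);
 */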
1655 
1656 /**
1657  * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1658  * @dev: Device to remove the callbacks from.
1659  * @clear_td: If set, clear the device's timing data too.
1660  *
1661  * This routine can only be called after pm_genpd_add_callbacks().
1662  */
1663 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1664 {
1665 	struct generic_pm_domain_data *gpd_data = NULL;
1666 	bool remove = false;
1667 	int ret = 0;
1668 
1669 	if (!(dev && dev->power.subsys_data))
1670 		return -EINVAL;
1671 
1672 	pm_runtime_disable(dev);
1673 	device_pm_lock();
1674 
1675 	spin_lock_irq(&dev->power.lock);
1676 
1677 	if (dev->power.subsys_data->domain_data) {
1678 		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1679 		gpd_data->ops = (struct gpd_dev_ops){ NULL };
1680 		if (clear_td)
1681 			gpd_data->td = (struct gpd_timing_data){ 0 };
1682 
1683 		if (--gpd_data->refcount == 0) {
1684 			dev->power.subsys_data->domain_data = NULL;
1685 			remove = true;
1686 		}
1687 	} else {
1688 		ret = -EINVAL;
1689 	}
1690 
1691 	spin_unlock_irq(&dev->power.lock);
1692 
1693 	device_pm_unlock();
1694 	pm_runtime_enable(dev);
1695 
1696 	if (ret)
1697 		return ret;
1698 
1699 	dev_pm_put_subsys_data(dev);
1700 	if (remove)
1701 		__pm_genpd_free_dev_data(dev, gpd_data);
1702 
1703 	return 0;
1704 }
1705 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1706 
1707 int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1708 {
1709 	struct cpuidle_driver *cpuidle_drv;
1710 	struct gpd_cpu_data *cpu_data;
1711 	struct cpuidle_state *idle_state;
1712 	int ret = 0;
1713 
1714 	if (IS_ERR_OR_NULL(genpd) || state < 0)
1715 		return -EINVAL;
1716 
1717 	genpd_acquire_lock(genpd);
1718 
1719 	if (genpd->cpu_data) {
1720 		ret = -EEXIST;
1721 		goto out;
1722 	}
1723 	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1724 	if (!cpu_data) {
1725 		ret = -ENOMEM;
1726 		goto out;
1727 	}
1728 	cpuidle_drv = cpuidle_driver_ref();
1729 	if (!cpuidle_drv) {
1730 		ret = -ENODEV;
1731 		goto out;
1732 	}
1733 	if (cpuidle_drv->state_count <= state) {
1734 		ret = -EINVAL;
1735 		goto err;
1736 	}
1737 	idle_state = &cpuidle_drv->states[state];
1738 	if (!idle_state->disabled) {
1739 		ret = -EAGAIN;
1740 		goto err;
1741 	}
1742 	cpu_data->idle_state = idle_state;
1743 	cpu_data->saved_exit_latency = idle_state->exit_latency;
1744 	genpd->cpu_data = cpu_data;
1745 	genpd_recalc_cpu_exit_latency(genpd);
1746 
1747  out:
1748 	genpd_release_lock(genpd);
1749 	return ret;
1750 
1751  err:
1752 	cpuidle_driver_unref();
1753 	goto out;
1754 }
1755 
1756 int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1757 {
1758 	struct gpd_cpu_data *cpu_data;
1759 	struct cpuidle_state *idle_state;
1760 	int ret = 0;
1761 
1762 	if (IS_ERR_OR_NULL(genpd))
1763 		return -EINVAL;
1764 
1765 	genpd_acquire_lock(genpd);
1766 
1767 	cpu_data = genpd->cpu_data;
1768 	if (!cpu_data) {
1769 		ret = -ENODEV;
1770 		goto out;
1771 	}
1772 	idle_state = cpu_data->idle_state;
1773 	if (!idle_state->disabled) {
1774 		ret = -EAGAIN;
1775 		goto out;
1776 	}
1777 	idle_state->exit_latency = cpu_data->saved_exit_latency;
1778 	cpuidle_driver_unref();
1779 	genpd->cpu_data = NULL;
1780 	kfree(cpu_data);
1781 
1782  out:
1783 	genpd_release_lock(genpd);
1784 	return ret;
1785 }
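
/*
 * Illustrative sketch, not part of this file: tying the hypothetical
 * "foo_cpu_pd" domain to cpuidle state 1, so that entering that state also
 * lets the domain be treated as powered off.  The state must have been
 * registered as disabled by the cpuidle driver, or genpd_attach_cpuidle()
 * fails with -EAGAIN (see above).
 *
 *	ret = genpd_attach_cpuidle(&foo_cpu_pd, 1);
 *
 * and, on the teardown path:
 *
 *	ret = genpd_detach_cpuidle(&foo_cpu_pd);
 */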
1786 
1787 /* Default device callbacks for generic PM domains. */
1788 
1789 /**
1790  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1791  * @dev: Device to handle.
1792  */
1793 static int pm_genpd_default_save_state(struct device *dev)
1794 {
1795 	int (*cb)(struct device *__dev);
1796 
1797 	cb = dev_gpd_data(dev)->ops.save_state;
1798 	if (cb)
1799 		return cb(dev);
1800 
1801 	if (dev->type && dev->type->pm)
1802 		cb = dev->type->pm->runtime_suspend;
1803 	else if (dev->class && dev->class->pm)
1804 		cb = dev->class->pm->runtime_suspend;
1805 	else if (dev->bus && dev->bus->pm)
1806 		cb = dev->bus->pm->runtime_suspend;
1807 	else
1808 		cb = NULL;
1809 
1810 	if (!cb && dev->driver && dev->driver->pm)
1811 		cb = dev->driver->pm->runtime_suspend;
1812 
1813 	return cb ? cb(dev) : 0;
1814 }
1815 
1816 /**
1817  * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1818  * @dev: Device to handle.
1819  */
1820 static int pm_genpd_default_restore_state(struct device *dev)
1821 {
1822 	int (*cb)(struct device *__dev);
1823 
1824 	cb = dev_gpd_data(dev)->ops.restore_state;
1825 	if (cb)
1826 		return cb(dev);
1827 
1828 	if (dev->type && dev->type->pm)
1829 		cb = dev->type->pm->runtime_resume;
1830 	else if (dev->class && dev->class->pm)
1831 		cb = dev->class->pm->runtime_resume;
1832 	else if (dev->bus && dev->bus->pm)
1833 		cb = dev->bus->pm->runtime_resume;
1834 	else
1835 		cb = NULL;
1836 
1837 	if (!cb && dev->driver && dev->driver->pm)
1838 		cb = dev->driver->pm->runtime_resume;
1839 
1840 	return cb ? cb(dev) : 0;
1841 }
1842 
1843 #ifdef CONFIG_PM_SLEEP
1844 
1845 /**
1846  * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1847  * @dev: Device to handle.
1848  */
1849 static int pm_genpd_default_suspend(struct device *dev)
1850 {
1851 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1852 
1853 	return cb ? cb(dev) : pm_generic_suspend(dev);
1854 }
1855 
1856 /**
1857  * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1858  * @dev: Device to handle.
1859  */
1860 static int pm_genpd_default_suspend_late(struct device *dev)
1861 {
1862 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1863 
1864 	return cb ? cb(dev) : pm_generic_suspend_late(dev);
1865 }
1866 
1867 /**
1868  * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1869  * @dev: Device to handle.
1870  */
1871 static int pm_genpd_default_resume_early(struct device *dev)
1872 {
1873 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1874 
1875 	return cb ? cb(dev) : pm_generic_resume_early(dev);
1876 }
1877 
1878 /**
1879  * pm_genpd_default_resume - Default "device resume" for PM domains.
1880  * @dev: Device to handle.
1881  */
1882 static int pm_genpd_default_resume(struct device *dev)
1883 {
1884 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1885 
1886 	return cb ? cb(dev) : pm_generic_resume(dev);
1887 }
1888 
1889 /**
1890  * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1891  * @dev: Device to handle.
1892  */
1893 static int pm_genpd_default_freeze(struct device *dev)
1894 {
1895 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1896 
1897 	return cb ? cb(dev) : pm_generic_freeze(dev);
1898 }
1899 
1900 /**
1901  * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1902  * @dev: Device to handle.
1903  */
1904 static int pm_genpd_default_freeze_late(struct device *dev)
1905 {
1906 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1907 
1908 	return cb ? cb(dev) : pm_generic_freeze_late(dev);
1909 }
1910 
1911 /**
1912  * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1913  * @dev: Device to handle.
1914  */
1915 static int pm_genpd_default_thaw_early(struct device *dev)
1916 {
1917 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1918 
1919 	return cb ? cb(dev) : pm_generic_thaw_early(dev);
1920 }
1921 
1922 /**
1923  * pm_genpd_default_thaw - Default "device thaw" for PM domains.
1924  * @dev: Device to handle.
1925  */
1926 static int pm_genpd_default_thaw(struct device *dev)
1927 {
1928 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1929 
1930 	return cb ? cb(dev) : pm_generic_thaw(dev);
1931 }
1932 
1933 #else /* !CONFIG_PM_SLEEP */
1934 
1935 #define pm_genpd_default_suspend	NULL
1936 #define pm_genpd_default_suspend_late	NULL
1937 #define pm_genpd_default_resume_early	NULL
1938 #define pm_genpd_default_resume		NULL
1939 #define pm_genpd_default_freeze		NULL
1940 #define pm_genpd_default_freeze_late	NULL
1941 #define pm_genpd_default_thaw_early	NULL
1942 #define pm_genpd_default_thaw		NULL
1943 
1944 #endif /* !CONFIG_PM_SLEEP */
1945 
1946 /**
1947  * pm_genpd_init - Initialize a generic I/O PM domain object.
1948  * @genpd: PM domain object to initialize.
1949  * @gov: PM domain governor to associate with the domain (may be NULL).
1950  * @is_off: Whether the domain should start in the powered-off state.
1951  */
1952 void pm_genpd_init(struct generic_pm_domain *genpd,
1953 		   struct dev_power_governor *gov, bool is_off)
1954 {
1955 	if (IS_ERR_OR_NULL(genpd))
1956 		return;
1957 
1958 	INIT_LIST_HEAD(&genpd->master_links);
1959 	INIT_LIST_HEAD(&genpd->slave_links);
1960 	INIT_LIST_HEAD(&genpd->dev_list);
1961 	mutex_init(&genpd->lock);
1962 	genpd->gov = gov;
1963 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1964 	genpd->in_progress = 0;
1965 	atomic_set(&genpd->sd_count, 0);
1966 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1967 	init_waitqueue_head(&genpd->status_wait_queue);
1968 	genpd->poweroff_task = NULL;
1969 	genpd->resume_count = 0;
1970 	genpd->device_count = 0;
1971 	genpd->max_off_time_ns = -1;
1972 	genpd->max_off_time_changed = true;
1973 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1974 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1975 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1976 	genpd->domain.ops.prepare = pm_genpd_prepare;
1977 	genpd->domain.ops.suspend = pm_genpd_suspend;
1978 	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1979 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1980 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1981 	genpd->domain.ops.resume_early = pm_genpd_resume_early;
1982 	genpd->domain.ops.resume = pm_genpd_resume;
1983 	genpd->domain.ops.freeze = pm_genpd_freeze;
1984 	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1985 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1986 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1987 	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1988 	genpd->domain.ops.thaw = pm_genpd_thaw;
1989 	genpd->domain.ops.poweroff = pm_genpd_suspend;
1990 	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1991 	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1992 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1993 	genpd->domain.ops.restore_early = pm_genpd_resume_early;
1994 	genpd->domain.ops.restore = pm_genpd_resume;
1995 	genpd->domain.ops.complete = pm_genpd_complete;
1996 	genpd->dev_ops.save_state = pm_genpd_default_save_state;
1997 	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1998 	genpd->dev_ops.suspend = pm_genpd_default_suspend;
1999 	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2000 	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2001 	genpd->dev_ops.resume = pm_genpd_default_resume;
2002 	genpd->dev_ops.freeze = pm_genpd_default_freeze;
2003 	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2004 	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2005 	genpd->dev_ops.thaw = pm_genpd_default_thaw;
2006 	mutex_lock(&gpd_list_lock);
2007 	list_add(&genpd->gpd_list_node, &gpd_list);
2008 	mutex_unlock(&gpd_list_lock);
2009 }
2010
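/*
 * Illustrative sketch, not part of this file: a minimal platform domain
 * built around pm_genpd_init().  All "foo" identifiers are hypothetical;
 * foo_pd_power_on() is sketched after pm_genpd_poweron() above.
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		writel(0, foo_pd_base + FOO_PD_CTRL);
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo-pd",
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 *	ret = __pm_genpd_add_device(&foo_pd, &foo_pdev->dev, NULL);
 */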