1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15 
16 #include "../base.h"
17 #include "power.h"
18 
19 typedef int (*pm_callback_t)(struct device *);
20 
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23 	pm_callback_t cb;
24 	const struct dev_pm_ops *ops;
25 
26 	if (dev->pm_domain)
27 		ops = &dev->pm_domain->ops;
28 	else if (dev->type && dev->type->pm)
29 		ops = dev->type->pm;
30 	else if (dev->class && dev->class->pm)
31 		ops = dev->class->pm;
32 	else if (dev->bus && dev->bus->pm)
33 		ops = dev->bus->pm;
34 	else
35 		ops = NULL;
36 
37 	if (ops)
38 		cb = *(pm_callback_t *)((void *)ops + cb_offset);
39 	else
40 		cb = NULL;
41 
42 	if (!cb && dev->driver && dev->driver->pm)
43 		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44 
45 	return cb;
46 }
47 
48 #define RPM_GET_CALLBACK(dev, callback) \
49 		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
50 
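/*
 * Illustrative sketch (compiled out, not part of the original file): how a
 * driver typically supplies the callbacks that __rpm_get_callback() looks
 * up.  The resolution order above is pm_domain, then type, then class,
 * then bus, with the driver's own dev_pm_ops used only as a fallback.
 * All foo_* names below are hypothetical.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	/* Put the hardware into a low-power state here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

/*
 * With &foo_pm_ops assigned to the driver's .pm field,
 * RPM_GET_CALLBACK(dev, runtime_suspend) resolves to foo_runtime_suspend()
 * once the driver is bound, provided that no pm_domain, type, class or bus
 * callbacks take precedence.
 */
#endif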
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53 
54 /**
55  * update_pm_runtime_accounting - Update the time accounting of power states
56  * @dev: Device to update the accounting for
57  *
58  * In order to be able to have time accounting of the various power states
59  * (as used by programs such as PowerTOP to show the effectiveness of runtime
60  * PM), we need to track the time spent in each state.
61  * update_pm_runtime_accounting must be called each time before the
62  * runtime_status field is updated, to account the time in the old state
63  * correctly.
64  */
65 void update_pm_runtime_accounting(struct device *dev)
66 {
67 	unsigned long now = jiffies;
68 	unsigned long delta;
69 
70 	delta = now - dev->power.accounting_timestamp;
71 
72 	dev->power.accounting_timestamp = now;
73 
74 	if (dev->power.disable_depth > 0)
75 		return;
76 
77 	if (dev->power.runtime_status == RPM_SUSPENDED)
78 		dev->power.suspended_jiffies += delta;
79 	else
80 		dev->power.active_jiffies += delta;
81 }
82 
83 static void __update_runtime_status(struct device *dev, enum rpm_status status)
84 {
85 	update_pm_runtime_accounting(dev);
86 	dev->power.runtime_status = status;
87 }
88 
89 /**
90  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
91  * @dev: Device to handle.
92  */
93 static void pm_runtime_deactivate_timer(struct device *dev)
94 {
95 	if (dev->power.timer_expires > 0) {
96 		del_timer(&dev->power.suspend_timer);
97 		dev->power.timer_expires = 0;
98 	}
99 }
100 
101 /**
102  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
103  * @dev: Device to handle.
104  */
105 static void pm_runtime_cancel_pending(struct device *dev)
106 {
107 	pm_runtime_deactivate_timer(dev);
108 	/*
109 	 * In case there's a request pending, make sure its work function will
110 	 * return without doing anything.
111 	 */
112 	dev->power.request = RPM_REQ_NONE;
113 }
114 
115 /**
116  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
117  * @dev: Device to handle.
118  *
119  * Compute the autosuspend-delay expiration time based on the device's
120  * power.last_busy time.  If the delay has already expired or is disabled
121  * (negative) or the power.use_autosuspend flag isn't set, return 0.
122  * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
123  *
124  * This function may be called either with or without dev->power.lock held.
125  * Either way it can be racy, since power.last_busy may be updated at any time.
126  */
127 unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
128 {
129 	int autosuspend_delay;
130 	long elapsed;
131 	unsigned long last_busy;
132 	unsigned long expires = 0;
133 
134 	if (!dev->power.use_autosuspend)
135 		goto out;
136 
137 	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
138 	if (autosuspend_delay < 0)
139 		goto out;
140 
141 	last_busy = ACCESS_ONCE(dev->power.last_busy);
142 	elapsed = jiffies - last_busy;
143 	if (elapsed < 0)
144 		goto out;	/* jiffies has wrapped around. */
145 
146 	/*
147 	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
148 	 * up to the nearest second.
149 	 */
150 	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
151 	if (autosuspend_delay >= 1000)
152 		expires = round_jiffies(expires);
153 	expires += !expires;
154 	if (elapsed >= expires - last_busy)
155 		expires = 0;	/* Already expired. */
156 
157  out:
158 	return expires;
159 }
160 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
161 
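/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * expiration time computed above is driven by pm_runtime_mark_last_busy(),
 * which updates power.last_busy.  A driver using autosuspend typically
 * looks like this; the foo_* names are hypothetical.
 */
#if 0
static void foo_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 seconds */
	pm_runtime_use_autosuspend(dev);
}

static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* Restart the delay window. */
	pm_runtime_put_autosuspend(dev);	/* Suspend once the delay expires. */
}
#endif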
162 static int dev_memalloc_noio(struct device *dev, void *data)
163 {
164 	return dev->power.memalloc_noio;
165 }
166 
167 /**
168  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
169  * @dev: Device to handle.
170  * @enable: True for setting the flag and False for clearing the flag.
171  *
172  * Set the flag for all devices in the path from the device to the
173  * root device in the device tree if @enable is true, otherwise clear
174  * the flag for devices in the path whose siblings don't set the flag.
175  *
176  * This function should only be called by block device or network
177  * device drivers to solve the deadlock problem during runtime
178  * resume/suspend:
179  *
180  *     If memory allocation with GFP_KERNEL is called inside runtime
181  *     resume/suspend callbacks of any one of its ancestors (or the
182  *     block device itself), a deadlock may be triggered inside the
183  *     memory allocation, since it might not complete until the block
184  *     device becomes active and the involved page I/O finishes. This
185  *     situation was first pointed out by Alan Stern. Network devices
186  *     are involved in iSCSI-style situations.
187  *
188  * The dev_hotplug_mutex lock is held in this function to handle the
189  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
190  * from an async probe().
191  *
192  * The function should be called between device_add() and device_del()
193  * on the affected (block/network) device.
194  */
195 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
196 {
197 	static DEFINE_MUTEX(dev_hotplug_mutex);
198 
199 	mutex_lock(&dev_hotplug_mutex);
200 	for (;;) {
201 		bool enabled;
202 
203 		/* Hold the power lock, since the bitfield is not SMP-safe. */
204 		spin_lock_irq(&dev->power.lock);
205 		enabled = dev->power.memalloc_noio;
206 		dev->power.memalloc_noio = enable;
207 		spin_unlock_irq(&dev->power.lock);
208 
209 		/*
210 		 * No need to enable the ancestors any more if the device
211 		 * has already been enabled.
212 		 */
213 		if (enabled && enable)
214 			break;
215 
216 		dev = dev->parent;
217 
218 		/*
219 		 * Clear the flag of the parent device only if none of
220 		 * its children has the flag set, because an ancestor's
221 		 * flag may have been set by any one of its descendants.
222 		 */
223 		if (!dev || (!enable &&
224 			     device_for_each_child(dev, NULL,
225 						   dev_memalloc_noio)))
226 			break;
227 	}
228 	mutex_unlock(&dev_hotplug_mutex);
229 }
230 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
231 
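/*
 * Illustrative sketch (compiled out, not part of the original file): a
 * block device driver would set the flag right after registering the
 * device and clear it before unregistering it; the foo_* names are
 * hypothetical.
 */
#if 0
static int foo_register(struct device *dev)
{
	int error = device_add(dev);

	if (error)
		return error;

	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}

static void foo_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	device_del(dev);
}
#endif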
232 /**
233  * rpm_check_suspend_allowed - Test whether a device may be suspended.
234  * @dev: Device to test.
235  */
236 static int rpm_check_suspend_allowed(struct device *dev)
237 {
238 	int retval = 0;
239 
240 	if (dev->power.runtime_error)
241 		retval = -EINVAL;
242 	else if (dev->power.disable_depth > 0)
243 		retval = -EACCES;
244 	else if (atomic_read(&dev->power.usage_count) > 0)
245 		retval = -EAGAIN;
246 	else if (!dev->power.ignore_children &&
247 			atomic_read(&dev->power.child_count))
248 		retval = -EBUSY;
249 
250 	/* Pending resume requests take precedence over suspends. */
251 	else if ((dev->power.deferred_resume
252 			&& dev->power.runtime_status == RPM_SUSPENDING)
253 	    || (dev->power.request_pending
254 			&& dev->power.request == RPM_REQ_RESUME))
255 		retval = -EAGAIN;
256 	else if (__dev_pm_qos_read_value(dev) < 0)
257 		retval = -EPERM;
258 	else if (dev->power.runtime_status == RPM_SUSPENDED)
259 		retval = 1;
260 
261 	return retval;
262 }
263 
264 static int rpm_get_suppliers(struct device *dev)
265 {
266 	struct device_link *link;
267 
268 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
269 		int retval;
270 
271 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
272 			continue;
273 
274 		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
275 		    link->rpm_active)
276 			continue;
277 
278 		retval = pm_runtime_get_sync(link->supplier);
279 		if (retval < 0) {
280 			pm_runtime_put_noidle(link->supplier);
281 			return retval;
282 		}
283 		link->rpm_active = true;
284 	}
285 	return 0;
286 }
287 
288 static void rpm_put_suppliers(struct device *dev)
289 {
290 	struct device_link *link;
291 
292 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
293 		if (link->rpm_active &&
294 		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
295 			pm_runtime_put(link->supplier);
296 			link->rpm_active = false;
297 		}
298 }
299 
300 /**
301  * __rpm_callback - Run a given runtime PM callback for a given device.
302  * @cb: Runtime PM callback to run.
303  * @dev: Device to run the callback for.
304  */
305 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
306 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
307 {
308 	int retval, idx;
309 	bool use_links = dev->power.links_count > 0;
310 
311 	if (dev->power.irq_safe) {
312 		spin_unlock(&dev->power.lock);
313 	} else {
314 		spin_unlock_irq(&dev->power.lock);
315 
316 		/*
317 		 * Resume suppliers if necessary.
318 		 *
319 		 * The device's runtime PM status cannot change until this
320 		 * routine returns, so it is safe to read the status outside of
321 		 * the lock.
322 		 */
323 		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
324 			idx = device_links_read_lock();
325 
326 			retval = rpm_get_suppliers(dev);
327 			if (retval)
328 				goto fail;
329 
330 			device_links_read_unlock(idx);
331 		}
332 	}
333 
334 	retval = cb(dev);
335 
336 	if (dev->power.irq_safe) {
337 		spin_lock(&dev->power.lock);
338 	} else {
339 		/*
340 		 * If the device is suspending and the callback has returned
341 		 * success, drop the usage counters of the suppliers that have
342 		 * been reference counted on its resume.
343 		 *
344 		 * Do that if resume fails too.
345 		 */
346 		if (use_links
347 		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
348 		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
349 			idx = device_links_read_lock();
350 
351  fail:
352 			rpm_put_suppliers(dev);
353 
354 			device_links_read_unlock(idx);
355 		}
356 
357 		spin_lock_irq(&dev->power.lock);
358 	}
359 
360 	return retval;
361 }
362 
363 /**
364  * rpm_idle - Notify device bus type if the device can be suspended.
365  * @dev: Device to notify the bus type about.
366  * @rpmflags: Flag bits.
367  *
368  * Check if the device's runtime PM status allows it to be suspended.  If
369  * another idle notification has been started earlier, return immediately.  If
370  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
371  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
372  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
373  *
374  * This function must be called under dev->power.lock with interrupts disabled.
375  */
376 static int rpm_idle(struct device *dev, int rpmflags)
377 {
378 	int (*callback)(struct device *);
379 	int retval;
380 
381 	trace_rpm_idle_rcuidle(dev, rpmflags);
382 	retval = rpm_check_suspend_allowed(dev);
383 	if (retval < 0)
384 		;	/* Conditions are wrong. */
385 
386 	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
387 	else if (dev->power.runtime_status != RPM_ACTIVE)
388 		retval = -EAGAIN;
389 
390 	/*
391 	 * Any pending request other than an idle notification takes
392 	 * precedence over us, except that the timer may be running.
393 	 */
394 	else if (dev->power.request_pending &&
395 	    dev->power.request > RPM_REQ_IDLE)
396 		retval = -EAGAIN;
397 
398 	/* Act as though RPM_NOWAIT is always set. */
399 	else if (dev->power.idle_notification)
400 		retval = -EINPROGRESS;
401 	if (retval)
402 		goto out;
403 
404 	/* Pending requests need to be canceled. */
405 	dev->power.request = RPM_REQ_NONE;
406 
407 	if (dev->power.no_callbacks)
408 		goto out;
409 
410 	/* Carry out an asynchronous or a synchronous idle notification. */
411 	if (rpmflags & RPM_ASYNC) {
412 		dev->power.request = RPM_REQ_IDLE;
413 		if (!dev->power.request_pending) {
414 			dev->power.request_pending = true;
415 			queue_work(pm_wq, &dev->power.work);
416 		}
417 		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
418 		return 0;
419 	}
420 
421 	dev->power.idle_notification = true;
422 
423 	callback = RPM_GET_CALLBACK(dev, runtime_idle);
424 
425 	if (callback)
426 		retval = __rpm_callback(callback, dev);
427 
428 	dev->power.idle_notification = false;
429 	wake_up_all(&dev->power.wait_queue);
430 
431  out:
432 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
433 	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
434 }
435 
436 /**
437  * rpm_callback - Run a given runtime PM callback for a given device.
438  * @cb: Runtime PM callback to run.
439  * @dev: Device to run the callback for.
440  */
441 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
442 {
443 	int retval;
444 
445 	if (!cb)
446 		return -ENOSYS;
447 
448 	if (dev->power.memalloc_noio) {
449 		unsigned int noio_flag;
450 
451 		/*
452 		 * A deadlock might be caused if memory allocation with
453 		 * GFP_KERNEL happens inside the runtime_suspend and
454 		 * runtime_resume callbacks of one block device's
455 		 * ancestor or the block device itself. A network
456 		 * device might be thought of as part of an iSCSI block
457 		 * device, so the network device and its ancestors should
458 		 * be marked as memalloc_noio too.
459 		 */
460 		noio_flag = memalloc_noio_save();
461 		retval = __rpm_callback(cb, dev);
462 		memalloc_noio_restore(noio_flag);
463 	} else {
464 		retval = __rpm_callback(cb, dev);
465 	}
466 
467 	dev->power.runtime_error = retval;
468 	return retval != -EACCES ? retval : -EIO;
469 }
470 
471 /**
472  * rpm_suspend - Carry out runtime suspend of given device.
473  * @dev: Device to suspend.
474  * @rpmflags: Flag bits.
475  *
476  * Check if the device's runtime PM status allows it to be suspended.
477  * Cancel a pending idle notification, autosuspend or suspend. If
478  * another suspend has been started earlier, either return immediately
479  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
480  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
481  * otherwise run the ->runtime_suspend() callback directly. If
482  * ->runtime_suspend() succeeds and a deferred resume was requested while
483  * the callback was running, carry out that resume; otherwise, send an idle
484  * notification for the device's parent (unless the parent's
485  * power.ignore_children flag or the device's power.irq_safe flag is set).
486  * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO
487  * flag is set and the next autosuspend-delay expiration time is in the
488  * future, schedule another autosuspend attempt.
489  *
490  * This function must be called under dev->power.lock with interrupts disabled.
491  */
492 static int rpm_suspend(struct device *dev, int rpmflags)
493 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
494 {
495 	int (*callback)(struct device *);
496 	struct device *parent = NULL;
497 	int retval;
498 
499 	trace_rpm_suspend_rcuidle(dev, rpmflags);
500 
501  repeat:
502 	retval = rpm_check_suspend_allowed(dev);
503 
504 	if (retval < 0)
505 		;	/* Conditions are wrong. */
506 
507 	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
508 	else if (dev->power.runtime_status == RPM_RESUMING &&
509 	    !(rpmflags & RPM_ASYNC))
510 		retval = -EAGAIN;
511 	if (retval)
512 		goto out;
513 
514 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
515 	if ((rpmflags & RPM_AUTO)
516 	    && dev->power.runtime_status != RPM_SUSPENDING) {
517 		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
518 
519 		if (expires != 0) {
520 			/* Pending requests need to be canceled. */
521 			dev->power.request = RPM_REQ_NONE;
522 
523 			/*
524 			 * Optimization: If the timer is already running and is
525 			 * set to expire at or before the autosuspend delay,
526 			 * avoid the overhead of resetting it.  Just let it
527 			 * expire; pm_suspend_timer_fn() will take care of the
528 			 * rest.
529 			 */
530 			if (!(dev->power.timer_expires && time_before_eq(
531 			    dev->power.timer_expires, expires))) {
532 				dev->power.timer_expires = expires;
533 				mod_timer(&dev->power.suspend_timer, expires);
534 			}
535 			dev->power.timer_autosuspends = 1;
536 			goto out;
537 		}
538 	}
539 
540 	/* Other scheduled or pending requests need to be canceled. */
541 	pm_runtime_cancel_pending(dev);
542 
543 	if (dev->power.runtime_status == RPM_SUSPENDING) {
544 		DEFINE_WAIT(wait);
545 
546 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
547 			retval = -EINPROGRESS;
548 			goto out;
549 		}
550 
551 		if (dev->power.irq_safe) {
552 			spin_unlock(&dev->power.lock);
553 
554 			cpu_relax();
555 
556 			spin_lock(&dev->power.lock);
557 			goto repeat;
558 		}
559 
560 		/* Wait for the other suspend running in parallel with us. */
561 		for (;;) {
562 			prepare_to_wait(&dev->power.wait_queue, &wait,
563 					TASK_UNINTERRUPTIBLE);
564 			if (dev->power.runtime_status != RPM_SUSPENDING)
565 				break;
566 
567 			spin_unlock_irq(&dev->power.lock);
568 
569 			schedule();
570 
571 			spin_lock_irq(&dev->power.lock);
572 		}
573 		finish_wait(&dev->power.wait_queue, &wait);
574 		goto repeat;
575 	}
576 
577 	if (dev->power.no_callbacks)
578 		goto no_callback;	/* Assume success. */
579 
580 	/* Carry out an asynchronous or a synchronous suspend. */
581 	if (rpmflags & RPM_ASYNC) {
582 		dev->power.request = (rpmflags & RPM_AUTO) ?
583 		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
584 		if (!dev->power.request_pending) {
585 			dev->power.request_pending = true;
586 			queue_work(pm_wq, &dev->power.work);
587 		}
588 		goto out;
589 	}
590 
591 	__update_runtime_status(dev, RPM_SUSPENDING);
592 
593 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
594 
595 	dev_pm_enable_wake_irq_check(dev, true);
596 	retval = rpm_callback(callback, dev);
597 	if (retval)
598 		goto fail;
599 
600  no_callback:
601 	__update_runtime_status(dev, RPM_SUSPENDED);
602 	pm_runtime_deactivate_timer(dev);
603 
604 	if (dev->parent) {
605 		parent = dev->parent;
606 		atomic_add_unless(&parent->power.child_count, -1, 0);
607 	}
608 	wake_up_all(&dev->power.wait_queue);
609 
610 	if (dev->power.deferred_resume) {
611 		dev->power.deferred_resume = false;
612 		rpm_resume(dev, 0);
613 		retval = -EAGAIN;
614 		goto out;
615 	}
616 
617 	/* Maybe the parent is now able to suspend. */
618 	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
619 		spin_unlock(&dev->power.lock);
620 
621 		spin_lock(&parent->power.lock);
622 		rpm_idle(parent, RPM_ASYNC);
623 		spin_unlock(&parent->power.lock);
624 
625 		spin_lock(&dev->power.lock);
626 	}
627 
628  out:
629 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
630 
631 	return retval;
632 
633  fail:
634 	dev_pm_disable_wake_irq_check(dev);
635 	__update_runtime_status(dev, RPM_ACTIVE);
636 	dev->power.deferred_resume = false;
637 	wake_up_all(&dev->power.wait_queue);
638 
639 	if (retval == -EAGAIN || retval == -EBUSY) {
640 		dev->power.runtime_error = 0;
641 
642 		/*
643 		 * If the callback routine failed an autosuspend, and
644 		 * if the last_busy time has been updated so that there
645 		 * is a new autosuspend expiration time, automatically
646 		 * reschedule another autosuspend.
647 		 */
648 		if ((rpmflags & RPM_AUTO) &&
649 		    pm_runtime_autosuspend_expiration(dev) != 0)
650 			goto repeat;
651 	} else {
652 		pm_runtime_cancel_pending(dev);
653 	}
654 	goto out;
655 }
656 
657 /**
658  * rpm_resume - Carry out runtime resume of given device.
659  * @dev: Device to resume.
660  * @rpmflags: Flag bits.
661  *
662  * Check if the device's runtime PM status allows it to be resumed.  Cancel
663  * any scheduled or pending requests.  If another resume has been started
664  * earlier, either return immediately or wait for it to finish, depending on the
665  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
666  * parallel with this function, either tell the other process to resume after
667  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
668  * flag is set then queue a resume request; otherwise run the
669  * ->runtime_resume() callback directly.  Queue an idle notification for the
670  * device if the resume succeeded.
671  *
672  * This function must be called under dev->power.lock with interrupts disabled.
673  */
674 static int rpm_resume(struct device *dev, int rpmflags)
675 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
676 {
677 	int (*callback)(struct device *);
678 	struct device *parent = NULL;
679 	int retval = 0;
680 
681 	trace_rpm_resume_rcuidle(dev, rpmflags);
682 
683  repeat:
684 	if (dev->power.runtime_error)
685 		retval = -EINVAL;
686 	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
687 	    && dev->power.runtime_status == RPM_ACTIVE)
688 		retval = 1;
689 	else if (dev->power.disable_depth > 0)
690 		retval = -EACCES;
691 	if (retval)
692 		goto out;
693 
694 	/*
695 	 * Other scheduled or pending requests need to be canceled.  Small
696 	 * optimization: If an autosuspend timer is running, leave it running
697 	 * rather than cancelling it now only to restart it again in the near
698 	 * future.
699 	 */
700 	dev->power.request = RPM_REQ_NONE;
701 	if (!dev->power.timer_autosuspends)
702 		pm_runtime_deactivate_timer(dev);
703 
704 	if (dev->power.runtime_status == RPM_ACTIVE) {
705 		retval = 1;
706 		goto out;
707 	}
708 
709 	if (dev->power.runtime_status == RPM_RESUMING
710 	    || dev->power.runtime_status == RPM_SUSPENDING) {
711 		DEFINE_WAIT(wait);
712 
713 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
714 			if (dev->power.runtime_status == RPM_SUSPENDING)
715 				dev->power.deferred_resume = true;
716 			else
717 				retval = -EINPROGRESS;
718 			goto out;
719 		}
720 
721 		if (dev->power.irq_safe) {
722 			spin_unlock(&dev->power.lock);
723 
724 			cpu_relax();
725 
726 			spin_lock(&dev->power.lock);
727 			goto repeat;
728 		}
729 
730 		/* Wait for the operation carried out in parallel with us. */
731 		for (;;) {
732 			prepare_to_wait(&dev->power.wait_queue, &wait,
733 					TASK_UNINTERRUPTIBLE);
734 			if (dev->power.runtime_status != RPM_RESUMING
735 			    && dev->power.runtime_status != RPM_SUSPENDING)
736 				break;
737 
738 			spin_unlock_irq(&dev->power.lock);
739 
740 			schedule();
741 
742 			spin_lock_irq(&dev->power.lock);
743 		}
744 		finish_wait(&dev->power.wait_queue, &wait);
745 		goto repeat;
746 	}
747 
748 	/*
749 	 * See if we can skip waking up the parent.  This is safe only if
750 	 * power.no_callbacks is set, because otherwise we don't know whether
751 	 * the resume will actually succeed.
752 	 */
753 	if (dev->power.no_callbacks && !parent && dev->parent) {
754 		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
755 		if (dev->parent->power.disable_depth > 0
756 		    || dev->parent->power.ignore_children
757 		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
758 			atomic_inc(&dev->parent->power.child_count);
759 			spin_unlock(&dev->parent->power.lock);
760 			retval = 1;
761 			goto no_callback;	/* Assume success. */
762 		}
763 		spin_unlock(&dev->parent->power.lock);
764 	}
765 
766 	/* Carry out an asynchronous or a synchronous resume. */
767 	if (rpmflags & RPM_ASYNC) {
768 		dev->power.request = RPM_REQ_RESUME;
769 		if (!dev->power.request_pending) {
770 			dev->power.request_pending = true;
771 			queue_work(pm_wq, &dev->power.work);
772 		}
773 		retval = 0;
774 		goto out;
775 	}
776 
777 	if (!parent && dev->parent) {
778 		/*
779 		 * Increment the parent's usage counter and resume it if
780 		 * necessary.  Not needed if dev is irq-safe; then the
781 		 * parent is permanently resumed.
782 		 */
783 		parent = dev->parent;
784 		if (dev->power.irq_safe)
785 			goto skip_parent;
786 		spin_unlock(&dev->power.lock);
787 
788 		pm_runtime_get_noresume(parent);
789 
790 		spin_lock(&parent->power.lock);
791 	 * Resume the parent if it has runtime PM enabled and has not
792 	 * been set to ignore its children.
793 		 * set to ignore its children.
794 		 */
795 		if (!parent->power.disable_depth
796 		    && !parent->power.ignore_children) {
797 			rpm_resume(parent, 0);
798 			if (parent->power.runtime_status != RPM_ACTIVE)
799 				retval = -EBUSY;
800 		}
801 		spin_unlock(&parent->power.lock);
802 
803 		spin_lock(&dev->power.lock);
804 		if (retval)
805 			goto out;
806 		goto repeat;
807 	}
808  skip_parent:
809 
810 	if (dev->power.no_callbacks)
811 		goto no_callback;	/* Assume success. */
812 
813 	__update_runtime_status(dev, RPM_RESUMING);
814 
815 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
816 
817 	dev_pm_disable_wake_irq_check(dev);
818 	retval = rpm_callback(callback, dev);
819 	if (retval) {
820 		__update_runtime_status(dev, RPM_SUSPENDED);
821 		pm_runtime_cancel_pending(dev);
822 		dev_pm_enable_wake_irq_check(dev, false);
823 	} else {
824  no_callback:
825 		__update_runtime_status(dev, RPM_ACTIVE);
826 		pm_runtime_mark_last_busy(dev);
827 		if (parent)
828 			atomic_inc(&parent->power.child_count);
829 	}
830 	wake_up_all(&dev->power.wait_queue);
831 
832 	if (retval >= 0)
833 		rpm_idle(dev, RPM_ASYNC);
834 
835  out:
836 	if (parent && !dev->power.irq_safe) {
837 		spin_unlock_irq(&dev->power.lock);
838 
839 		pm_runtime_put(parent);
840 
841 		spin_lock_irq(&dev->power.lock);
842 	}
843 
844 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
845 
846 	return retval;
847 }
848 
849 /**
850  * pm_runtime_work - Universal runtime PM work function.
851  * @work: Work structure used for scheduling the execution of this function.
852  *
853  * Use @work to get the device object the work is to be done for, determine what
854  * is to be done and execute the appropriate runtime PM function.
855  */
856 static void pm_runtime_work(struct work_struct *work)
857 {
858 	struct device *dev = container_of(work, struct device, power.work);
859 	enum rpm_request req;
860 
861 	spin_lock_irq(&dev->power.lock);
862 
863 	if (!dev->power.request_pending)
864 		goto out;
865 
866 	req = dev->power.request;
867 	dev->power.request = RPM_REQ_NONE;
868 	dev->power.request_pending = false;
869 
870 	switch (req) {
871 	case RPM_REQ_NONE:
872 		break;
873 	case RPM_REQ_IDLE:
874 		rpm_idle(dev, RPM_NOWAIT);
875 		break;
876 	case RPM_REQ_SUSPEND:
877 		rpm_suspend(dev, RPM_NOWAIT);
878 		break;
879 	case RPM_REQ_AUTOSUSPEND:
880 		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
881 		break;
882 	case RPM_REQ_RESUME:
883 		rpm_resume(dev, RPM_NOWAIT);
884 		break;
885 	}
886 
887  out:
888 	spin_unlock_irq(&dev->power.lock);
889 }
890 
891 /**
892  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
893  * @data: Device pointer passed by pm_schedule_suspend().
894  *
895  * Check if the time is right and queue a suspend request.
896  */
897 static void pm_suspend_timer_fn(unsigned long data)
898 {
899 	struct device *dev = (struct device *)data;
900 	unsigned long flags;
901 	unsigned long expires;
902 
903 	spin_lock_irqsave(&dev->power.lock, flags);
904 
905 	expires = dev->power.timer_expires;
906 	/* If 'expires' is after 'jiffies', we've been called too early. */
907 	if (expires > 0 && !time_after(expires, jiffies)) {
908 		dev->power.timer_expires = 0;
909 		rpm_suspend(dev, dev->power.timer_autosuspends ?
910 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
911 	}
912 
913 	spin_unlock_irqrestore(&dev->power.lock, flags);
914 }
915 
916 /**
917  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
918  * @dev: Device to suspend.
919  * @delay: Time to wait before submitting a suspend request, in milliseconds.
920  */
921 int pm_schedule_suspend(struct device *dev, unsigned int delay)
922 {
923 	unsigned long flags;
924 	int retval;
925 
926 	spin_lock_irqsave(&dev->power.lock, flags);
927 
928 	if (!delay) {
929 		retval = rpm_suspend(dev, RPM_ASYNC);
930 		goto out;
931 	}
932 
933 	retval = rpm_check_suspend_allowed(dev);
934 	if (retval)
935 		goto out;
936 
937 	/* Other scheduled or pending requests need to be canceled. */
938 	pm_runtime_cancel_pending(dev);
939 
940 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
941 	dev->power.timer_expires += !dev->power.timer_expires;
942 	dev->power.timer_autosuspends = 0;
943 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
944 
945  out:
946 	spin_unlock_irqrestore(&dev->power.lock, flags);
947 
948 	return retval;
949 }
950 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
951 
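/*
 * Illustrative sketch (compiled out, not part of the original file):
 * scheduling a suspend request 100 ms from now.  A nonzero delay arms the
 * suspend timer and pm_suspend_timer_fn() queues the actual request when
 * it fires; the foo_* name is hypothetical.
 */
#if 0
static void foo_done_for_now(struct device *dev)
{
	int error = pm_schedule_suspend(dev, 100);

	if (error < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", error);
}
#endif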
952 /**
953  * __pm_runtime_idle - Entry point for runtime idle operations.
954  * @dev: Device to send idle notification for.
955  * @rpmflags: Flag bits.
956  *
957  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
958  * return immediately if it is larger than zero.  Then carry out an idle
959  * notification, either synchronous or asynchronous.
960  *
961  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
962  * or if pm_runtime_irq_safe() has been called.
963  */
964 int __pm_runtime_idle(struct device *dev, int rpmflags)
965 {
966 	unsigned long flags;
967 	int retval;
968 
969 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
970 
971 	if (rpmflags & RPM_GET_PUT) {
972 		if (!atomic_dec_and_test(&dev->power.usage_count))
973 			return 0;
974 	}
975 
976 	spin_lock_irqsave(&dev->power.lock, flags);
977 	retval = rpm_idle(dev, rpmflags);
978 	spin_unlock_irqrestore(&dev->power.lock, flags);
979 
980 	return retval;
981 }
982 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
983 
984 /**
985  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
986  * @dev: Device to suspend.
987  * @rpmflags: Flag bits.
988  *
989  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
990  * return immediately if it is larger than zero.  Then carry out a suspend,
991  * either synchronous or asynchronous.
992  *
993  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
994  * or if pm_runtime_irq_safe() has been called.
995  */
996 int __pm_runtime_suspend(struct device *dev, int rpmflags)
997 {
998 	unsigned long flags;
999 	int retval;
1000 
1001 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1002 
1003 	if (rpmflags & RPM_GET_PUT) {
1004 		if (!atomic_dec_and_test(&dev->power.usage_count))
1005 			return 0;
1006 	}
1007 
1008 	spin_lock_irqsave(&dev->power.lock, flags);
1009 	retval = rpm_suspend(dev, rpmflags);
1010 	spin_unlock_irqrestore(&dev->power.lock, flags);
1011 
1012 	return retval;
1013 }
1014 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1015 
1016 /**
1017  * __pm_runtime_resume - Entry point for runtime resume operations.
1018  * @dev: Device to resume.
1019  * @rpmflags: Flag bits.
1020  *
1021  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1022  * carry out a resume, either synchronous or asynchronous.
1023  *
1024  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1025  * or if pm_runtime_irq_safe() has been called.
1026  */
1027 int __pm_runtime_resume(struct device *dev, int rpmflags)
1028 {
1029 	unsigned long flags;
1030 	int retval;
1031 
1032 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1033 
1034 	if (rpmflags & RPM_GET_PUT)
1035 		atomic_inc(&dev->power.usage_count);
1036 
1037 	spin_lock_irqsave(&dev->power.lock, flags);
1038 	retval = rpm_resume(dev, rpmflags);
1039 	spin_unlock_irqrestore(&dev->power.lock, flags);
1040 
1041 	return retval;
1042 }
1043 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1044 
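/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * usual driver-side pattern built on the three entry points above.
 * pm_runtime_get_sync() maps to __pm_runtime_resume(dev, RPM_GET_PUT) and
 * pm_runtime_put() maps to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 * the foo_* name is hypothetical.
 */
#if 0
static int foo_do_io(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);	/* Resume before touching HW. */

	if (error < 0) {
		pm_runtime_put_noidle(dev);	/* Balance the usage count. */
		return error;
	}

	/* ... access the hardware ... */

	pm_runtime_put(dev);	/* Drop the reference, idle asynchronously. */
	return 0;
}
#endif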
1045 /**
1046  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1047  * @dev: Device to handle.
1048  *
1049  * Return -EINVAL if runtime PM is disabled for the device.
1050  *
1051  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1052  * and the runtime PM usage counter is nonzero, increment the counter and
1053  * return 1.  Otherwise return 0 without changing the counter.
1054  */
1055 int pm_runtime_get_if_in_use(struct device *dev)
1056 {
1057 	unsigned long flags;
1058 	int retval;
1059 
1060 	spin_lock_irqsave(&dev->power.lock, flags);
1061 	retval = dev->power.disable_depth > 0 ? -EINVAL :
1062 		dev->power.runtime_status == RPM_ACTIVE
1063 			&& atomic_inc_not_zero(&dev->power.usage_count);
1064 	spin_unlock_irqrestore(&dev->power.lock, flags);
1065 	return retval;
1066 }
1067 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1068 
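/*
 * Illustrative sketch (compiled out, not part of the original file): using
 * the conditional get from a context that must not wait for a resume, such
 * as a polling routine; the foo_* name is hypothetical.
 */
#if 0
static void foo_poll(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;	/* Suspended, unused or PM disabled: nothing to do. */

	/* ... the device is guaranteed to stay RPM_ACTIVE here ... */

	pm_runtime_put(dev);
}
#endif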
1069 /**
1070  * __pm_runtime_set_status - Set runtime PM status of a device.
1071  * @dev: Device to handle.
1072  * @status: New runtime PM status of the device.
1073  *
1074  * If runtime PM of the device is disabled or its power.runtime_error field is
1075  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1076  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1077  * However, if the device has a parent and the parent is not active, and the
1078  * parent's power.ignore_children flag is unset, the device's status cannot be
1079  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1080  *
1081  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1082  * and the device parent's counter of unsuspended children is modified to
1083  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1084  * notification request for the parent is submitted.
1085  */
1086 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1087 {
1088 	struct device *parent = dev->parent;
1089 	unsigned long flags;
1090 	bool notify_parent = false;
1091 	int error = 0;
1092 
1093 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1094 		return -EINVAL;
1095 
1096 	spin_lock_irqsave(&dev->power.lock, flags);
1097 
1098 	if (!dev->power.runtime_error && !dev->power.disable_depth) {
1099 		error = -EAGAIN;
1100 		goto out;
1101 	}
1102 
1103 	if (dev->power.runtime_status == status)
1104 		goto out_set;
1105 
1106 	if (status == RPM_SUSPENDED) {
1107 		/*
1108 		 * It is invalid to suspend a device with an active child,
1109 		 * unless it has been set to ignore its children.
1110 		 */
1111 		if (!dev->power.ignore_children &&
1112 			atomic_read(&dev->power.child_count)) {
1113 			dev_err(dev, "runtime PM trying to suspend device but active child\n");
1114 			error = -EBUSY;
1115 			goto out;
1116 		}
1117 
1118 		if (parent) {
1119 			atomic_add_unless(&parent->power.child_count, -1, 0);
1120 			notify_parent = !parent->power.ignore_children;
1121 		}
1122 		goto out_set;
1123 	}
1124 
1125 	if (parent) {
1126 		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1127 
1128 		/*
1129 		 * It is invalid to put an active child under a parent that is
1130 		 * not active, has runtime PM enabled and the
1131 		 * 'power.ignore_children' flag unset.
1132 		 */
1133 		if (!parent->power.disable_depth
1134 		    && !parent->power.ignore_children
1135 		    && parent->power.runtime_status != RPM_ACTIVE) {
1136 			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1137 				dev_name(dev),
1138 				dev_name(parent));
1139 			error = -EBUSY;
1140 		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1141 			atomic_inc(&parent->power.child_count);
1142 		}
1143 
1144 		spin_unlock(&parent->power.lock);
1145 
1146 		if (error)
1147 			goto out;
1148 	}
1149 
1150  out_set:
1151 	__update_runtime_status(dev, status);
1152 	dev->power.runtime_error = 0;
1153  out:
1154 	spin_unlock_irqrestore(&dev->power.lock, flags);
1155 
1156 	if (notify_parent)
1157 		pm_request_idle(parent);
1158 
1159 	return error;
1160 }
1161 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1162 
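/*
 * Illustrative sketch (compiled out, not part of the original file):
 * telling the core about the initial hardware state at probe time.
 * pm_runtime_set_active() wraps __pm_runtime_set_status(dev, RPM_ACTIVE),
 * and status changes are only allowed while runtime PM is disabled, i.e.
 * before pm_runtime_enable(); the foo_* name is hypothetical.
 */
#if 0
static int foo_probe(struct device *dev)
{
	int error;

	/* The hardware was left powered on by the bootloader/firmware. */
	error = pm_runtime_set_active(dev);
	if (error)
		return error;	/* e.g. -EBUSY if the parent isn't active. */

	pm_runtime_enable(dev);
	return 0;
}
#endif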
1163 /**
1164  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1165  * @dev: Device to handle.
1166  *
1167  * Flush all pending requests for the device from pm_wq and wait for all
1168  * runtime PM operations involving the device in progress to complete.
1169  *
1170  * Should be called under dev->power.lock with interrupts disabled.
1171  */
1172 static void __pm_runtime_barrier(struct device *dev)
1173 {
1174 	pm_runtime_deactivate_timer(dev);
1175 
1176 	if (dev->power.request_pending) {
1177 		dev->power.request = RPM_REQ_NONE;
1178 		spin_unlock_irq(&dev->power.lock);
1179 
1180 		cancel_work_sync(&dev->power.work);
1181 
1182 		spin_lock_irq(&dev->power.lock);
1183 		dev->power.request_pending = false;
1184 	}
1185 
1186 	if (dev->power.runtime_status == RPM_SUSPENDING
1187 	    || dev->power.runtime_status == RPM_RESUMING
1188 	    || dev->power.idle_notification) {
1189 		DEFINE_WAIT(wait);
1190 
1191 		/* Suspend, wake-up or idle notification in progress. */
1192 		for (;;) {
1193 			prepare_to_wait(&dev->power.wait_queue, &wait,
1194 					TASK_UNINTERRUPTIBLE);
1195 			if (dev->power.runtime_status != RPM_SUSPENDING
1196 			    && dev->power.runtime_status != RPM_RESUMING
1197 			    && !dev->power.idle_notification)
1198 				break;
1199 			spin_unlock_irq(&dev->power.lock);
1200 
1201 			schedule();
1202 
1203 			spin_lock_irq(&dev->power.lock);
1204 		}
1205 		finish_wait(&dev->power.wait_queue, &wait);
1206 	}
1207 }
1208 
1209 /**
1210  * pm_runtime_barrier - Flush pending requests and wait for completions.
1211  * @dev: Device to handle.
1212  *
1213  * Prevent the device from being suspended by incrementing its usage counter and,
1214  * if there's a pending resume request for the device, wake the device up.
1215  * Next, make sure that all pending requests for the device have been flushed
1216  * from pm_wq and wait for all runtime PM operations involving the device in
1217  * progress to complete.
1218  *
1219  * Return value:
1220  * 1, if there was a resume request pending and the device had to be woken up,
1221  * 0, otherwise
1222  */
1223 int pm_runtime_barrier(struct device *dev)
1224 {
1225 	int retval = 0;
1226 
1227 	pm_runtime_get_noresume(dev);
1228 	spin_lock_irq(&dev->power.lock);
1229 
1230 	if (dev->power.request_pending
1231 	    && dev->power.request == RPM_REQ_RESUME) {
1232 		rpm_resume(dev, 0);
1233 		retval = 1;
1234 	}
1235 
1236 	__pm_runtime_barrier(dev);
1237 
1238 	spin_unlock_irq(&dev->power.lock);
1239 	pm_runtime_put_noidle(dev);
1240 
1241 	return retval;
1242 }
1243 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1244 
1245 /**
1246  * __pm_runtime_disable - Disable runtime PM of a device.
1247  * @dev: Device to handle.
1248  * @check_resume: If set, check if there's a resume request for the device.
1249  *
1250  * Increment power.disable_depth for the device and if it was zero previously,
1251  * cancel all pending runtime PM requests for the device and wait for all
1252  * operations in progress to complete.  The device can be either active or
1253  * suspended after its runtime PM has been disabled.
1254  *
1255  * If @check_resume is set and there's a resume request pending when
1256  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1257  * function will wake up the device before disabling its runtime PM.
1258  */
1259 void __pm_runtime_disable(struct device *dev, bool check_resume)
1260 {
1261 	spin_lock_irq(&dev->power.lock);
1262 
1263 	if (dev->power.disable_depth > 0) {
1264 		dev->power.disable_depth++;
1265 		goto out;
1266 	}
1267 
1268 	/*
1269 	 * Wake up the device if there's a resume request pending, because that
1270 	 * means there probably is some I/O to process and disabling runtime PM
1271 	 * shouldn't prevent the device from processing the I/O.
1272 	 */
1273 	if (check_resume && dev->power.request_pending
1274 	    && dev->power.request == RPM_REQ_RESUME) {
1275 		/*
1276 		 * Prevent suspends and idle notifications from being carried
1277 		 * out after we have woken up the device.
1278 		 */
1279 		pm_runtime_get_noresume(dev);
1280 
1281 		rpm_resume(dev, 0);
1282 
1283 		pm_runtime_put_noidle(dev);
1284 	}
1285 
1286 	if (!dev->power.disable_depth++)
1287 		__pm_runtime_barrier(dev);
1288 
1289  out:
1290 	spin_unlock_irq(&dev->power.lock);
1291 }
1292 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1293 
1294 /**
1295  * pm_runtime_enable - Enable runtime PM of a device.
1296  * @dev: Device to handle.
1297  */
1298 void pm_runtime_enable(struct device *dev)
1299 {
1300 	unsigned long flags;
1301 
1302 	spin_lock_irqsave(&dev->power.lock, flags);
1303 
1304 	if (dev->power.disable_depth > 0)
1305 		dev->power.disable_depth--;
1306 	else
1307 		dev_warn(dev, "Unbalanced %s!\n", __func__);
1308 
1309 	spin_unlock_irqrestore(&dev->power.lock, flags);
1310 }
1311 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1312 
1313 /**
1314  * pm_runtime_forbid - Block runtime PM of a device.
1315  * @dev: Device to handle.
1316  *
1317  * Increase the device's usage count and clear its power.runtime_auto flag,
1318  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1319  * for it.
1320  */
1321 void pm_runtime_forbid(struct device *dev)
1322 {
1323 	spin_lock_irq(&dev->power.lock);
1324 	if (!dev->power.runtime_auto)
1325 		goto out;
1326 
1327 	dev->power.runtime_auto = false;
1328 	atomic_inc(&dev->power.usage_count);
1329 	rpm_resume(dev, 0);
1330 
1331  out:
1332 	spin_unlock_irq(&dev->power.lock);
1333 }
1334 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1335 
1336 /**
1337  * pm_runtime_allow - Unblock runtime PM of a device.
1338  * @dev: Device to handle.
1339  *
1340  * Decrease the device's usage count and set its power.runtime_auto flag.
1341  */
1342 void pm_runtime_allow(struct device *dev)
1343 {
1344 	spin_lock_irq(&dev->power.lock);
1345 	if (dev->power.runtime_auto)
1346 		goto out;
1347 
1348 	dev->power.runtime_auto = true;
1349 	if (atomic_dec_and_test(&dev->power.usage_count))
1350 		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1351 
1352  out:
1353 	spin_unlock_irq(&dev->power.lock);
1354 }
1355 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1356 
1357 /**
1358  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1359  * @dev: Device to handle.
1360  *
1361  * Set the power.no_callbacks flag, which tells the PM core that this
1362  * device is power-managed through its parent and has no runtime PM
1363  * callbacks of its own.  The runtime sysfs attributes will be removed.
1364  */
1365 void pm_runtime_no_callbacks(struct device *dev)
1366 {
1367 	spin_lock_irq(&dev->power.lock);
1368 	dev->power.no_callbacks = 1;
1369 	spin_unlock_irq(&dev->power.lock);
1370 	if (device_is_registered(dev))
1371 		rpm_sysfs_remove(dev);
1372 }
1373 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1374 
1375 /**
1376  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1377  * @dev: Device to handle
1378  *
1379  * Set the power.irq_safe flag, which tells the PM core that the
1380  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1381  * always be invoked with the spinlock held and interrupts disabled.  It also
1382  * causes the parent's usage counter to be permanently incremented, preventing
1383  * the parent from runtime suspending -- otherwise an irq-safe child might have
1384  * to wait for a non-irq-safe parent.
1385  */
1386 void pm_runtime_irq_safe(struct device *dev)
1387 {
1388 	if (dev->parent)
1389 		pm_runtime_get_sync(dev->parent);
1390 	spin_lock_irq(&dev->power.lock);
1391 	dev->power.irq_safe = 1;
1392 	spin_unlock_irq(&dev->power.lock);
1393 }
1394 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1395 
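/*
 * Illustrative sketch (compiled out, not part of the original file): once
 * pm_runtime_irq_safe() has been called, synchronous runtime PM calls
 * become legal in atomic context, at the cost of keeping the parent active
 * permanently.  Assumes <linux/interrupt.h>; the foo_* name is
 * hypothetical.
 */
#if 0
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* Legal here only because of irq_safe. */
	/* ... handle the interrupt ... */
	pm_runtime_put(dev);

	return IRQ_HANDLED;
}
#endif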
1396 /**
1397  * update_autosuspend - Handle a change to a device's autosuspend settings.
1398  * @dev: Device to handle.
1399  * @old_delay: The former autosuspend_delay value.
1400  * @old_use: The former use_autosuspend value.
1401  *
1402  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1403  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1404  *
1405  * This function must be called under dev->power.lock with interrupts disabled.
1406  */
1407 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1408 {
1409 	int delay = dev->power.autosuspend_delay;
1410 
1411 	/* Should runtime suspend be prevented now? */
1412 	if (dev->power.use_autosuspend && delay < 0) {
1413 
1414 		/* If it used to be allowed then prevent it. */
1415 		if (!old_use || old_delay >= 0) {
1416 			atomic_inc(&dev->power.usage_count);
1417 			rpm_resume(dev, 0);
1418 		}
1419 	}
1420 
1421 	/* Runtime suspend should be allowed now. */
1422 	else {
1423 
1424 		/* If it used to be prevented then allow it. */
1425 		if (old_use && old_delay < 0)
1426 			atomic_dec(&dev->power.usage_count);
1427 
1428 		/* Maybe we can autosuspend now. */
1429 		rpm_idle(dev, RPM_AUTO);
1430 	}
1431 }
1432 
1433 /**
1434  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1435  * @dev: Device to handle.
1436  * @delay: Value of the new delay in milliseconds.
1437  *
1438  * Set the device's power.autosuspend_delay value.  If it changes to negative
1439  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1440  * changes the other way, allow runtime suspends.
1441  */
1442 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1443 {
1444 	int old_delay, old_use;
1445 
1446 	spin_lock_irq(&dev->power.lock);
1447 	old_delay = dev->power.autosuspend_delay;
1448 	old_use = dev->power.use_autosuspend;
1449 	dev->power.autosuspend_delay = delay;
1450 	update_autosuspend(dev, old_delay, old_use);
1451 	spin_unlock_irq(&dev->power.lock);
1452 }
1453 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1454 
1455 /**
1456  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1457  * @dev: Device to handle.
1458  * @use: New value for use_autosuspend.
1459  *
1460  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1461  * suspends as needed.
1462  */
1463 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1464 {
1465 	int old_delay, old_use;
1466 
1467 	spin_lock_irq(&dev->power.lock);
1468 	old_delay = dev->power.autosuspend_delay;
1469 	old_use = dev->power.use_autosuspend;
1470 	dev->power.use_autosuspend = use;
1471 	update_autosuspend(dev, old_delay, old_use);
1472 	spin_unlock_irq(&dev->power.lock);
1473 }
1474 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1475 
1476 /**
1477  * pm_runtime_init - Initialize runtime PM fields in given device object.
1478  * @dev: Device object to initialize.
1479  */
1480 void pm_runtime_init(struct device *dev)
1481 {
1482 	dev->power.runtime_status = RPM_SUSPENDED;
1483 	dev->power.idle_notification = false;
1484 
1485 	dev->power.disable_depth = 1;
1486 	atomic_set(&dev->power.usage_count, 0);
1487 
1488 	dev->power.runtime_error = 0;
1489 
1490 	atomic_set(&dev->power.child_count, 0);
1491 	pm_suspend_ignore_children(dev, false);
1492 	dev->power.runtime_auto = true;
1493 
1494 	dev->power.request_pending = false;
1495 	dev->power.request = RPM_REQ_NONE;
1496 	dev->power.deferred_resume = false;
1497 	dev->power.accounting_timestamp = jiffies;
1498 	INIT_WORK(&dev->power.work, pm_runtime_work);
1499 
1500 	dev->power.timer_expires = 0;
1501 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1502 			(unsigned long)dev);
1503 
1504 	init_waitqueue_head(&dev->power.wait_queue);
1505 }
1506 
1507 /**
1508  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1509  * @dev: Device object to re-initialize.
1510  */
1511 void pm_runtime_reinit(struct device *dev)
1512 {
1513 	if (!pm_runtime_enabled(dev)) {
1514 		if (dev->power.runtime_status == RPM_ACTIVE)
1515 			pm_runtime_set_suspended(dev);
1516 		if (dev->power.irq_safe) {
1517 			spin_lock_irq(&dev->power.lock);
1518 			dev->power.irq_safe = 0;
1519 			spin_unlock_irq(&dev->power.lock);
1520 			if (dev->parent)
1521 				pm_runtime_put(dev->parent);
1522 		}
1523 	}
1524 }
1525 
1526 /**
1527  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1528  * @dev: Device object being removed from device hierarchy.
1529  */
1530 void pm_runtime_remove(struct device *dev)
1531 {
1532 	__pm_runtime_disable(dev, false);
1533 	pm_runtime_reinit(dev);
1534 }
1535 
1536 /**
1537  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1538  * @dev: Device whose driver is going to be removed.
1539  *
1540  * Check links from this device to any consumers and if any of them have active
1541  * runtime PM references to the device, drop the usage counter of the device
1542  * (once per link).
1543  *
1544  * Links with the DL_FLAG_STATELESS flag set are ignored.
1545  *
1546  * Since the device is guaranteed to be runtime-active at the point this is
1547  * called, nothing else needs to be done here.
1548  *
1549  * Moreover, this is called after device_links_busy() has returned 'false', so
1550  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1551  * therefore rpm_active can't be manipulated concurrently.
1552  */
1553 void pm_runtime_clean_up_links(struct device *dev)
1554 {
1555 	struct device_link *link;
1556 	int idx;
1557 
1558 	idx = device_links_read_lock();
1559 
1560 	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1561 		if (link->flags & DL_FLAG_STATELESS)
1562 			continue;
1563 
1564 		if (link->rpm_active) {
1565 			pm_runtime_put_noidle(dev);
1566 			link->rpm_active = false;
1567 		}
1568 	}
1569 
1570 	device_links_read_unlock(idx);
1571 }
1572 
1573 /**
1574  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1575  * @dev: Consumer device.
1576  */
1577 void pm_runtime_get_suppliers(struct device *dev)
1578 {
1579 	struct device_link *link;
1580 	int idx;
1581 
1582 	idx = device_links_read_lock();
1583 
1584 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1585 		if (link->flags & DL_FLAG_PM_RUNTIME)
1586 			pm_runtime_get_sync(link->supplier);
1587 
1588 	device_links_read_unlock(idx);
1589 }
1590 
1591 /**
1592  * pm_runtime_put_suppliers - Drop references to supplier devices.
1593  * @dev: Consumer device.
1594  */
1595 void pm_runtime_put_suppliers(struct device *dev)
1596 {
1597 	struct device_link *link;
1598 	int idx;
1599 
1600 	idx = device_links_read_lock();
1601 
1602 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1603 		if (link->flags & DL_FLAG_PM_RUNTIME)
1604 			pm_runtime_put(link->supplier);
1605 
1606 	device_links_read_unlock(idx);
1607 }
1608 
1609 void pm_runtime_new_link(struct device *dev)
1610 {
1611 	spin_lock_irq(&dev->power.lock);
1612 	dev->power.links_count++;
1613 	spin_unlock_irq(&dev->power.lock);
1614 }
1615 
1616 void pm_runtime_drop_link(struct device *dev)
1617 {
1618 	spin_lock_irq(&dev->power.lock);
1619 	WARN_ON(dev->power.links_count == 0);
1620 	dev->power.links_count--;
1621 	spin_unlock_irq(&dev->power.lock);
1622 }
1623 
1624 /**
1625  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1626  * @dev: Device to suspend.
1627  *
1628  * Disable runtime PM so that we can safely check the device's runtime PM
1629  * status and, if it is active, invoke its ->runtime_suspend callback to
1630  * bring it into the suspended state. Keep runtime PM disabled to preserve
1631  * the state unless we encounter errors.
1632  *
1633  * Typically this function may be invoked from a system suspend callback to
1634  * make sure the device is put into a low-power state.
1635  */
1636 int pm_runtime_force_suspend(struct device *dev)
1637 {
1638 	int (*callback)(struct device *);
1639 	int ret = 0;
1640 
1641 	pm_runtime_disable(dev);
1642 	if (pm_runtime_status_suspended(dev))
1643 		return 0;
1644 
1645 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1646 
1647 	if (!callback) {
1648 		ret = -ENOSYS;
1649 		goto err;
1650 	}
1651 
1652 	ret = callback(dev);
1653 	if (ret)
1654 		goto err;
1655 
1656 	/*
1657 	 * Increase the runtime PM usage count for the device's parent, in case
1658 	 * we find the device in use when system suspend was invoked.  This
1659 	 * informs pm_runtime_force_resume() to resume the parent immediately,
1660 	 * which is needed to be able to resume its children when the resume is
1661 	 * not deferred to be managed via runtime PM.
1662 	 */
1663 	if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
1664 		pm_runtime_get_noresume(dev->parent);
1665 
1666 	pm_runtime_set_suspended(dev);
1667 	return 0;
1668 err:
1669 	pm_runtime_enable(dev);
1670 	return ret;
1671 }
1672 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1673 
1674 /**
1675  * pm_runtime_force_resume - Force a device into resume state if needed.
1676  * @dev: Device to resume.
1677  *
1678  * Prior to invoking this function, we expect the user to have brought the
1679  * device into a low-power state by a call to pm_runtime_force_suspend().
1680  * Here we reverse those actions and bring the device back to full power, if
1681  * it is expected to be used on system resume. To determine that, we check
1682  * whether the runtime PM usage count is greater than 1 (the PM core
1683  * increases the usage count in the system PM prepare phase), as that
1684  * indicates a real user (such as a subsystem, driver, or userspace) is using
1685  * it. If so, the device is expected to be used on system resume as well, so
1686  * we resume it then. Otherwise, we defer the resume to be managed via runtime PM.
1687  *
1688  * Typically this function may be invoked from a system resume callback.
1689  */
1690 int pm_runtime_force_resume(struct device *dev)
1691 {
1692 	int (*callback)(struct device *);
1693 	int ret = 0;
1694 
1695 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1696 
1697 	if (!callback) {
1698 		ret = -ENOSYS;
1699 		goto out;
1700 	}
1701 
1702 	if (!pm_runtime_status_suspended(dev))
1703 		goto out;
1704 
1705 	/*
1706 	 * Decrease the parent's runtime PM usage count, if we increased it
1707 	 * during system suspend in pm_runtime_force_suspend().
1708 	 */
1709 	if (atomic_read(&dev->power.usage_count) > 1) {
1710 		if (dev->parent)
1711 			pm_runtime_put_noidle(dev->parent);
1712 	} else {
1713 		goto out;
1714 	}
1715 
1716 	ret = pm_runtime_set_active(dev);
1717 	if (ret)
1718 		goto out;
1719 
1720 	ret = callback(dev);
1721 	if (ret) {
1722 		pm_runtime_set_suspended(dev);
1723 		goto out;
1724 	}
1725 
1726 	pm_runtime_mark_last_busy(dev);
1727 out:
1728 	pm_runtime_enable(dev);
1729 	return ret;
1730 }
1731 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
1732
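/*
 * Illustrative sketch (compiled out, not part of the original file):
 * drivers whose runtime PM callbacks fully describe the low-power
 * transition often reuse the two force helpers above as their system sleep
 * callbacks; the foo_* names are hypothetical.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* Power down the hardware here. */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* Power up the hardware here. */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
#endif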