/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
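
/*
 * Illustrative sketch (not part of the original file, guarded by #if 0 so it
 * is never built): readers of the counters above, such as the
 * runtime_active_time attribute in drivers/base/power/sysfs.c, refresh the
 * accounting under dev->power.lock before reporting it.  The function name
 * is hypothetical.
 */
#if 0
static ssize_t example_active_time_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	/* Fold the time spent in the current state into the counters. */
	update_pm_runtime_accounting(dev);
	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
	spin_unlock_irq(&dev->power.lock);

	return ret;
}
#endif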

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
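
/*
 * Illustrative sketch (not part of the original file, never built): the
 * expiration time computed above is driven by drivers refreshing
 * power.last_busy.  The usual pattern after finishing I/O is shown below;
 * foo_finish_io is hypothetical.
 */
#if 0
static void foo_finish_io(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);	 /* power.last_busy = jiffies */
	pm_runtime_put_autosuspend(dev); /* drop usage count, request autosuspend */
}
#endif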

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
		callback = dev->bus->pm->runtime_idle;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
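
/*
 * Illustrative sketch (not part of the original file, never built): a minimal
 * ->runtime_idle() callback of the kind invoked above often just turns the
 * idle notification into an immediate suspend.  foo_runtime_idle is
 * hypothetical.
 */
#if 0
static int foo_runtime_idle(struct device *dev)
{
	/* The device looks idle; ask the core to suspend it right away. */
	return pm_runtime_suspend(dev);
}
#endif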

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	spin_lock_irq(&dev->power.lock);
	dev->power.runtime_error = retval;

	return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
		callback = dev->bus->pm->runtime_suspend;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
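
/*
 * Illustrative sketch (not part of the original file, never built): a
 * ->runtime_suspend() callback reached through rpm_callback() above.
 * Returning -EBUSY or -EAGAIN vetoes the suspend without setting
 * power.runtime_error, as handled in rpm_suspend().  All foo_* names are
 * hypothetical.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);

	if (foo_chip_busy(chip))
		return -EBUSY;	/* not fatal: the device stays RPM_ACTIVE */

	foo_chip_power_down(chip);
	return 0;
}
#endif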

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
		callback = dev->bus->pm->runtime_resume;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
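
/*
 * Illustrative sketch (not part of the original file, never built): the
 * ->runtime_resume() counterpart to the suspend sketch above, and how the
 * three callbacks are typically wired into a dev_pm_ops so that the bus,
 * type or class lookups in rpm_idle(), rpm_suspend() and rpm_resume() can
 * find them.  All foo_* names are hypothetical.
 */
#if 0
static int foo_runtime_resume(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);

	foo_chip_power_up(chip);
	return 0;	/* nonzero would leave the device in RPM_SUSPENDED */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};
#endif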

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
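
/*
 * Illustrative sketch (not part of the original file, never built): deferring
 * a suspend request until half a second after the last I/O completed.
 * foo_io_done is hypothetical.
 */
#if 0
static void foo_io_done(struct device *dev)
{
	/* Queue an asynchronous suspend request 500 ms from now. */
	pm_schedule_suspend(dev, 500);
}
#endif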

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
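
/*
 * For reference, the inline helpers in this tree's include/linux/pm_runtime.h
 * funnel into the entry point above:
 *
 *	pm_runtime_idle(dev)	 -> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	 -> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)	 -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT)
 */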

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
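
/*
 * Illustrative sketch (not part of the original file, never built): the
 * canonical get/put pattern built on the entry points above.
 * pm_runtime_get_sync() resolves to __pm_runtime_resume(dev, RPM_GET_PUT)
 * and blocks until the device is active.  foo_do_transfer is hypothetical.
 */
#if 0
static int foo_do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* usage count is incremented even on error */
	if (ret < 0)
		goto out;

	/* ... the device is powered; perform the I/O here ... */
	ret = 0;

 out:
	pm_runtime_put(dev);	/* may trigger an asynchronous idle notification */
	return ret;
}
#endif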

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified
 * to reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
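
/*
 * Illustrative sketch (not part of the original file, never built): a probe
 * routine for a device that powers up active, telling the core so before
 * enabling run-time PM.  pm_runtime_set_active() resolves to
 * __pm_runtime_set_status(dev, RPM_ACTIVE).  foo_probe is hypothetical.
 */
#if 0
static int foo_probe(struct device *dev)
{
	int ret;

	/* Only valid while run-time PM is still disabled for the device. */
	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;	/* -EBUSY: the parent is not active */

	pm_runtime_enable(dev);
	return 0;
}
#endif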

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
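
/*
 * Illustrative sketch (not part of the original file, never built): quiescing
 * run-time PM before tearing a device down, so that no callback can run
 * concurrently with the teardown.  foo_teardown is hypothetical.
 */
#if 0
static void foo_teardown(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* block further suspends */
	pm_runtime_barrier(dev);	/* flush requests, wait for callbacks */

	/* ... no run-time PM activity is in flight now ... */
}
#endif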

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
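
/*
 * Illustrative sketch (not part of the original file, never built): balancing
 * the pm_runtime_enable() from probe in the matching remove path.
 * pm_runtime_disable() is the inline wrapper around
 * __pm_runtime_disable(dev, true).  foo_remove is hypothetical.
 */
#if 0
static int foo_remove(struct device *dev)
{
	pm_runtime_disable(dev);	/* waits for callbacks in progress */
	return 0;
}
#endif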

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
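
/*
 * Note: pm_runtime_forbid() and pm_runtime_allow() back the power/control
 * sysfs attribute (drivers/base/power/sysfs.c): writing "on" calls
 * pm_runtime_forbid(), writing "auto" calls pm_runtime_allow().
 */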

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
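
/*
 * Illustrative sketch (not part of the original file, never built): enabling
 * autosuspend with a two-second delay at probe time.
 * pm_runtime_use_autosuspend() is the inline wrapper around
 * __pm_runtime_use_autosuspend(dev, true).  foo_setup_autosuspend is
 * hypothetical.
 */
#if 0
static void foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);
}
#endif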

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}