/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time. If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
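
/*
 * Example (illustrative sketch, not part of the original file; the "foo"
 * names are hypothetical): a driver feeding the expiration logic above.
 * Refreshing power.last_busy after each transfer and dropping the usage
 * count with the _autosuspend variant keeps rescheduling the suspend until
 * the delay set in probe has really elapsed.
 */
static int foo_do_transfer(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);     /* resume the hardware if necessary */

        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* keep the usage count balanced */
                return ret;
        }

        /* ... carry out the I/O ... */

        pm_runtime_mark_last_busy(dev);         /* updates power.last_busy */
        pm_runtime_put_autosuspend(dev);        /* async suspend after the delay */
        return 0;
}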

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback) {
                if (dev->power.irq_safe)
                        spin_unlock(&dev->power.lock);
                else
                        spin_unlock_irq(&dev->power.lock);

                callback(dev);

                if (dev->power.irq_safe)
                        spin_lock(&dev->power.lock);
                else
                        spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        if (dev->power.irq_safe) {
                retval = cb(dev);
        } else {
                spin_unlock_irq(&dev->power.lock);

                retval = cb(dev);

                spin_lock_irq(&dev->power.lock);
        }
        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
 * pending idle notification. If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it. Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY)
                        dev->power.runtime_error = 0;
                else
                        pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled. Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent. This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary. Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
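
/*
 * Example (illustrative sketch, hypothetical names): submitting a delayed
 * suspend request from a driver. The request fails harmlessly with -EAGAIN
 * if the usage count is still nonzero, per rpm_check_suspend_allowed()
 * above.
 */
static void foo_done_for_a_while(struct device *dev)
{
        int ret = pm_schedule_suspend(dev, 500);        /* suspend in ~500 ms */

        if (ret < 0 && ret != -EAGAIN)
                dev_warn(dev, "cannot schedule suspend: %d\n", ret);
}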

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
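
/*
 * For reference: the static inline wrappers in include/linux/pm_runtime.h
 * of this kernel generation map onto the entry point above roughly as
 * follows (check the header of the tree at hand before relying on this):
 *
 *      pm_runtime_idle(dev)     -> __pm_runtime_idle(dev, 0)
 *      pm_request_idle(dev)     -> __pm_runtime_idle(dev, RPM_ASYNC)
 *      pm_runtime_put(dev)      -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT)
 */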

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
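
/*
 * Example (illustrative sketch, hypothetical names): the canonical get/put
 * pairing built on the entry points above. pm_runtime_get_sync() maps onto
 * __pm_runtime_resume(dev, RPM_GET_PUT), so a non-negative return value
 * means the device is active when it returns.
 */
static int foo_access_hardware(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* balance the usage count */
                return ret;
        }

        /* ... touch the registers ... */

        pm_runtime_put(dev);                    /* asynchronous idle notification */
        return 0;
}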

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
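
/*
 * Example (illustrative sketch, hypothetical probe function): a device that
 * powers up in the active state must tell the core so before runtime PM is
 * enabled, via the pm_runtime_set_active() wrapper around
 * __pm_runtime_set_status().
 */
static int foo_probe(struct device *dev)
{
        int ret = pm_runtime_set_active(dev);   /* may fail with -EBUSY */

        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}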

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
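
/*
 * Example (illustrative sketch, hypothetical remove path): before tearing
 * down resources that the runtime PM callbacks depend on, raise the usage
 * count and flush everything still in flight.
 */
static void foo_teardown(struct device *dev)
{
        pm_runtime_get_noresume(dev);   /* block new suspends */
        pm_runtime_barrier(dev);        /* wakes the device up if a resume was pending */

        /* ... release driver resources; no callbacks are running now ... */

        pm_runtime_put_noidle(dev);
}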

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
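
/*
 * Example (illustrative sketch, hypothetical names): disable/enable calls
 * nest via power.disable_depth, so a section that must not race with the
 * runtime PM callbacks can be bracketed like this. pm_runtime_disable() is
 * the check_resume=true wrapper around __pm_runtime_disable().
 */
static void foo_reconfigure(struct device *dev)
{
        pm_runtime_disable(dev);        /* disable_depth: 0 -> 1, flushes callbacks */

        /* ... reprogram state the callbacks rely on ... */

        pm_runtime_enable(dev);         /* disable_depth: 1 -> 0 */
}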

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
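
/*
 * For reference: pm_runtime_forbid() and pm_runtime_allow() back the
 * "power/control" sysfs attribute ("on" and "auto" respectively). A driver
 * that wants runtime suspend disallowed until user space opts in can do
 * this in probe (illustrative sketch, hypothetical name):
 */
static void foo_default_to_on(struct device *dev)
{
        pm_runtime_forbid(dev);         /* user space may later write "auto" */
}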

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own. The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
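
/*
 * Example (illustrative sketch, hypothetical names): a subdevice that is
 * powered entirely through its parent, e.g. a logical port of a controller,
 * declares that it has no callbacks of its own. rpm_suspend()/rpm_resume()
 * then take the fast no_callback paths and only the parent's child_count
 * changes. Assumes the parent is currently active.
 */
static void foo_init_port(struct device *port)
{
        pm_runtime_no_callbacks(port);
        pm_runtime_set_active(port);    /* increments the parent's child_count */
        pm_runtime_enable(port);
}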

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled. It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
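
/*
 * Example (illustrative sketch, hypothetical names): once
 * pm_runtime_irq_safe() has been called for a device, the synchronous
 * helpers become legal in atomic context, because the callbacks then run
 * under the spinlock with interrupts off.
 */
static void foo_handle_event_in_irq(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* atomic context is fine with irq_safe set */

        /* ... acknowledge the event ... */

        pm_runtime_put(dev);
}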

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it. Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value. If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
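
/*
 * Example (illustrative sketch, hypothetical probe): wiring the two knobs
 * above together. Both must be set before the device is expected to
 * autosuspend; pm_runtime_use_autosuspend() is the wrapper for
 * __pm_runtime_use_autosuspend(dev, true).
 */
static void foo_setup_autosuspend(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s of inactivity */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}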

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}

/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

#define RPM_GET_CALLBACK(dev, cb)                               \
({                                                              \
        int (*__rpm_cb)(struct device *__d);                    \
                                                                \
        if (dev->pm_domain)                                     \
                __rpm_cb = dev->pm_domain->ops.cb;              \
        else if (dev->type && dev->type->pm)                    \
                __rpm_cb = dev->type->pm->cb;                   \
        else if (dev->class && dev->class->pm)                  \
                __rpm_cb = dev->class->pm->cb;                  \
        else if (dev->bus && dev->bus->pm)                      \
                __rpm_cb = dev->bus->pm->cb;                    \
        else                                                    \
                __rpm_cb = NULL;                                \
                                                                \
        if (!__rpm_cb && dev->driver && dev->driver->pm)        \
                __rpm_cb = dev->driver->pm->cb;                 \
                                                                \
        __rpm_cb;                                               \
})

static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
{
        return RPM_GET_CALLBACK(dev, runtime_suspend);
}

static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
{
        return RPM_GET_CALLBACK(dev, runtime_resume);
}

#ifdef CONFIG_PM_RUNTIME
static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
{
        return RPM_GET_CALLBACK(dev, runtime_idle);
}

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time. If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
        return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes. The
 * situation was first pointed out by Alan Stern. Network devices
 * are involved in the same kind of situation through iSCSI.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
        static DEFINE_MUTEX(dev_hotplug_mutex);

        mutex_lock(&dev_hotplug_mutex);
        for (;;) {
                bool enabled;

                /* Hold the power lock since the bitfield is not SMP-safe. */
                spin_lock_irq(&dev->power.lock);
                enabled = dev->power.memalloc_noio;
                dev->power.memalloc_noio = enable;
                spin_unlock_irq(&dev->power.lock);

                /*
                 * No need to enable ancestors any more if the device
                 * has already been enabled.
                 */
                if (enabled && enable)
                        break;

                dev = dev->parent;

                /*
                 * Clear the flag of the parent device only if none of
                 * its children has the flag set, because an ancestor's
                 * flag may have been set by any one of its descendants.
                 */
                if (!dev || (!enable &&
                             device_for_each_child(dev, NULL,
                                                   dev_memalloc_noio)))
                        break;
        }
        mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
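
/*
 * Example (illustrative sketch, hypothetical block-device driver): the flag
 * is flipped between device_add() and device_del(), as required above, so
 * the runtime PM callbacks of the device and its ancestors run with
 * GFP_NOIO allocation semantics.
 */
static int foo_blk_register(struct device *dev)
{
        int ret = device_add(dev);

        if (ret)
                return ret;

        pm_runtime_set_memalloc_noio(dev, true);
        return 0;
}

static void foo_blk_unregister(struct device *dev)
{
        pm_runtime_set_memalloc_noio(dev, false);
        device_del(dev);
}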

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (__dev_pm_qos_read_value(dev) < 0)
                retval = -EPERM;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks)
                goto out;

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                trace_rpm_return_int(dev, _THIS_IP_, 0);
                return 0;
        }

        dev->power.idle_notification = true;

        callback = rpm_get_idle_cb(dev);

        if (callback)
                retval = __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        if (dev->power.memalloc_noio) {
                unsigned int noio_flag;

                /*
                 * Deadlock might be caused if memory allocation with
                 * GFP_KERNEL happens inside the runtime_suspend and
                 * runtime_resume callbacks of one block device's
                 * ancestor or the block device itself. A network
                 * device may be regarded as part of an iSCSI block
                 * device, so the network device and its ancestors
                 * should be marked as memalloc_noio too.
                 */
                noio_flag = memalloc_noio_save();
                retval = __rpm_callback(cb, dev);
                memalloc_noio_restore(noio_flag);
        } else {
                retval = __rpm_callback(cb, dev);
        }

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it. Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        callback = rpm_get_suspend_cb(dev);

        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;

 fail:
        __update_runtime_status(dev, RPM_ACTIVE);
        dev->power.deferred_resume = false;
        wake_up_all(&dev->power.wait_queue);

        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback routine failed an autosuspend, and
                 * if the last_busy time has been updated so that there
                 * is a new autosuspend expiration time, automatically
                 * reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
        goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled. Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent. This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary. Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = rpm_get_resume_cb(dev);

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
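
/*
 * Illustrative sketch, not part of this file: a driver that knows roughly
 * how long its hardware will stay idle can request a delayed, asynchronous
 * suspend instead of suspending right away. The foo_* names and the 500 ms
 * delay below are hypothetical.
 *
 *	static void foo_hw_done(struct device *foo_dev)
 *	{
 *		int ret = pm_schedule_suspend(foo_dev, 500);
 *
 *		if (ret < 0)
 *			dev_dbg(foo_dev, "suspend not scheduled: %d\n", ret);
 *	}
 *
 * Drivers with a fixed per-device delay usually prefer the autosuspend
 * helpers below, which track power.last_busy automatically.
 */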

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
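
/*
 * Illustrative note: drivers rarely call __pm_runtime_idle() directly; they
 * use the inline wrappers from <linux/pm_runtime.h>. A sketch of the flag
 * mappings those wrappers are expected to pass:
 *
 *	pm_runtime_idle(dev)	->  __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	->  __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)	->  __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */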

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
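
/*
 * Illustrative sketch: a driver finishing a burst of I/O typically enters
 * this path through the put wrappers rather than directly, e.g. (foo_dev
 * is a hypothetical struct device pointer):
 *
 *	pm_runtime_mark_last_busy(foo_dev);
 *	pm_runtime_put_autosuspend(foo_dev);
 *
 * which drops the usage count and, once it hits zero, queues an
 * autosuspend request that honors the device's autosuspend delay.
 */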

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
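
/*
 * Illustrative sketch: the canonical synchronous I/O pattern built on top
 * of this entry point, via the pm_runtime_get_sync() wrapper (all foo_*
 * names are hypothetical):
 *
 *	static int foo_xfer(struct device *foo_dev)
 *	{
 *		int ret = pm_runtime_get_sync(foo_dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo_dev);
 *			return ret;
 *		}
 *		ret = foo_hw_xfer(foo_dev);
 *		pm_runtime_mark_last_busy(foo_dev);
 *		pm_runtime_put_autosuspend(foo_dev);
 *		return ret;
 *	}
 *
 * The put_noidle() on the error path matters: pm_runtime_get_sync()
 * increments the usage count even when the resume fails, so the count
 * still has to be balanced.
 */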

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
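
/*
 * Illustrative sketch: this function is normally reached through the
 * pm_runtime_set_active() and pm_runtime_set_suspended() helpers. A probe
 * routine for hardware that comes up powered might do (hypothetical
 * driver code):
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * Setting the status first keeps the core's view of the device consistent
 * with its real power state before runtime PM transitions are allowed.
 */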

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1177
1178/**
1179 * pm_runtime_enable - Enable runtime PM of a device.
1180 * @dev: Device to handle.
1181 */
1182void pm_runtime_enable(struct device *dev)
1183{
1184 unsigned long flags;
1185
1186 spin_lock_irqsave(&dev->power.lock, flags);
1187
1188 if (dev->power.disable_depth > 0)
1189 dev->power.disable_depth--;
1190 else
1191 dev_warn(dev, "Unbalanced %s!\n", __func__);
1192
1193 spin_unlock_irqrestore(&dev->power.lock, flags);
1194}
1195EXPORT_SYMBOL_GPL(pm_runtime_enable);
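
/*
 * Illustrative sketch: enable/disable calls nest via power.disable_depth,
 * so drivers typically pair them across probe and remove (hypothetical
 * platform driver code):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		return 0;
 *	}
 *
 * Note that pm_runtime_init() starts every device with disable_depth == 1,
 * so runtime PM stays off until the first pm_runtime_enable().
 */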

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
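
/*
 * Illustrative note: pm_runtime_forbid() and pm_runtime_allow() back the
 * power/control sysfs attribute ("on" and "auto" respectively). A driver
 * whose device should default to staying powered can call (hypothetical
 * probe snippet):
 *
 *	pm_runtime_forbid(&pdev->dev);
 *
 * so the device is only runtime suspended after user space writes "auto"
 * to its control file.
 */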

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own. The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled. It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
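
/*
 * Illustrative sketch: a driver whose ->runtime_suspend()/->runtime_resume()
 * callbacks are short and safe to run with interrupts off can declare that
 * once, typically at probe time (hypothetical code):
 *
 *	pm_runtime_irq_safe(&pdev->dev);
 *
 * after which pm_runtime_get_sync() and the other synchronous helpers may
 * be used from atomic context for this device, at the cost described
 * above: the parent can no longer runtime suspend.
 */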

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it. Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value. If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
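
/*
 * Illustrative sketch: the two setters above are usually combined during
 * probe to opt a device into autosuspend (hypothetical code; the 2000 ms
 * delay is an arbitrary example):
 *
 *	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 *	pm_runtime_use_autosuspend(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 *
 * pm_runtime_use_autosuspend() is the inline wrapper that calls
 * __pm_runtime_use_autosuspend(dev, true).
 */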

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}
#endif

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend() callback to bring it
 * into suspend state. Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);

	/*
	 * Note that pm_runtime_status_suspended() returns false while
	 * !CONFIG_PM_RUNTIME, which means the device will be put into low
	 * power state.
	 */
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = rpm_get_suspend_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend(). Here
 * we reverse those actions and bring the device back to full power. We also
 * update the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = rpm_get_resume_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = callback(dev);
	if (ret)
		goto out;

	pm_runtime_set_active(dev);
	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
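
/*
 * Illustrative sketch: drivers that want their runtime PM callbacks reused
 * for system sleep can point their dev_pm_ops at the two helpers above
 * (hypothetical driver code; foo_runtime_* are the driver's own callbacks):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 *
 * System suspend then funnels through the same ->runtime_suspend() path
 * the device already exercises at run time.
 */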