// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
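
/*
 * Illustrative sketch (not part of this file): a monitoring component could
 * sample pm_runtime_suspended_time() periodically and derive the fraction of
 * time a device spent suspended.  The variables below are hypothetical.
 *
 *	u64 prev, cur, delta_ms;
 *
 *	prev = pm_runtime_suspended_time(dev);
 *	msleep(1000);
 *	cur = pm_runtime_suspended_time(dev);
 *	delta_ms = div_u64(cur - prev, NSEC_PER_MSEC);
 *	// delta_ms of the ~1000 ms window was spent in RPM_SUSPENDED.
 */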

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
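
/*
 * Example of the arithmetic above (illustrative): with an autosuspend delay of
 * 2000 ms and power.last_busy taken at time T ns, the device may be suspended
 * once ktime_get_mono_fast_ns() passes T + 2000 * NSEC_PER_MSEC.  Drivers feed
 * power.last_busy via pm_runtime_mark_last_busy(), typically right before
 * pm_runtime_put_autosuspend():
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */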

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers to solve the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation, since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-style situations.
 *
 * The dev_hotplug_mutex lock is held in the function for handling
 * hotplug races, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* Hold the power lock, since the bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of
		 * its children set it, because an ancestor's flag may
		 * have been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
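
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * block device driver would typically set the flag right after device_add()
 * and clear it before device_del():
 *
 *	ret = device_add(&mydev->dev);		// 'mydev' is hypothetical
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&mydev->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&mydev->dev, false);
 *	device_del(&mydev->dev);
 */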

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}
}

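/*
 * Background note with an illustrative sketch (not part of this file): the
 * supplier/consumer relationships walked above are created with
 * device_link_add().  A consumer driver that wants runtime PM of a supplier
 * tied to its own would do something like:
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * 'consumer_dev' and 'supplier_dev' are hypothetical; DL_FLAG_RPM_ACTIVE
 * additionally treats the supplier as runtime-resumed at link creation.
 */
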
322
323/**
324 * __rpm_callback - Run a given runtime PM callback for a given device.
325 * @cb: Runtime PM callback to run.
326 * @dev: Device to run the callback for.
327 */
328static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
329 __releases(&dev->power.lock) __acquires(&dev->power.lock)
330{
331 int retval, idx;
332 bool use_links = dev->power.links_count > 0;
333
334 if (dev->power.irq_safe) {
335 spin_unlock(&dev->power.lock);
336 } else {
337 spin_unlock_irq(&dev->power.lock);
338
339 /*
340 * Resume suppliers if necessary.
341 *
342 * The device's runtime PM status cannot change until this
343 * routine returns, so it is safe to read the status outside of
344 * the lock.
345 */
346 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
347 idx = device_links_read_lock();
348
349 retval = rpm_get_suppliers(dev);
350 if (retval)
351 goto fail;
352
353 device_links_read_unlock(idx);
354 }
355 }
356
357 retval = cb(dev);
358
359 if (dev->power.irq_safe) {
360 spin_lock(&dev->power.lock);
361 } else {
362 /*
363 * If the device is suspending and the callback has returned
364 * success, drop the usage counters of the suppliers that have
365 * been reference counted on its resume.
366 *
367 * Do that if resume fails too.
368 */
369 if (use_links
370 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
371 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
372 idx = device_links_read_lock();
373
374 fail:
375 rpm_put_suppliers(dev);
376
377 device_links_read_unlock(idx);
378 }
379
380 spin_lock_irq(&dev->power.lock);
381 }
382
383 return retval;
384}
385
386/**
387 * rpm_idle - Notify device bus type if the device can be suspended.
388 * @dev: Device to notify the bus type about.
389 * @rpmflags: Flag bits.
390 *
391 * Check if the device's runtime PM status allows it to be suspended. If
392 * another idle notification has been started earlier, return immediately. If
393 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
394 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
395 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
396 *
397 * This function must be called under dev->power.lock with interrupts disabled.
398 */
399static int rpm_idle(struct device *dev, int rpmflags)
400{
401 int (*callback)(struct device *);
402 int retval;
403
404 trace_rpm_idle_rcuidle(dev, rpmflags);
405 retval = rpm_check_suspend_allowed(dev);
406 if (retval < 0)
407 ; /* Conditions are wrong. */
408
409 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
410 else if (dev->power.runtime_status != RPM_ACTIVE)
411 retval = -EAGAIN;
412
413 /*
414 * Any pending request other than an idle notification takes
415 * precedence over us, except that the timer may be running.
416 */
417 else if (dev->power.request_pending &&
418 dev->power.request > RPM_REQ_IDLE)
419 retval = -EAGAIN;
420
421 /* Act as though RPM_NOWAIT is always set. */
422 else if (dev->power.idle_notification)
423 retval = -EINPROGRESS;
424 if (retval)
425 goto out;
426
427 /* Pending requests need to be canceled. */
428 dev->power.request = RPM_REQ_NONE;
429
430 if (dev->power.no_callbacks)
431 goto out;
432
433 /* Carry out an asynchronous or a synchronous idle notification. */
434 if (rpmflags & RPM_ASYNC) {
435 dev->power.request = RPM_REQ_IDLE;
436 if (!dev->power.request_pending) {
437 dev->power.request_pending = true;
438 queue_work(pm_wq, &dev->power.work);
439 }
440 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
441 return 0;
442 }
443
444 dev->power.idle_notification = true;
445
446 callback = RPM_GET_CALLBACK(dev, runtime_idle);
447
448 if (callback)
449 retval = __rpm_callback(callback, dev);
450
451 dev->power.idle_notification = false;
452 wake_up_all(&dev->power.wait_queue);
453
454 out:
455 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
456 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
457}
458
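/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * ->runtime_idle() callback can veto the automatic suspend that rpm_idle()
 * would otherwise trigger by returning a negative value, or allow it by
 * returning 0:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);  // 'foo' is hypothetical
 *
 *		return priv->dma_busy ? -EBUSY : 0;
 *	}
 */
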
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be regarded as part of an iSCSI block
		 * device, so network devices and their ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeded and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
					dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
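
/*
 * Usage sketch (hypothetical, not part of this file): a driver that knows its
 * hardware will not be needed for a while can request a delayed suspend
 * instead of an immediate one:
 *
 *	ret = pm_schedule_suspend(dev, 100);	// try to suspend in ~100 ms
 *	if (ret < 0 && ret != -EAGAIN)
 *		dev_warn(dev, "could not schedule suspend: %d\n", ret);
 */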

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count)) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count)) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
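
/*
 * For reference (matching include/linux/pm_runtime.h in this kernel era as
 * far as I can tell; verify against that header): the familiar driver-facing
 * helpers are thin static-inline wrappers around the three entry points above:
 *
 *	pm_runtime_get_sync(dev)  -> __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_get(dev)       -> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put(dev)       -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)  -> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)
 *		-> __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */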

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage_rcuidle(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
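
/*
 * Usage sketch (hypothetical, not part of this file): an interrupt handler
 * that must not touch a possibly-suspended device can take a conditional
 * reference and bail out if the device is not active:
 *
 *	if (pm_runtime_get_if_active(dev, true) <= 0)
 *		return IRQ_NONE;	// device not RPM_ACTIVE, nothing to do
 *	... access registers ...
 *	pm_runtime_put(dev);		// drop the reference taken above
 */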

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irq(&dev->power.lock);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irq(&dev->power.lock);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irq(&dev->power.lock);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
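
/*
 * Usage sketch (hypothetical probe code, not part of this file): a driver
 * whose hardware comes up powered typically seeds the runtime PM state with
 * pm_runtime_set_active() (a wrapper around __pm_runtime_set_status()) before
 * enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);	// hardware is already powered up
 *	pm_runtime_enable(dev);
 */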

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth--;

		/* About to enable runtime PM, set accounting_timestamp to now */
		if (!dev->power.disable_depth)
			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
	} else {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
	}

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else
		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
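
/*
 * Usage sketch (hypothetical, not part of this file): a driver that needs to
 * resume its device from hard-irq context marks it irq-safe once at probe
 * time, after which the synchronous helpers may be called with interrupts
 * disabled:
 *
 *	pm_runtime_irq_safe(dev);	// in probe()
 *	...
 *	pm_runtime_get_sync(dev);	// now legal in atomic context
 */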

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage_rcuidle(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
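
/*
 * Usage sketch (hypothetical, not part of this file): the canonical
 * autosuspend setup in probe(), paired with the mark-busy/put idiom after
 * each burst of I/O:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of inactivity
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */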

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_MANAGED flag unset are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
				device_links_read_lock_held()) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put_noidle(dev);
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			refcount_inc(&link->rpm_active);
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			if (refcount_dec_not_one(&link->rpm_active))
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so that the device's runtime PM status can be checked
 * safely and, if the device is active, invoke its ->runtime_suspend callback
 * to suspend it and change its runtime PM status field to RPM_SUSPENDED.
 * Also, if the device's usage and children counters don't indicate that the
 * device was in use before the system-wide transition under way, decrement
 * its parent's children counter (if there is a parent).  Keep runtime PM
 * disabled to preserve the state unless we encounter errors.
 *
 * Typically this function is invoked from a system suspend callback to make
 * sure the device is put into a low-power state; it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1800
1801/**
1802 * pm_runtime_force_resume - Force a device into resume state if needed.
1803 * @dev: Device to resume.
1804 *
1805 * Prior invoking this function we expect the user to have brought the device
1806 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1807 * those actions and bring the device into full power, if it is expected to be
1808 * used on system resume. In the other case, we defer the resume to be managed
1809 * via runtime PM.
1810 *
1811 * Typically this function may be invoked from a system resume callback.
1812 */
1813int pm_runtime_force_resume(struct device *dev)
1814{
1815 int (*callback)(struct device *);
1816 int ret = 0;
1817
1818 if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1819 goto out;
1820
1821 /*
1822 * The value of the parent's children counter is correct already, so
1823 * just update the status of the device.
1824 */
1825 __update_runtime_status(dev, RPM_ACTIVE);
1826
1827 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1828
1829 ret = callback ? callback(dev) : 0;
1830 if (ret) {
1831 pm_runtime_set_suspended(dev);
1832 goto out;
1833 }
1834
1835 pm_runtime_mark_last_busy(dev);
1836out:
1837 pm_runtime_enable(dev);
1838 return ret;
1839}
1840EXPORT_SYMBOL_GPL(pm_runtime_force_resume);