// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
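
/*
 * Illustrative sketch (not part of the original file): RPM_GET_CALLBACK()
 * turns a callback name into a byte offset within struct dev_pm_ops, so one
 * helper can fetch any runtime PM callback. For example, resolving a
 * device's ->runtime_suspend() callback:
 *
 *	pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * expands to
 *
 *	__rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend));
 *
 * honoring the PM domain -> type -> class -> bus -> driver precedence above.
 */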

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time. If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
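
/*
 * Worked example (illustrative, not part of the original file): with an
 * autosuspend delay of 2000 ms and power.last_busy captured at time T ns,
 * the expiration computed above is
 *
 *	expires = T + 2000 * NSEC_PER_MSEC;
 *
 * so a driver that calls pm_runtime_mark_last_busy() after each I/O keeps
 * pushing the suspend deadline 2 s past the most recent activity.
 */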

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * This function should only be called by block device or network device
 * drivers to solve the deadlock problem during runtime resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes. The
 * situation was first pointed out by Alan Stern. Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function to handle the
 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the flag has
		 * already been set on the device.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children has the flag set, because an ancestor's flag
		 * may have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
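
/*
 * Usage sketch (illustrative, not part of the original file): a block or
 * network driver would set the flag right after registering the device and
 * clear it before unregistering, e.g. in a hypothetical probe/remove path:
 *
 *	ret = device_add(dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */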

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume &&
	    dev->power.runtime_status == RPM_SUSPENDING) ||
	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links &&
		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one of a block device's
		 * ancestors or of the block device itself. A network
		 * device might be thought of as part of an iSCSI block
		 * device, so network devices and their ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;

	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;

	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						(NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING) {
				dev->power.deferred_resume = true;
				if (rpmflags & RPM_NOWAIT)
					retval = -EINPROGRESS;
			} else {
				retval = -EINPROGRESS;
			}
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING &&
			    dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0 ||
		    dev->parent->power.ignore_children ||
		    dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;

		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;

		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
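
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * knows its hardware will stay idle for a while might request a delayed
 * suspend instead of an immediate one:
 *
 *	ret = pm_schedule_suspend(dev, 500);	(suspend in roughly 500 ms)
 *	if (ret < 0)
 *		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
 *
 * A delay of 0 falls through to an asynchronous suspend request right away.
 */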

static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error). Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
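
/*
 * For reference (helpers defined in include/linux/pm_runtime.h, not in this
 * file): the common wrappers funnel into this entry point, e.g.
 *
 *	pm_runtime_idle(dev)	 -> __pm_runtime_idle(dev, 0);
 *	pm_runtime_put(dev)	 -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT);
 */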

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error). Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
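
/*
 * For reference (helpers defined in include/linux/pm_runtime.h, not in this
 * file): the suspend-side wrappers funnel into this entry point, e.g.
 *
 *	pm_runtime_suspend(dev)	    -> __pm_runtime_suspend(dev, 0);
 *	pm_runtime_autosuspend(dev) -> __pm_runtime_suspend(dev, RPM_AUTO);
 *	pm_runtime_put_autosuspend(dev) ->
 *		__pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
 */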

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
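
/*
 * Usage sketch (illustrative, not part of the original file): a driver's I/O
 * path typically brackets hardware access with a get/put pair built on the
 * entry points above, shown here for a hypothetical foo_xfer():
 *
 *	static int foo_xfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_resume_and_get(dev);
 *
 *		if (ret < 0)
 *			return ret;
 *		... touch the hardware ...
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 */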

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
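
/*
 * Usage sketch (illustrative, not part of the original file): code that must
 * not wake a sleeping device, such as a hypothetical foo_irq_handler(), can
 * take a reference only when the device is already powered:
 *
 *	if (pm_runtime_get_if_active(dev, true) <= 0)
 *		return IRQ_NONE;	(suspended or runtime PM disabled)
 *	... handle the interrupt ...
 *	pm_runtime_put(dev);
 */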

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit. The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children &&
		    parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
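
/*
 * For reference (helpers defined in include/linux/pm_runtime.h, not in this
 * file): the usual callers are the wrappers
 *
 *	pm_runtime_set_active(dev)    -> __pm_runtime_set_status(dev, RPM_ACTIVE);
 *	pm_runtime_set_suspended(dev) -> __pm_runtime_set_status(dev, RPM_SUSPENDED);
 *
 * e.g. a probe routine that finds its hardware already powered on would call
 * pm_runtime_set_active(dev) before pm_runtime_enable(dev) so the core's view
 * matches reality.
 */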

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING ||
	    dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending &&
	    dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
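
/*
 * Usage sketch (illustrative, not part of the original file): in a
 * hypothetical probe routine, the devres variant removes the need for a
 * matching pm_runtime_disable() in the error paths and in remove():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		... rest of probe ...
 *		return 0;
 *	}
 */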

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
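
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * typically opts into autosuspend once at probe time, via the
 * pm_runtime_use_autosuspend() wrapper around the function above:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	(2 s of idle time)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * and pairs it with pm_runtime_dont_use_autosuspend() on the way out.
 */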

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

1852/**
1853 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1854 * @dev: Device to suspend.
1855 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED. Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent). Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically, this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state. It should only be used
 * during system-wide PM transitions to sleep states, and it assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 *
 * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
 * state where this function has called the ->runtime_suspend callback but the
 * PM core marks the driver as runtime active.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function is called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the device is expected to have been brought
 * into a low-power state by a call to pm_runtime_force_suspend(). Here, those
 * actions are reversed and the device is brought back to full power if it is
 * expected to be used on system resume. Otherwise, the resume is deferred to
 * be managed via runtime PM.
 *
 * Typically, this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
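
/*
 * Usage sketch (illustrative only, not part of this file; the foo_* names
 * are hypothetical): a driver whose runtime PM callbacks fully quiesce the
 * device can reuse them for system sleep by pointing its system sleep
 * operations at the pm_runtime_force_suspend()/pm_runtime_force_resume()
 * pair:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		return 0;	// put the device into a low-power state
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		return 0;	// bring the device back to full power
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 *
 * Mind the DPM_FLAG_SMART_SUSPEND caveat in the pm_runtime_force_suspend()
 * description above.
 */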
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
4 *
5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7 */
8#include <linux/sched/mm.h>
9#include <linux/ktime.h>
10#include <linux/hrtimer.h>
11#include <linux/export.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_wakeirq.h>
14#include <trace/events/rpm.h>
15
16#include "../base.h"
17#include "power.h"
18
19typedef int (*pm_callback_t)(struct device *);
20
21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22{
23 pm_callback_t cb;
24 const struct dev_pm_ops *ops;
25
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
34 else
35 ops = NULL;
36
37 if (ops)
38 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39 else
40 cb = NULL;
41
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45 return cb;
46}
47
48#define RPM_GET_CALLBACK(dev, callback) \
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
50
51static int rpm_resume(struct device *dev, int rpmflags);
52static int rpm_suspend(struct device *dev, int rpmflags);
53
54/**
55 * update_pm_runtime_accounting - Update the time accounting of power states
56 * @dev: Device to update the accounting for
57 *
58 * In order to be able to have time accounting of the various power states
59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
60 * PM), we need to track the time spent in each state.
61 * update_pm_runtime_accounting must be called each time before the
62 * runtime_status field is updated, to account the time in the old state
63 * correctly.
64 */
65static void update_pm_runtime_accounting(struct device *dev)
66{
67 u64 now, last, delta;
68
69 if (dev->power.disable_depth > 0)
70 return;
71
72 last = dev->power.accounting_timestamp;
73
74 now = ktime_get_mono_fast_ns();
75 dev->power.accounting_timestamp = now;
76
77 /*
78 * Because ktime_get_mono_fast_ns() is not monotonic during
79 * timekeeping updates, ensure that 'now' is after the last saved
80 * timesptamp.
81 */
82 if (now < last)
83 return;
84
85 delta = now - last;
86
87 if (dev->power.runtime_status == RPM_SUSPENDED)
88 dev->power.suspended_time += delta;
89 else
90 dev->power.active_time += delta;
91}
92
93static void __update_runtime_status(struct device *dev, enum rpm_status status)
94{
95 update_pm_runtime_accounting(dev);
96 dev->power.runtime_status = status;
97}
98
99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
100{
101 u64 time;
102 unsigned long flags;
103
104 spin_lock_irqsave(&dev->power.lock, flags);
105
106 update_pm_runtime_accounting(dev);
107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
108
109 spin_unlock_irqrestore(&dev->power.lock, flags);
110
111 return time;
112}
113
114u64 pm_runtime_active_time(struct device *dev)
115{
116 return rpm_get_accounted_time(dev, false);
117}
118
119u64 pm_runtime_suspended_time(struct device *dev)
120{
121 return rpm_get_accounted_time(dev, true);
122}
123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
124
125/**
126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
127 * @dev: Device to handle.
128 */
129static void pm_runtime_deactivate_timer(struct device *dev)
130{
131 if (dev->power.timer_expires > 0) {
132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133 dev->power.timer_expires = 0;
134 }
135}
136
137/**
138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
139 * @dev: Device to handle.
140 */
141static void pm_runtime_cancel_pending(struct device *dev)
142{
143 pm_runtime_deactivate_timer(dev);
144 /*
145 * In case there's a request pending, make sure its work function will
146 * return without doing anything.
147 */
148 dev->power.request = RPM_REQ_NONE;
149}
150
151/*
152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
153 * @dev: Device to handle.
154 *
155 * Compute the autosuspend-delay expiration time based on the device's
156 * power.last_busy time. If the delay has already expired or is disabled
157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
159 *
160 * This function may be called either with or without dev->power.lock held.
161 * Either way it can be racy, since power.last_busy may be updated at any time.
162 */
163u64 pm_runtime_autosuspend_expiration(struct device *dev)
164{
165 int autosuspend_delay;
166 u64 expires;
167
168 if (!dev->power.use_autosuspend)
169 return 0;
170
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
172 if (autosuspend_delay < 0)
173 return 0;
174
175 expires = READ_ONCE(dev->power.last_busy);
176 expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
177 if (expires > ktime_get_mono_fast_ns())
178 return expires; /* Expires in the future */
179
180 return 0;
181}
182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
183
184static int dev_memalloc_noio(struct device *dev, void *data)
185{
186 return dev->power.memalloc_noio;
187}
188
189/*
190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
191 * @dev: Device to handle.
192 * @enable: True for setting the flag and False for clearing the flag.
193 *
194 * Set the flag for all devices in the path from the device to the
195 * root device in the device tree if @enable is true, otherwise clear
196 * the flag for devices in the path whose siblings don't set the flag.
197 *
198 * The function should only be called by block device, or network
199 * device driver for solving the deadlock problem during runtime
200 * resume/suspend:
201 *
202 * If memory allocation with GFP_KERNEL is called inside runtime
203 * resume/suspend callback of any one of its ancestors(or the
204 * block device itself), the deadlock may be triggered inside the
205 * memory allocation since it might not complete until the block
206 * device becomes active and the involed page I/O finishes. The
207 * situation is pointed out first by Alan Stern. Network device
208 * are involved in iSCSI kind of situation.
209 *
210 * The lock of dev_hotplug_mutex is held in the function for handling
211 * hotplug race because pm_runtime_set_memalloc_noio() may be called
212 * in async probe().
213 *
214 * The function should be called between device_add() and device_del()
215 * on the affected device(block/network device).
216 */
217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
218{
219 static DEFINE_MUTEX(dev_hotplug_mutex);
220
221 mutex_lock(&dev_hotplug_mutex);
222 for (;;) {
223 bool enabled;
224
225 /* hold power lock since bitfield is not SMP-safe. */
226 spin_lock_irq(&dev->power.lock);
227 enabled = dev->power.memalloc_noio;
228 dev->power.memalloc_noio = enable;
229 spin_unlock_irq(&dev->power.lock);
230
231 /*
232 * not need to enable ancestors any more if the device
233 * has been enabled.
234 */
235 if (enabled && enable)
236 break;
237
238 dev = dev->parent;
239
240 /*
241 * clear flag of the parent device only if all the
242 * children don't set the flag because ancestor's
243 * flag was set by any one of the descendants.
244 */
245 if (!dev || (!enable &&
246 device_for_each_child(dev, NULL, dev_memalloc_noio)))
247 break;
248 }
249 mutex_unlock(&dev_hotplug_mutex);
250}
251EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
252
253/**
254 * rpm_check_suspend_allowed - Test whether a device may be suspended.
255 * @dev: Device to test.
256 */
257static int rpm_check_suspend_allowed(struct device *dev)
258{
259 int retval = 0;
260
261 if (dev->power.runtime_error)
262 retval = -EINVAL;
263 else if (dev->power.disable_depth > 0)
264 retval = -EACCES;
265 else if (atomic_read(&dev->power.usage_count))
266 retval = -EAGAIN;
267 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
268 retval = -EBUSY;
269
270 /* Pending resume requests take precedence over suspends. */
271 else if ((dev->power.deferred_resume &&
272 dev->power.runtime_status == RPM_SUSPENDING) ||
273 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
274 retval = -EAGAIN;
275 else if (__dev_pm_qos_resume_latency(dev) == 0)
276 retval = -EPERM;
277 else if (dev->power.runtime_status == RPM_SUSPENDED)
278 retval = 1;
279
280 return retval;
281}
282
283static int rpm_get_suppliers(struct device *dev)
284{
285 struct device_link *link;
286
287 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
288 device_links_read_lock_held()) {
289 int retval;
290
291 if (!(link->flags & DL_FLAG_PM_RUNTIME))
292 continue;
293
294 retval = pm_runtime_get_sync(link->supplier);
295 /* Ignore suppliers with disabled runtime PM. */
296 if (retval < 0 && retval != -EACCES) {
297 pm_runtime_put_noidle(link->supplier);
298 return retval;
299 }
300 refcount_inc(&link->rpm_active);
301 }
302 return 0;
303}
304
305/**
306 * pm_runtime_release_supplier - Drop references to device link's supplier.
307 * @link: Target device link.
308 *
309 * Drop all runtime PM references associated with @link to its supplier device.
310 */
311void pm_runtime_release_supplier(struct device_link *link)
312{
313 struct device *supplier = link->supplier;
314
315 /*
316 * The additional power.usage_count check is a safety net in case
317 * the rpm_active refcount becomes saturated, in which case
318 * refcount_dec_not_one() would return true forever, but it is not
319 * strictly necessary.
320 */
321 while (refcount_dec_not_one(&link->rpm_active) &&
322 atomic_read(&supplier->power.usage_count) > 0)
323 pm_runtime_put_noidle(supplier);
324}
325
326static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
327{
328 struct device_link *link;
329
330 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
331 device_links_read_lock_held()) {
332 pm_runtime_release_supplier(link);
333 if (try_to_suspend)
334 pm_request_idle(link->supplier);
335 }
336}
337
338static void rpm_put_suppliers(struct device *dev)
339{
340 __rpm_put_suppliers(dev, true);
341}
342
343static void rpm_suspend_suppliers(struct device *dev)
344{
345 struct device_link *link;
346 int idx = device_links_read_lock();
347
348 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
349 device_links_read_lock_held())
350 pm_request_idle(link->supplier);
351
352 device_links_read_unlock(idx);
353}
354
355/**
356 * __rpm_callback - Run a given runtime PM callback for a given device.
357 * @cb: Runtime PM callback to run.
358 * @dev: Device to run the callback for.
359 */
360static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
361 __releases(&dev->power.lock) __acquires(&dev->power.lock)
362{
363 int retval = 0, idx;
364 bool use_links = dev->power.links_count > 0;
365
366 if (dev->power.irq_safe) {
367 spin_unlock(&dev->power.lock);
368 } else {
369 spin_unlock_irq(&dev->power.lock);
370
371 /*
372 * Resume suppliers if necessary.
373 *
374 * The device's runtime PM status cannot change until this
375 * routine returns, so it is safe to read the status outside of
376 * the lock.
377 */
378 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
379 idx = device_links_read_lock();
380
381 retval = rpm_get_suppliers(dev);
382 if (retval) {
383 rpm_put_suppliers(dev);
384 goto fail;
385 }
386
387 device_links_read_unlock(idx);
388 }
389 }
390
391 if (cb)
392 retval = cb(dev);
393
394 if (dev->power.irq_safe) {
395 spin_lock(&dev->power.lock);
396 } else {
397 /*
398 * If the device is suspending and the callback has returned
399 * success, drop the usage counters of the suppliers that have
400 * been reference counted on its resume.
401 *
402 * Do that if resume fails too.
403 */
404 if (use_links &&
405 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
406 (dev->power.runtime_status == RPM_RESUMING && retval))) {
407 idx = device_links_read_lock();
408
409 __rpm_put_suppliers(dev, false);
410
411fail:
412 device_links_read_unlock(idx);
413 }
414
415 spin_lock_irq(&dev->power.lock);
416 }
417
418 return retval;
419}
420
421/**
422 * rpm_callback - Run a given runtime PM callback for a given device.
423 * @cb: Runtime PM callback to run.
424 * @dev: Device to run the callback for.
425 */
426static int rpm_callback(int (*cb)(struct device *), struct device *dev)
427{
428 int retval;
429
430 if (dev->power.memalloc_noio) {
431 unsigned int noio_flag;
432
433 /*
434 * Deadlock might be caused if memory allocation with
435 * GFP_KERNEL happens inside runtime_suspend and
436 * runtime_resume callbacks of one block device's
437 * ancestor or the block device itself. Network
438 * device might be thought as part of iSCSI block
439 * device, so network device and its ancestor should
440 * be marked as memalloc_noio too.
441 */
442 noio_flag = memalloc_noio_save();
443 retval = __rpm_callback(cb, dev);
444 memalloc_noio_restore(noio_flag);
445 } else {
446 retval = __rpm_callback(cb, dev);
447 }
448
449 dev->power.runtime_error = retval;
450 return retval != -EACCES ? retval : -EIO;
451}
452
453/**
454 * rpm_idle - Notify device bus type if the device can be suspended.
455 * @dev: Device to notify the bus type about.
456 * @rpmflags: Flag bits.
457 *
458 * Check if the device's runtime PM status allows it to be suspended. If
459 * another idle notification has been started earlier, return immediately. If
460 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
461 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
462 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
463 *
464 * This function must be called under dev->power.lock with interrupts disabled.
465 */
466static int rpm_idle(struct device *dev, int rpmflags)
467{
468 int (*callback)(struct device *);
469 int retval;
470
471 trace_rpm_idle_rcuidle(dev, rpmflags);
472 retval = rpm_check_suspend_allowed(dev);
473 if (retval < 0)
474 ; /* Conditions are wrong. */
475
476 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
477 else if (dev->power.runtime_status != RPM_ACTIVE)
478 retval = -EAGAIN;
479
480 /*
481 * Any pending request other than an idle notification takes
482 * precedence over us, except that the timer may be running.
483 */
484 else if (dev->power.request_pending &&
485 dev->power.request > RPM_REQ_IDLE)
486 retval = -EAGAIN;
487
488 /* Act as though RPM_NOWAIT is always set. */
489 else if (dev->power.idle_notification)
490 retval = -EINPROGRESS;
491
492 if (retval)
493 goto out;
494
495 /* Pending requests need to be canceled. */
496 dev->power.request = RPM_REQ_NONE;
497
498 callback = RPM_GET_CALLBACK(dev, runtime_idle);
499
500 /* If no callback assume success. */
501 if (!callback || dev->power.no_callbacks)
502 goto out;
503
504 /* Carry out an asynchronous or a synchronous idle notification. */
505 if (rpmflags & RPM_ASYNC) {
506 dev->power.request = RPM_REQ_IDLE;
507 if (!dev->power.request_pending) {
508 dev->power.request_pending = true;
509 queue_work(pm_wq, &dev->power.work);
510 }
511 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
512 return 0;
513 }
514
515 dev->power.idle_notification = true;
516
517 if (dev->power.irq_safe)
518 spin_unlock(&dev->power.lock);
519 else
520 spin_unlock_irq(&dev->power.lock);
521
522 retval = callback(dev);
523
524 if (dev->power.irq_safe)
525 spin_lock(&dev->power.lock);
526 else
527 spin_lock_irq(&dev->power.lock);
528
529 dev->power.idle_notification = false;
530 wake_up_all(&dev->power.wait_queue);
531
532 out:
533 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
534 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
535}
536
537/**
538 * rpm_suspend - Carry out runtime suspend of given device.
539 * @dev: Device to suspend.
540 * @rpmflags: Flag bits.
541 *
542 * Check if the device's runtime PM status allows it to be suspended.
543 * Cancel a pending idle notification, autosuspend or suspend. If
544 * another suspend has been started earlier, either return immediately
545 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
546 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
547 * otherwise run the ->runtime_suspend() callback directly. When
548 * ->runtime_suspend succeeded, if a deferred resume was requested while
549 * the callback was running then carry it out, otherwise send an idle
550 * notification for its parent (if the suspend succeeded and both
551 * ignore_children of parent->power and irq_safe of dev->power are not set).
552 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
553 * flag is set and the next autosuspend-delay expiration time is in the
554 * future, schedule another autosuspend attempt.
555 *
556 * This function must be called under dev->power.lock with interrupts disabled.
557 */
558static int rpm_suspend(struct device *dev, int rpmflags)
559 __releases(&dev->power.lock) __acquires(&dev->power.lock)
560{
561 int (*callback)(struct device *);
562 struct device *parent = NULL;
563 int retval;
564
565 trace_rpm_suspend_rcuidle(dev, rpmflags);
566
567 repeat:
568 retval = rpm_check_suspend_allowed(dev);
569 if (retval < 0)
570 goto out; /* Conditions are wrong. */
571
572 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
573 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
574 retval = -EAGAIN;
575
576 if (retval)
577 goto out;
578
579 /* If the autosuspend_delay time hasn't expired yet, reschedule. */
580 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
581 u64 expires = pm_runtime_autosuspend_expiration(dev);
582
583 if (expires != 0) {
584 /* Pending requests need to be canceled. */
585 dev->power.request = RPM_REQ_NONE;
586
587 /*
588 * Optimization: If the timer is already running and is
589 * set to expire at or before the autosuspend delay,
590 * avoid the overhead of resetting it. Just let it
591 * expire; pm_suspend_timer_fn() will take care of the
592 * rest.
593 */
594 if (!(dev->power.timer_expires &&
595 dev->power.timer_expires <= expires)) {
596 /*
597 * We add a slack of 25% to gather wakeups
598 * without sacrificing the granularity.
599 */
600 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
601 (NSEC_PER_MSEC >> 2);
602
603 dev->power.timer_expires = expires;
604 hrtimer_start_range_ns(&dev->power.suspend_timer,
605 ns_to_ktime(expires),
606 slack,
607 HRTIMER_MODE_ABS);
608 }
609 dev->power.timer_autosuspends = 1;
610 goto out;
611 }
612 }
613
614 /* Other scheduled or pending requests need to be canceled. */
615 pm_runtime_cancel_pending(dev);
616
617 if (dev->power.runtime_status == RPM_SUSPENDING) {
618 DEFINE_WAIT(wait);
619
620 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
621 retval = -EINPROGRESS;
622 goto out;
623 }
624
625 if (dev->power.irq_safe) {
626 spin_unlock(&dev->power.lock);
627
628 cpu_relax();
629
630 spin_lock(&dev->power.lock);
631 goto repeat;
632 }
633
634 /* Wait for the other suspend running in parallel with us. */
635 for (;;) {
636 prepare_to_wait(&dev->power.wait_queue, &wait,
637 TASK_UNINTERRUPTIBLE);
638 if (dev->power.runtime_status != RPM_SUSPENDING)
639 break;
640
641 spin_unlock_irq(&dev->power.lock);
642
643 schedule();
644
645 spin_lock_irq(&dev->power.lock);
646 }
647 finish_wait(&dev->power.wait_queue, &wait);
648 goto repeat;
649 }
650
651 if (dev->power.no_callbacks)
652 goto no_callback; /* Assume success. */
653
654 /* Carry out an asynchronous or a synchronous suspend. */
655 if (rpmflags & RPM_ASYNC) {
656 dev->power.request = (rpmflags & RPM_AUTO) ?
657 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
658 if (!dev->power.request_pending) {
659 dev->power.request_pending = true;
660 queue_work(pm_wq, &dev->power.work);
661 }
662 goto out;
663 }
664
665 __update_runtime_status(dev, RPM_SUSPENDING);
666
667 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
668
669 dev_pm_enable_wake_irq_check(dev, true);
670 retval = rpm_callback(callback, dev);
671 if (retval)
672 goto fail;
673
674 dev_pm_enable_wake_irq_complete(dev);
675
676 no_callback:
677 __update_runtime_status(dev, RPM_SUSPENDED);
678 pm_runtime_deactivate_timer(dev);
679
680 if (dev->parent) {
681 parent = dev->parent;
682 atomic_add_unless(&parent->power.child_count, -1, 0);
683 }
684 wake_up_all(&dev->power.wait_queue);
685
686 if (dev->power.deferred_resume) {
687 dev->power.deferred_resume = false;
688 rpm_resume(dev, 0);
689 retval = -EAGAIN;
690 goto out;
691 }
692
693 if (dev->power.irq_safe)
694 goto out;
695
696 /* Maybe the parent is now able to suspend. */
697 if (parent && !parent->power.ignore_children) {
698 spin_unlock(&dev->power.lock);
699
700 spin_lock(&parent->power.lock);
701 rpm_idle(parent, RPM_ASYNC);
702 spin_unlock(&parent->power.lock);
703
704 spin_lock(&dev->power.lock);
705 }
706 /* Maybe the suppliers are now able to suspend. */
707 if (dev->power.links_count > 0) {
708 spin_unlock_irq(&dev->power.lock);
709
710 rpm_suspend_suppliers(dev);
711
712 spin_lock_irq(&dev->power.lock);
713 }
714
715 out:
716 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
717
718 return retval;
719
720 fail:
721 dev_pm_disable_wake_irq_check(dev, true);
722 __update_runtime_status(dev, RPM_ACTIVE);
723 dev->power.deferred_resume = false;
724 wake_up_all(&dev->power.wait_queue);
725
726 if (retval == -EAGAIN || retval == -EBUSY) {
727 dev->power.runtime_error = 0;
728
729 /*
730 * If the callback routine failed an autosuspend, and
731 * if the last_busy time has been updated so that there
732 * is a new autosuspend expiration time, automatically
733 * reschedule another autosuspend.
734 */
735 if ((rpmflags & RPM_AUTO) &&
736 pm_runtime_autosuspend_expiration(dev) != 0)
737 goto repeat;
738 } else {
739 pm_runtime_cancel_pending(dev);
740 }
741 goto out;
742}
743
744/**
745 * rpm_resume - Carry out runtime resume of given device.
746 * @dev: Device to resume.
747 * @rpmflags: Flag bits.
748 *
749 * Check if the device's runtime PM status allows it to be resumed. Cancel
750 * any scheduled or pending requests. If another resume has been started
751 * earlier, either return immediately or wait for it to finish, depending on the
752 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
753 * parallel with this function, either tell the other process to resume after
754 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
755 * flag is set then queue a resume request; otherwise run the
756 * ->runtime_resume() callback directly. Queue an idle notification for the
757 * device if the resume succeeded.
758 *
759 * This function must be called under dev->power.lock with interrupts disabled.
760 */
761static int rpm_resume(struct device *dev, int rpmflags)
762 __releases(&dev->power.lock) __acquires(&dev->power.lock)
763{
764 int (*callback)(struct device *);
765 struct device *parent = NULL;
766 int retval = 0;
767
768 trace_rpm_resume_rcuidle(dev, rpmflags);
769
770 repeat:
771 if (dev->power.runtime_error) {
772 retval = -EINVAL;
773 } else if (dev->power.disable_depth > 0) {
774 if (dev->power.runtime_status == RPM_ACTIVE &&
775 dev->power.last_status == RPM_ACTIVE)
776 retval = 1;
777 else
778 retval = -EACCES;
779 }
780 if (retval)
781 goto out;
782
783 /*
784 * Other scheduled or pending requests need to be canceled. Small
785 * optimization: If an autosuspend timer is running, leave it running
786 * rather than cancelling it now only to restart it again in the near
787 * future.
788 */
789 dev->power.request = RPM_REQ_NONE;
790 if (!dev->power.timer_autosuspends)
791 pm_runtime_deactivate_timer(dev);
792
793 if (dev->power.runtime_status == RPM_ACTIVE) {
794 retval = 1;
795 goto out;
796 }
797
798 if (dev->power.runtime_status == RPM_RESUMING ||
799 dev->power.runtime_status == RPM_SUSPENDING) {
800 DEFINE_WAIT(wait);
801
802 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
803 if (dev->power.runtime_status == RPM_SUSPENDING) {
804 dev->power.deferred_resume = true;
805 if (rpmflags & RPM_NOWAIT)
806 retval = -EINPROGRESS;
807 } else {
808 retval = -EINPROGRESS;
809 }
810 goto out;
811 }
812
813 if (dev->power.irq_safe) {
814 spin_unlock(&dev->power.lock);
815
816 cpu_relax();
817
818 spin_lock(&dev->power.lock);
819 goto repeat;
820 }
821
822 /* Wait for the operation carried out in parallel with us. */
823 for (;;) {
824 prepare_to_wait(&dev->power.wait_queue, &wait,
825 TASK_UNINTERRUPTIBLE);
826 if (dev->power.runtime_status != RPM_RESUMING &&
827 dev->power.runtime_status != RPM_SUSPENDING)
828 break;
829
830 spin_unlock_irq(&dev->power.lock);
831
832 schedule();
833
834 spin_lock_irq(&dev->power.lock);
835 }
836 finish_wait(&dev->power.wait_queue, &wait);
837 goto repeat;
838 }
839
840 /*
841 * See if we can skip waking up the parent. This is safe only if
842 * power.no_callbacks is set, because otherwise we don't know whether
843 * the resume will actually succeed.
844 */
845 if (dev->power.no_callbacks && !parent && dev->parent) {
846 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
847 if (dev->parent->power.disable_depth > 0 ||
848 dev->parent->power.ignore_children ||
849 dev->parent->power.runtime_status == RPM_ACTIVE) {
850 atomic_inc(&dev->parent->power.child_count);
851 spin_unlock(&dev->parent->power.lock);
852 retval = 1;
853 goto no_callback; /* Assume success. */
854 }
855 spin_unlock(&dev->parent->power.lock);
856 }
857
858 /* Carry out an asynchronous or a synchronous resume. */
859 if (rpmflags & RPM_ASYNC) {
860 dev->power.request = RPM_REQ_RESUME;
861 if (!dev->power.request_pending) {
862 dev->power.request_pending = true;
863 queue_work(pm_wq, &dev->power.work);
864 }
865 retval = 0;
866 goto out;
867 }
868
869 if (!parent && dev->parent) {
870 /*
871 * Increment the parent's usage counter and resume it if
872 * necessary. Not needed if dev is irq-safe; then the
873 * parent is permanently resumed.
874 */
875 parent = dev->parent;
876 if (dev->power.irq_safe)
877 goto skip_parent;
878
879 spin_unlock(&dev->power.lock);
880
881 pm_runtime_get_noresume(parent);
882
883 spin_lock(&parent->power.lock);
884 /*
885 * Resume the parent if it has runtime PM enabled and not been
886 * set to ignore its children.
887 */
888 if (!parent->power.disable_depth &&
889 !parent->power.ignore_children) {
890 rpm_resume(parent, 0);
891 if (parent->power.runtime_status != RPM_ACTIVE)
892 retval = -EBUSY;
893 }
894 spin_unlock(&parent->power.lock);
895
896 spin_lock(&dev->power.lock);
897 if (retval)
898 goto out;
899
900 goto repeat;
901 }
902 skip_parent:
903
904 if (dev->power.no_callbacks)
905 goto no_callback; /* Assume success. */
906
907 __update_runtime_status(dev, RPM_RESUMING);
908
909 callback = RPM_GET_CALLBACK(dev, runtime_resume);
910
911 dev_pm_disable_wake_irq_check(dev, false);
912 retval = rpm_callback(callback, dev);
913 if (retval) {
914 __update_runtime_status(dev, RPM_SUSPENDED);
915 pm_runtime_cancel_pending(dev);
916 dev_pm_enable_wake_irq_check(dev, false);
917 } else {
918 no_callback:
919 __update_runtime_status(dev, RPM_ACTIVE);
920 pm_runtime_mark_last_busy(dev);
921 if (parent)
922 atomic_inc(&parent->power.child_count);
923 }
924 wake_up_all(&dev->power.wait_queue);
925
926 if (retval >= 0)
927 rpm_idle(dev, RPM_ASYNC);
928
929 out:
930 if (parent && !dev->power.irq_safe) {
931 spin_unlock_irq(&dev->power.lock);
932
933 pm_runtime_put(parent);
934
935 spin_lock_irq(&dev->power.lock);
936 }
937
938 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
939
940 return retval;
941}
942
943/**
944 * pm_runtime_work - Universal runtime PM work function.
945 * @work: Work structure used for scheduling the execution of this function.
946 *
947 * Use @work to get the device object the work is to be done for, determine what
948 * is to be done and execute the appropriate runtime PM function.
949 */
950static void pm_runtime_work(struct work_struct *work)
951{
952 struct device *dev = container_of(work, struct device, power.work);
953 enum rpm_request req;
954
955 spin_lock_irq(&dev->power.lock);
956
957 if (!dev->power.request_pending)
958 goto out;
959
960 req = dev->power.request;
961 dev->power.request = RPM_REQ_NONE;
962 dev->power.request_pending = false;
963
964 switch (req) {
965 case RPM_REQ_NONE:
966 break;
967 case RPM_REQ_IDLE:
968 rpm_idle(dev, RPM_NOWAIT);
969 break;
970 case RPM_REQ_SUSPEND:
971 rpm_suspend(dev, RPM_NOWAIT);
972 break;
973 case RPM_REQ_AUTOSUSPEND:
974 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
975 break;
976 case RPM_REQ_RESUME:
977 rpm_resume(dev, RPM_NOWAIT);
978 break;
979 }
980
981 out:
982 spin_unlock_irq(&dev->power.lock);
983}
984
985/**
986 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
987 * @timer: hrtimer used by pm_schedule_suspend().
988 *
989 * Check if the time is right and queue a suspend request.
990 */
991static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
992{
993 struct device *dev = container_of(timer, struct device, power.suspend_timer);
994 unsigned long flags;
995 u64 expires;
996
997 spin_lock_irqsave(&dev->power.lock, flags);
998
999 expires = dev->power.timer_expires;
1000 /*
1001 * If 'expires' is after the current time, we've been called
1002 * too early.
1003 */
1004 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
1005 dev->power.timer_expires = 0;
1006 rpm_suspend(dev, dev->power.timer_autosuspends ?
1007 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
1008 }
1009
1010 spin_unlock_irqrestore(&dev->power.lock, flags);
1011
1012 return HRTIMER_NORESTART;
1013}
1014
1015/**
1016 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1017 * @dev: Device to suspend.
1018 * @delay: Time to wait before submitting a suspend request, in milliseconds.
1019 */
1020int pm_schedule_suspend(struct device *dev, unsigned int delay)
1021{
1022 unsigned long flags;
1023 u64 expires;
1024 int retval;
1025
1026 spin_lock_irqsave(&dev->power.lock, flags);
1027
1028 if (!delay) {
1029 retval = rpm_suspend(dev, RPM_ASYNC);
1030 goto out;
1031 }
1032
1033 retval = rpm_check_suspend_allowed(dev);
1034 if (retval)
1035 goto out;
1036
1037 /* Other scheduled or pending requests need to be canceled. */
1038 pm_runtime_cancel_pending(dev);
1039
1040 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1041 dev->power.timer_expires = expires;
1042 dev->power.timer_autosuspends = 0;
1043 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1044
1045 out:
1046 spin_unlock_irqrestore(&dev->power.lock, flags);
1047
1048 return retval;
1049}
1050EXPORT_SYMBOL_GPL(pm_schedule_suspend);
1051
1052static int rpm_drop_usage_count(struct device *dev)
1053{
1054 int ret;
1055
1056 ret = atomic_sub_return(1, &dev->power.usage_count);
1057 if (ret >= 0)
1058 return ret;
1059
1060 /*
1061 * Because rpm_resume() does not check the usage counter, it will resume
1062 * the device even if the usage counter is 0 or negative, so it is
1063 * sufficient to increment the usage counter here to reverse the change
1064 * made above.
1065 */
1066 atomic_inc(&dev->power.usage_count);
1067 dev_warn(dev, "Runtime PM usage count underflow!\n");
1068 return -EINVAL;
1069}
1070
1071/**
1072 * __pm_runtime_idle - Entry point for runtime idle operations.
1073 * @dev: Device to send idle notification for.
1074 * @rpmflags: Flag bits.
1075 *
1076 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1077 * return immediately if it is larger than zero (if it becomes negative, log a
1078 * warning, increment it, and return an error). Then carry out an idle
1079 * notification, either synchronous or asynchronous.
1080 *
1081 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1082 * or if pm_runtime_irq_safe() has been called.
1083 */
1084int __pm_runtime_idle(struct device *dev, int rpmflags)
1085{
1086 unsigned long flags;
1087 int retval;
1088
1089 if (rpmflags & RPM_GET_PUT) {
1090 retval = rpm_drop_usage_count(dev);
1091 if (retval < 0) {
1092 return retval;
1093 } else if (retval > 0) {
1094 trace_rpm_usage_rcuidle(dev, rpmflags);
1095 return 0;
1096 }
1097 }
1098
1099 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1100
1101 spin_lock_irqsave(&dev->power.lock, flags);
1102 retval = rpm_idle(dev, rpmflags);
1103 spin_unlock_irqrestore(&dev->power.lock, flags);
1104
1105 return retval;
1106}
1107EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1108
1109/**
1110 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1111 * @dev: Device to suspend.
1112 * @rpmflags: Flag bits.
1113 *
1114 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1115 * return immediately if it is larger than zero (if it becomes negative, log a
1116 * warning, increment it, and return an error). Then carry out a suspend,
1117 * either synchronous or asynchronous.
1118 *
1119 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1120 * or if pm_runtime_irq_safe() has been called.
1121 */
1122int __pm_runtime_suspend(struct device *dev, int rpmflags)
1123{
1124 unsigned long flags;
1125 int retval;
1126
1127 if (rpmflags & RPM_GET_PUT) {
1128 retval = rpm_drop_usage_count(dev);
1129 if (retval < 0) {
1130 return retval;
1131 } else if (retval > 0) {
1132 trace_rpm_usage_rcuidle(dev, rpmflags);
1133 return 0;
1134 }
1135 }
1136
1137 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1138
1139 spin_lock_irqsave(&dev->power.lock, flags);
1140 retval = rpm_suspend(dev, rpmflags);
1141 spin_unlock_irqrestore(&dev->power.lock, flags);
1142
1143 return retval;
1144}
1145EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1146
1147/**
1148 * __pm_runtime_resume - Entry point for runtime resume operations.
1149 * @dev: Device to resume.
1150 * @rpmflags: Flag bits.
1151 *
1152 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
1153 * carry out a resume, either synchronous or asynchronous.
1154 *
1155 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1156 * or if pm_runtime_irq_safe() has been called.
1157 */
1158int __pm_runtime_resume(struct device *dev, int rpmflags)
1159{
1160 unsigned long flags;
1161 int retval;
1162
1163 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1164 dev->power.runtime_status != RPM_ACTIVE);
1165
1166 if (rpmflags & RPM_GET_PUT)
1167 atomic_inc(&dev->power.usage_count);
1168
1169 spin_lock_irqsave(&dev->power.lock, flags);
1170 retval = rpm_resume(dev, rpmflags);
1171 spin_unlock_irqrestore(&dev->power.lock, flags);
1172
1173 return retval;
1174}
1175EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1176
1177/**
1178 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1179 * @dev: Device to handle.
1180 * @ign_usage_count: Whether or not to look at the current usage counter value.
1181 *
1182 * Return -EINVAL if runtime PM is disabled for @dev.
1183 *
1184 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1185 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1186 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1187 * without changing the usage counter.
1188 *
1189 * If @ign_usage_count is %true, this function can be used to prevent suspending
1190 * the device when its runtime PM status is %RPM_ACTIVE.
1191 *
1192 * If @ign_usage_count is %false, this function can be used to prevent
1193 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1194 * runtime PM usage counter is not zero.
1195 *
1196 * The caller is responsible for decrementing the runtime PM usage counter of
1197 * @dev after this function has returned a positive value for it.
1198 */
1199int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1200{
1201 unsigned long flags;
1202 int retval;
1203
1204 spin_lock_irqsave(&dev->power.lock, flags);
1205 if (dev->power.disable_depth > 0) {
1206 retval = -EINVAL;
1207 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1208 retval = 0;
1209 } else if (ign_usage_count) {
1210 retval = 1;
1211 atomic_inc(&dev->power.usage_count);
1212 } else {
1213 retval = atomic_inc_not_zero(&dev->power.usage_count);
1214 }
1215 trace_rpm_usage_rcuidle(dev, 0);
1216 spin_unlock_irqrestore(&dev->power.lock, flags);
1217
1218 return retval;
1219}
1220EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1221
1222/**
1223 * __pm_runtime_set_status - Set runtime PM status of a device.
1224 * @dev: Device to handle.
1225 * @status: New runtime PM status of the device.
1226 *
1227 * If runtime PM of the device is disabled or its power.runtime_error field is
1228 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1229 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1230 * However, if the device has a parent and the parent is not active, and the
1231 * parent's power.ignore_children flag is unset, the device's status cannot be
1232 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1233 *
1234 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1235 * and the device parent's counter of unsuspended children is modified to
1236 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
1237 * notification request for the parent is submitted.
1238 *
1239 * If @dev has any suppliers (as reflected by device links to them), and @status
1240 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1241 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1242 * of the @status value) and the suppliers will be deacticated on exit. The
1243 * error returned by the failing supplier activation will be returned in that
1244 * case.
1245 */
1246int __pm_runtime_set_status(struct device *dev, unsigned int status)
1247{
1248 struct device *parent = dev->parent;
1249 bool notify_parent = false;
1250 unsigned long flags;
1251 int error = 0;
1252
1253 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1254 return -EINVAL;
1255
1256 spin_lock_irqsave(&dev->power.lock, flags);
1257
1258 /*
1259 * Prevent PM-runtime from being enabled for the device or return an
1260 * error if it is enabled already and working.
1261 */
1262 if (dev->power.runtime_error || dev->power.disable_depth)
1263 dev->power.disable_depth++;
1264 else
1265 error = -EAGAIN;
1266
1267 spin_unlock_irqrestore(&dev->power.lock, flags);
1268
1269 if (error)
1270 return error;
1271
1272 /*
1273 * If the new status is RPM_ACTIVE, the suppliers can be activated
1274 * upfront regardless of the current status, because next time
1275 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1276 * involved will be dropped down to one anyway.
1277 */
1278 if (status == RPM_ACTIVE) {
1279 int idx = device_links_read_lock();
1280
1281 error = rpm_get_suppliers(dev);
1282 if (error)
1283 status = RPM_SUSPENDED;
1284
1285 device_links_read_unlock(idx);
1286 }
1287
1288 spin_lock_irqsave(&dev->power.lock, flags);
1289
1290 if (dev->power.runtime_status == status || !parent)
1291 goto out_set;
1292
1293 if (status == RPM_SUSPENDED) {
1294 atomic_add_unless(&parent->power.child_count, -1, 0);
1295 notify_parent = !parent->power.ignore_children;
1296 } else {
1297 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1298
1299 /*
1300 * It is invalid to put an active child under a parent that is
1301 * not active, has runtime PM enabled and the
1302 * 'power.ignore_children' flag unset.
1303 */
1304 if (!parent->power.disable_depth &&
1305 !parent->power.ignore_children &&
1306 parent->power.runtime_status != RPM_ACTIVE) {
1307 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1308 dev_name(dev),
1309 dev_name(parent));
1310 error = -EBUSY;
1311 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1312 atomic_inc(&parent->power.child_count);
1313 }
1314
1315 spin_unlock(&parent->power.lock);
1316
1317 if (error) {
1318 status = RPM_SUSPENDED;
1319 goto out;
1320 }
1321 }
1322
1323 out_set:
1324 __update_runtime_status(dev, status);
1325 if (!error)
1326 dev->power.runtime_error = 0;
1327
1328 out:
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1330
1331 if (notify_parent)
1332 pm_request_idle(parent);
1333
1334 if (status == RPM_SUSPENDED) {
1335 int idx = device_links_read_lock();
1336
1337 rpm_put_suppliers(dev);
1338
1339 device_links_read_unlock(idx);
1340 }
1341
1342 pm_runtime_enable(dev);
1343
1344 return error;
1345}
1346EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1347
1348/**
1349 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1350 * @dev: Device to handle.
1351 *
1352 * Flush all pending requests for the device from pm_wq and wait for all
1353 * runtime PM operations involving the device in progress to complete.
1354 *
1355 * Should be called under dev->power.lock with interrupts disabled.
1356 */
1357static void __pm_runtime_barrier(struct device *dev)
1358{
1359 pm_runtime_deactivate_timer(dev);
1360
1361 if (dev->power.request_pending) {
1362 dev->power.request = RPM_REQ_NONE;
1363 spin_unlock_irq(&dev->power.lock);
1364
1365 cancel_work_sync(&dev->power.work);
1366
1367 spin_lock_irq(&dev->power.lock);
1368 dev->power.request_pending = false;
1369 }
1370
1371 if (dev->power.runtime_status == RPM_SUSPENDING ||
1372 dev->power.runtime_status == RPM_RESUMING ||
1373 dev->power.idle_notification) {
1374 DEFINE_WAIT(wait);
1375
1376 /* Suspend, wake-up or idle notification in progress. */
1377 for (;;) {
1378 prepare_to_wait(&dev->power.wait_queue, &wait,
1379 TASK_UNINTERRUPTIBLE);
1380 if (dev->power.runtime_status != RPM_SUSPENDING
1381 && dev->power.runtime_status != RPM_RESUMING
1382 && !dev->power.idle_notification)
1383 break;
1384 spin_unlock_irq(&dev->power.lock);
1385
1386 schedule();
1387
1388 spin_lock_irq(&dev->power.lock);
1389 }
1390 finish_wait(&dev->power.wait_queue, &wait);
1391 }
1392}
1393
1394/**
1395 * pm_runtime_barrier - Flush pending requests and wait for completions.
1396 * @dev: Device to handle.
1397 *
1398 * Prevent the device from being suspended by incrementing its usage counter and
1399 * if there's a pending resume request for the device, wake the device up.
1400 * Next, make sure that all pending requests for the device have been flushed
1401 * from pm_wq and wait for all runtime PM operations involving the device in
1402 * progress to complete.
1403 *
1404 * Return value:
1405 * 1, if there was a resume request pending and the device had to be woken up,
1406 * 0, otherwise
1407 */
1408int pm_runtime_barrier(struct device *dev)
1409{
1410 int retval = 0;
1411
1412 pm_runtime_get_noresume(dev);
1413 spin_lock_irq(&dev->power.lock);
1414
1415 if (dev->power.request_pending
1416 && dev->power.request == RPM_REQ_RESUME) {
1417 rpm_resume(dev, 0);
1418 retval = 1;
1419 }
1420
1421 __pm_runtime_barrier(dev);
1422
1423 spin_unlock_irq(&dev->power.lock);
1424 pm_runtime_put_noidle(dev);
1425
1426 return retval;
1427}
1428EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1429
1430/**
1431 * __pm_runtime_disable - Disable runtime PM of a device.
1432 * @dev: Device to handle.
1433 * @check_resume: If set, check if there's a resume request for the device.
1434 *
1435 * Increment power.disable_depth for the device and if it was zero previously,
1436 * cancel all pending runtime PM requests for the device and wait for all
1437 * operations in progress to complete. The device can be either active or
1438 * suspended after its runtime PM has been disabled.
1439 *
1440 * If @check_resume is set and there's a resume request pending when
1441 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1442 * function will wake up the device before disabling its runtime PM.
1443 */
1444void __pm_runtime_disable(struct device *dev, bool check_resume)
1445{
1446 spin_lock_irq(&dev->power.lock);
1447
1448 if (dev->power.disable_depth > 0) {
1449 dev->power.disable_depth++;
1450 goto out;
1451 }
1452
1453 /*
1454 * Wake up the device if there's a resume request pending, because that
1455 * means there probably is some I/O to process and disabling runtime PM
1456 * shouldn't prevent the device from processing the I/O.
1457 */
1458 if (check_resume && dev->power.request_pending &&
1459 dev->power.request == RPM_REQ_RESUME) {
1460 /*
1461 * Prevent suspends and idle notifications from being carried
1462 * out after we have woken up the device.
1463 */
1464 pm_runtime_get_noresume(dev);
1465
1466 rpm_resume(dev, 0);
1467
1468 pm_runtime_put_noidle(dev);
1469 }
1470
1471 /* Update time accounting before disabling PM-runtime. */
1472 update_pm_runtime_accounting(dev);
1473
1474 if (!dev->power.disable_depth++) {
1475 __pm_runtime_barrier(dev);
1476 dev->power.last_status = dev->power.runtime_status;
1477 }
1478
1479 out:
1480 spin_unlock_irq(&dev->power.lock);
1481}
1482EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1483
1484/**
1485 * pm_runtime_enable - Enable runtime PM of a device.
1486 * @dev: Device to handle.
1487 */
1488void pm_runtime_enable(struct device *dev)
1489{
1490 unsigned long flags;
1491
1492 spin_lock_irqsave(&dev->power.lock, flags);
1493
1494 if (!dev->power.disable_depth) {
1495 dev_warn(dev, "Unbalanced %s!\n", __func__);
1496 goto out;
1497 }
1498
1499 if (--dev->power.disable_depth > 0)
1500 goto out;
1501
1502 dev->power.last_status = RPM_INVALID;
1503 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1504
1505 if (dev->power.runtime_status == RPM_SUSPENDED &&
1506 !dev->power.ignore_children &&
1507 atomic_read(&dev->power.child_count) > 0)
1508 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1509
1510out:
1511 spin_unlock_irqrestore(&dev->power.lock, flags);
1512}
1513EXPORT_SYMBOL_GPL(pm_runtime_enable);
1514
1515static void pm_runtime_disable_action(void *data)
1516{
1517 pm_runtime_dont_use_autosuspend(data);
1518 pm_runtime_disable(data);
1519}
1520
1521/**
1522 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1523 *
1524 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
1525 * you at driver exit time if needed.
1526 *
1527 * @dev: Device to handle.
1528 */
1529int devm_pm_runtime_enable(struct device *dev)
1530{
1531 pm_runtime_enable(dev);
1532
1533 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1534}
1535EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
1536
1537/**
1538 * pm_runtime_forbid - Block runtime PM of a device.
1539 * @dev: Device to handle.
1540 *
1541 * Increase the device's usage count and clear its power.runtime_auto flag,
1542 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1543 * for it.
1544 */
1545void pm_runtime_forbid(struct device *dev)
1546{
1547 spin_lock_irq(&dev->power.lock);
1548 if (!dev->power.runtime_auto)
1549 goto out;
1550
1551 dev->power.runtime_auto = false;
1552 atomic_inc(&dev->power.usage_count);
1553 rpm_resume(dev, 0);
1554
1555 out:
1556 spin_unlock_irq(&dev->power.lock);
1557}
1558EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1559
1560/**
1561 * pm_runtime_allow - Unblock runtime PM of a device.
1562 * @dev: Device to handle.
1563 *
1564 * Decrease the device's usage count and set its power.runtime_auto flag.
1565 */
1566void pm_runtime_allow(struct device *dev)
1567{
1568 int ret;
1569
1570 spin_lock_irq(&dev->power.lock);
1571 if (dev->power.runtime_auto)
1572 goto out;
1573
1574 dev->power.runtime_auto = true;
1575 ret = rpm_drop_usage_count(dev);
1576 if (ret == 0)
1577 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1578 else if (ret > 0)
1579 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1580
1581 out:
1582 spin_unlock_irq(&dev->power.lock);
1583}
1584EXPORT_SYMBOL_GPL(pm_runtime_allow);
1585
1586/**
1587 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1588 * @dev: Device to handle.
1589 *
1590 * Set the power.no_callbacks flag, which tells the PM core that this
1591 * device is power-managed through its parent and has no runtime PM
1592 * callbacks of its own. The runtime sysfs attributes will be removed.
1593 */
1594void pm_runtime_no_callbacks(struct device *dev)
1595{
1596 spin_lock_irq(&dev->power.lock);
1597 dev->power.no_callbacks = 1;
1598 spin_unlock_irq(&dev->power.lock);
1599 if (device_is_registered(dev))
1600 rpm_sysfs_remove(dev);
1601}
1602EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1603
1604/**
1605 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1606 * @dev: Device to handle
1607 *
1608 * Set the power.irq_safe flag, which tells the PM core that the
1609 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1610 * always be invoked with the spinlock held and interrupts disabled. It also
1611 * causes the parent's usage counter to be permanently incremented, preventing
1612 * the parent from runtime suspending -- otherwise an irq-safe child might have
1613 * to wait for a non-irq-safe parent.
1614 */
1615void pm_runtime_irq_safe(struct device *dev)
1616{
1617 if (dev->parent)
1618 pm_runtime_get_sync(dev->parent);
1619
1620 spin_lock_irq(&dev->power.lock);
1621 dev->power.irq_safe = 1;
1622 spin_unlock_irq(&dev->power.lock);
1623}
1624EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1625
1626/**
1627 * update_autosuspend - Handle a change to a device's autosuspend settings.
1628 * @dev: Device to handle.
1629 * @old_delay: The former autosuspend_delay value.
1630 * @old_use: The former use_autosuspend value.
1631 *
1632 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1633 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1634 *
1635 * This function must be called under dev->power.lock with interrupts disabled.
1636 */
1637static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1638{
1639 int delay = dev->power.autosuspend_delay;
1640
1641 /* Should runtime suspend be prevented now? */
1642 if (dev->power.use_autosuspend && delay < 0) {
1643
1644 /* If it used to be allowed then prevent it. */
1645 if (!old_use || old_delay >= 0) {
1646 atomic_inc(&dev->power.usage_count);
1647 rpm_resume(dev, 0);
1648 } else {
1649 trace_rpm_usage_rcuidle(dev, 0);
1650 }
1651 }
1652
1653 /* Runtime suspend should be allowed now. */
1654 else {
1655
1656 /* If it used to be prevented then allow it. */
1657 if (old_use && old_delay < 0)
1658 atomic_dec(&dev->power.usage_count);
1659
1660 /* Maybe we can autosuspend now. */
1661 rpm_idle(dev, RPM_AUTO);
1662 }
1663}
1664
1665/**
1666 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1667 * @dev: Device to handle.
1668 * @delay: Value of the new delay in milliseconds.
1669 *
1670 * Set the device's power.autosuspend_delay value. If it changes to negative
1671 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1672 * changes the other way, allow runtime suspends.
1673 */
1674void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1675{
1676 int old_delay, old_use;
1677
1678 spin_lock_irq(&dev->power.lock);
1679 old_delay = dev->power.autosuspend_delay;
1680 old_use = dev->power.use_autosuspend;
1681 dev->power.autosuspend_delay = delay;
1682 update_autosuspend(dev, old_delay, old_use);
1683 spin_unlock_irq(&dev->power.lock);
1684}
1685EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1686
1687/**
1688 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1689 * @dev: Device to handle.
1690 * @use: New value for use_autosuspend.
1691 *
1692 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1693 * suspends as needed.
1694 */
1695void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1696{
1697 int old_delay, old_use;
1698
1699 spin_lock_irq(&dev->power.lock);
1700 old_delay = dev->power.autosuspend_delay;
1701 old_use = dev->power.use_autosuspend;
1702 dev->power.use_autosuspend = use;
1703 update_autosuspend(dev, old_delay, old_use);
1704 spin_unlock_irq(&dev->power.lock);
1705}
1706EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1707
1708/**
1709 * pm_runtime_init - Initialize runtime PM fields in given device object.
1710 * @dev: Device object to initialize.
1711 */
1712void pm_runtime_init(struct device *dev)
1713{
1714 dev->power.runtime_status = RPM_SUSPENDED;
1715 dev->power.last_status = RPM_INVALID;
1716 dev->power.idle_notification = false;
1717
1718 dev->power.disable_depth = 1;
1719 atomic_set(&dev->power.usage_count, 0);
1720
1721 dev->power.runtime_error = 0;
1722
1723 atomic_set(&dev->power.child_count, 0);
1724 pm_suspend_ignore_children(dev, false);
1725 dev->power.runtime_auto = true;
1726
1727 dev->power.request_pending = false;
1728 dev->power.request = RPM_REQ_NONE;
1729 dev->power.deferred_resume = false;
1730 dev->power.needs_force_resume = 0;
1731 INIT_WORK(&dev->power.work, pm_runtime_work);
1732
1733 dev->power.timer_expires = 0;
1734 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1735 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1736
1737 init_waitqueue_head(&dev->power.wait_queue);
1738}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}
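
/*
 * Illustrative note (assumed usage pattern): the two helpers above are meant
 * to be paired around consumer activity, such as driver probe, so that
 * suppliers linked with DL_FLAG_PM_RUNTIME stay powered up while the
 * consumer needs them:
 *
 *	pm_runtime_get_suppliers(dev);
 *	... probe or otherwise access the consumer device ...
 *	pm_runtime_put_suppliers(dev);
 */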

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}
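
/*
 * Example (illustrative sketch, assumed caller code): the link handling
 * above comes into play for device links created with the DL_FLAG_PM_RUNTIME
 * flag, e.g.:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
 *	if (!link)
 *		return -ENODEV;
 *
 * From then on, runtime-resuming the consumer takes runtime PM references on
 * the supplier, and pm_runtime_drop_link() releases any such references left
 * over when the link goes away.
 */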

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED. Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent). Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state, and it should only be
 * used during system-wide PM transitions to sleep states. It assumes that
 * the analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the device is expected to have been put
 * into a low-power state by a call to pm_runtime_force_suspend(). Here we
 * reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume. Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
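
/*
 * Example (illustrative sketch, assumed driver code): the usual way to use
 * the two helpers above is to plug them straight into a driver's dev_pm_ops
 * as the system sleep callbacks, so that system-wide suspend reuses the
 * runtime PM callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *	};
 *
 * Here foo_pm_ops, foo_runtime_suspend() and foo_runtime_resume() are
 * hypothetical driver symbols.
 */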