// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
        list_for_each_entry_rcu(pos, head, member, \
                        device_links_read_lock_held())

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        /* Skip PM setup/initialization. */
        if (device_pm_not_required(dev))
                return;

        pr_debug("Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev->power.in_dpm_list = true;
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;

        pr_debug("Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        dev->power.in_dpm_list = false;
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
        if (!pm_print_times_enabled)
                return 0;

        dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
                 task_pid_nr(current),
                 dev->parent ? dev_name(dev->parent) : "none");
        return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  void *cb, int error)
{
        ktime_t rettime;

        if (!pm_print_times_enabled)
                return;

        rettime = ktime_get();
        dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
                 (unsigned long long)ktime_us_delta(rettime, calltime));
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

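/*
 * Illustrative note (not part of the original file): dpm_wait() only blocks
 * on devices that take part in asynchronous suspend/resume. A driver opts a
 * device into that path, typically at probe time, with the real helper
 * device_enable_async_suspend(); the foo_probe() name below is hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */
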
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * If the supplier goes away right after we've checked the link to it,
         * we'll wait for its completion to change the state, but that's fine,
         * because the only things that will block as a result are the SRCU
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);

        device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
        struct device *parent;

        /*
         * If the device is resumed asynchronously and the parent's callback
         * deletes both the device and the parent itself, the parent object may
         * be freed while this function is running, so avoid that by reference
         * counting the parent once more unless the device has been deleted
         * already (in which case return right away).
         */
        mutex_lock(&dpm_list_mtx);

        if (!device_pm_initialized(dev)) {
                mutex_unlock(&dpm_list_mtx);
                return false;
        }

        parent = get_device(dev->parent);

        mutex_unlock(&dpm_list_mtx);

        dpm_wait(parent, async);
        put_device(parent);

        dpm_wait_for_suppliers(dev, async);

        /*
         * If the parent's callback has deleted the device, attempting to resume
         * it would be invalid, so avoid doing that then.
         */
        return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * The status of a device link can only be changed from "dormant" by a
         * probe, but that cannot happen during system suspend/resume. In
         * theory it can change to "dormant" at that time, but then it is
         * reasonable to wait for the target device anyway (eg. if it goes
         * away, it's better to wait for it to go away completely and then
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);

        device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
        dpm_wait_for_children(dev, async);
        dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

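/*
 * Illustrative sketch (not part of the original file): pm_op() picks a member
 * of a struct dev_pm_ops supplied by a driver, subsystem, or power domain.
 * A minimal driver-side definition might use the real SET_SYSTEM_SLEEP_PM_OPS()
 * macro from <linux/pm.h>; the foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With these ops, pm_op(&foo_pm_ops, PMSG_SUSPEND) yields foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) yields foo_resume.
 */
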
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device to which the returned callback will
 * be applied while that callback is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device to which the returned callback will be applied
 * does not receive interrupts while that callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

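/*
 * Illustrative sketch (not part of the original file): the late/early and
 * noirq callbacks selected above are usually populated with the real helper
 * macros SET_LATE_SYSTEM_SLEEP_PM_OPS() and SET_NOIRQ_SYSTEM_SLEEP_PM_OPS()
 * from <linux/pm.h>; the foo_* names are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 */
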
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
        dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
                       int error)
{
        dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
                error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
                          const char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;

        pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
                  info ?: "", info ? " " : "", pm_verb(state.event),
                  error ? "aborted" : "complete",
                  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, const char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device *dev;
        struct task_struct *tsk;
        struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
        struct dpm_watchdog *wd = from_timer(wd, t, timer);

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL, KERN_EMERG);
        panic("%s %s: unrecoverable failure\n",
              dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

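/*
 * Illustrative note (not part of the original file): the watchdog above is
 * compiled in only when CONFIG_DPM_WATCHDOG is enabled, with the timeout
 * taken from CONFIG_DPM_WATCHDOG_TIMEOUT (in seconds). A hypothetical
 * .config fragment enabling it:
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=120
 */
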
/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
        if (pm_transition.event == PM_EVENT_RESTORE)
                return false;

        if (pm_transition.event == PM_EVENT_THAW)
                return dev_pm_skip_suspend(dev);

        return !dev->power.must_resume;
}

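/*
 * Illustrative sketch (not part of the original file): power.must_resume is
 * only ever left false for drivers that opt in to having their resume
 * callbacks skipped. A driver would typically do that at probe time with the
 * real helper dev_pm_set_driver_flags() from <linux/pm_runtime.h>:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * Middle-layer code may still clear power.may_skip_resume during suspend if
 * it decides the device must be resumed after all.
 */
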
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        bool skip_resume;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        skip_resume = dev_pm_skip_resume(dev);
        /*
         * If the driver callback is skipped below or by the middle layer
         * callback and device_resume_early() also skips the driver callback for
         * this device later, it needs to appear as "suspended" to PM-runtime,
         * so change its status accordingly.
         *
         * Otherwise, the device is going to be resumed, so set its PM-runtime
         * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
         * to avoid confusing drivers that don't use it.
         */
        if (skip_resume)
                pm_runtime_set_suspended(dev);
        else if (dev_pm_skip_suspend(dev))
                pm_runtime_set_active(dev);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (skip_resume)
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);

Skip:
        dev->power.is_noirq_suspended = false;

Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule_dev(func, dev);
                return true;
        }

        return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Start the async threads upfront so that their startup is not
         * delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry)
                dpm_async_fn(dev, async_resume_noirq);

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "noirq");
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
        dpm_noirq_resume_devices(state);

        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();

        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_resume(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);

Skip:
        dev->power.is_late_suspended = false;

Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Start the async threads upfront so that their startup is not
         * delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry)
                dpm_async_fn(dev, async_resume_early);

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        if (!dpm_wait_for_superior(dev, async))
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib. But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Driver;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry)
                dpm_async_fn(dev, async_resume);

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, NULL);

        cpufreq_resume();
        devfreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        const char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);

        /* Allow device probing and trigger re-probing of deferred devices */
        device_unblock_probing();
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
        struct device_link *link;
        int idx;

        if (dev->parent)
                dev->parent->power.must_resume = true;

        idx = device_links_read_lock();

        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                link->supplier->power.must_resume = true;

        device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_suspend(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }

Skip:
        dev->power.is_noirq_suspended = true;

        /*
         * Skipping the resume of devices that were in use right before the
         * system suspend (as indicated by their PM-runtime usage counters)
         * would be suboptimal. Also resume them if skipping their resume is
         * not allowed.
         */
        if (atomic_read(&dev->power.usage_count) > 1 ||
            !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
              dev->power.may_skip_resume))
                dev->power.must_resume = true;

        if (dev->power.must_resume)
                dpm_superior_set_must_resume(dev);

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_noirq))
                return 0;

        return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
        }
        dpm_show_time(starttime, state, error, "noirq");
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        int ret;

        cpuidle_pause();

        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();

        ret = dpm_noirq_suspend_devices(state);
        if (ret)
                dpm_resume_noirq(resume_event(state));

        return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
        struct device *parent = dev->parent;

        if (!parent)
                return;

        spin_lock_irq(&parent->power.lock);

        if (device_wakeup_path(dev) && !parent->power.ignore_children)
                parent->power.wakeup_path = true;

        spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_suspend(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }
        dpm_propagate_wakeup_to_parent(dev);

Skip:
        dev->power.is_late_suspended = true;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_late))
                return 0;

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        }
        dpm_show_time(starttime, state, error, "late");
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_suspend_late(state);
        if (error)
                goto out;

        error = dpm_suspend_noirq(state);
        if (error)
                dpm_resume_early(resume_event(state));

out:
        dpm_show_time(starttime, state, error, "end");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          const char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev, cb);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
        struct device_link *link;
        int idx;

        if (dev->parent) {
                spin_lock_irq(&dev->parent->power.lock);
                dev->parent->power.direct_complete = false;
                spin_unlock_irq(&dev->parent->power.lock);
        }

        idx = device_links_read_lock();

        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
                spin_lock_irq(&link->supplier->power.lock);
                link->supplier->power.direct_complete = false;
                spin_unlock_irq(&link->supplier->power.lock);
        }

        device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error) {
                dev->power.direct_complete = false;
                goto Complete;
        }

        /*
         * Wait for possible runtime PM transitions of the device in progress
         * to complete, and if there's a runtime resume request pending for it,
         * resume it before proceeding with invoking the system-wide suspend
         * callbacks for it.
         *
         * If the system-wide suspend callbacks below change the configuration
         * of the device, they must disable runtime PM for it or otherwise
         * ensure that its runtime-resume callbacks will not be confused by that
         * change in case they are invoked going forward.
         */
        pm_runtime_barrier(dev);

        if (pm_wakeup_pending()) {
                dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        /* Avoid direct_complete to let wakeup_path propagate. */
        if (device_may_wakeup(dev) || device_wakeup_path(dev))
                dev->power.direct_complete = false;

        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev)) {
                                pm_dev_dbg(dev, state, "direct-complete ");
                                goto Complete;
                        }

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dev->power.may_skip_resume = true;
        dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Run;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                               "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (device_may_wakeup(dev))
                        dev->power.wakeup_path = true;

                dpm_propagate_wakeup_to_parent(dev);
                dpm_clear_superiors_direct_complete(dev);
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        if (error)
                async_error = error;

        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend))
                return 0;

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        devfreq_suspend();
        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        }
        dpm_show_time(starttime, state, error, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device. To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = false;

        if (dev->power.no_pm_callbacks)
                goto unlock;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->prepare;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->prepare;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->prepare;

        if (!callback && dev->driver && dev->driver->pm)
                callback = dev->driver->pm->prepare;

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants". This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
                (ret > 0 || dev->power.no_pm_callbacks) &&
                !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

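/*
 * Illustrative sketch (not part of the original file): a driver can opt into
 * the direct_complete path computed above by returning a positive value from
 * its ->prepare() callback when the device is already runtime-suspended.
 * The foo_prepare() name is hypothetical; pm_runtime_suspended() is the real
 * helper:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 */
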
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        /*
         * Give the known devices a chance to complete their probes before we
         * disable further probing. This synchronization point is important at
         * least at boot time and during hibernation restore.
         */
        wait_for_device_probe();
        /*
         * Probing devices during suspend or hibernation is unsafe and would
         * make system behavior unpredictable, so prohibit device probing here
         * and defer the probes instead. The normal behavior will be restored
         * in dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        dev_info(dev, "not prepared for power transition: code %d\n",
                                 error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        dpm_show_time(starttime, state, error, "start");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

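/*
 * Illustrative sketch (not part of the original file): a driver whose device
 * depends on another device outside the usual parent/child or device-link
 * ordering can serialize against it from a suspend/resume callback. The
 * foo_resume() name and the foo->companion field are hypothetical:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, foo->companion);
 *		return 0;
 *	}
 */
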
1955/**
1956 * dpm_for_each_dev - device iterator.
1957 * @data: data for the callback.
1958 * @fn: function to be called for each device.
1959 *
1960 * Iterate over devices in dpm_list, and call @fn for each device,
1961 * passing it @data.
1962 */
1963void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1964{
1965 struct device *dev;
1966
1967 if (!fn)
1968 return;
1969
1970 device_pm_lock();
1971 list_for_each_entry(dev, &dpm_list, power.entry)
1972 fn(dev, data);
1973 device_pm_unlock();
1974}
1975EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1976
1977static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1978{
1979 if (!ops)
1980 return true;
1981
1982 return !ops->prepare &&
1983 !ops->suspend &&
1984 !ops->suspend_late &&
1985 !ops->suspend_noirq &&
1986 !ops->resume_noirq &&
1987 !ops->resume_early &&
1988 !ops->resume &&
1989 !ops->complete;
1990}
1991
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

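/**
 * dev_pm_skip_suspend - System-wide device suspend optimization check.
 * @dev: Target device.
 *
 * Return %true if DPM_FLAG_SMART_SUSPEND is set for @dev and its runtime PM
 * status is "suspended", in which case its "late" and "noirq" suspend
 * callbacks may be skipped.
 */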
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}