Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18#define pr_fmt(fmt) "PM: " fmt
19
20#include <linux/device.h>
21#include <linux/export.h>
22#include <linux/mutex.h>
23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
25#include <linux/pm-trace.h>
26#include <linux/pm_wakeirq.h>
27#include <linux/interrupt.h>
28#include <linux/sched.h>
29#include <linux/sched/debug.h>
30#include <linux/async.h>
31#include <linux/suspend.h>
32#include <trace/events/power.h>
33#include <linux/cpufreq.h>
34#include <linux/cpuidle.h>
35#include <linux/devfreq.h>
36#include <linux/timer.h>
37
38#include "../base.h"
39#include "power.h"
40
41typedef int (*pm_callback_t)(struct device *);
42
43#define list_for_each_entry_rcu_locked(pos, head, member) \
44 list_for_each_entry_rcu(pos, head, member, \
45 device_links_read_lock_held())
46
47/*
48 * The entries in the dpm_list list are in a depth first order, simply
49 * because children are guaranteed to be discovered after parents, and
50 * are inserted at the back of the list on discovery.
51 *
52 * Since device_pm_add() may be called with a device lock held,
53 * we must never try to acquire a device lock while holding
54 * dpm_list_mutex.
55 */
56
57LIST_HEAD(dpm_list);
58static LIST_HEAD(dpm_prepared_list);
59static LIST_HEAD(dpm_suspended_list);
60static LIST_HEAD(dpm_late_early_list);
61static LIST_HEAD(dpm_noirq_list);
62
63struct suspend_stats suspend_stats;
64static DEFINE_MUTEX(dpm_list_mtx);
65static pm_message_t pm_transition;
66
67static int async_error;
68
69static const char *pm_verb(int event)
70{
71 switch (event) {
72 case PM_EVENT_SUSPEND:
73 return "suspend";
74 case PM_EVENT_RESUME:
75 return "resume";
76 case PM_EVENT_FREEZE:
77 return "freeze";
78 case PM_EVENT_QUIESCE:
79 return "quiesce";
80 case PM_EVENT_HIBERNATE:
81 return "hibernate";
82 case PM_EVENT_THAW:
83 return "thaw";
84 case PM_EVENT_RESTORE:
85 return "restore";
86 case PM_EVENT_RECOVER:
87 return "recover";
88 default:
89 return "(unknown PM event)";
90 }
91}
92
93/**
94 * device_pm_sleep_init - Initialize system suspend-related device fields.
95 * @dev: Device object being initialized.
96 */
97void device_pm_sleep_init(struct device *dev)
98{
99 dev->power.is_prepared = false;
100 dev->power.is_suspended = false;
101 dev->power.is_noirq_suspended = false;
102 dev->power.is_late_suspended = false;
103 init_completion(&dev->power.completion);
104 complete_all(&dev->power.completion);
105 dev->power.wakeup = NULL;
106 INIT_LIST_HEAD(&dev->power.entry);
107}
108
109/**
110 * device_pm_lock - Lock the list of active devices used by the PM core.
111 */
112void device_pm_lock(void)
113{
114 mutex_lock(&dpm_list_mtx);
115}
116
117/**
118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
119 */
120void device_pm_unlock(void)
121{
122 mutex_unlock(&dpm_list_mtx);
123}
124
125/**
126 * device_pm_add - Add a device to the PM core's list of active devices.
127 * @dev: Device to add to the list.
128 */
129void device_pm_add(struct device *dev)
130{
131 /* Skip PM setup/initialization. */
132 if (device_pm_not_required(dev))
133 return;
134
135 pr_debug("Adding info for %s:%s\n",
136 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 device_pm_check_callbacks(dev);
138 mutex_lock(&dpm_list_mtx);
139 if (dev->parent && dev->parent->power.is_prepared)
140 dev_warn(dev, "parent %s should not be sleeping\n",
141 dev_name(dev->parent));
142 list_add_tail(&dev->power.entry, &dpm_list);
143 dev->power.in_dpm_list = true;
144 mutex_unlock(&dpm_list_mtx);
145}
146
147/**
148 * device_pm_remove - Remove a device from the PM core's list of active devices.
149 * @dev: Device to be removed from the list.
150 */
151void device_pm_remove(struct device *dev)
152{
153 if (device_pm_not_required(dev))
154 return;
155
156 pr_debug("Removing info for %s:%s\n",
157 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 complete_all(&dev->power.completion);
159 mutex_lock(&dpm_list_mtx);
160 list_del_init(&dev->power.entry);
161 dev->power.in_dpm_list = false;
162 mutex_unlock(&dpm_list_mtx);
163 device_wakeup_disable(dev);
164 pm_runtime_remove(dev);
165 device_pm_check_callbacks(dev);
166}
167
168/**
169 * device_pm_move_before - Move device in the PM core's list of active devices.
170 * @deva: Device to move in dpm_list.
171 * @devb: Device @deva should come before.
172 */
173void device_pm_move_before(struct device *deva, struct device *devb)
174{
175 pr_debug("Moving %s:%s before %s:%s\n",
176 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178 /* Delete deva from dpm_list and reinsert before devb. */
179 list_move_tail(&deva->power.entry, &devb->power.entry);
180}
181
182/**
183 * device_pm_move_after - Move device in the PM core's list of active devices.
184 * @deva: Device to move in dpm_list.
185 * @devb: Device @deva should come after.
186 */
187void device_pm_move_after(struct device *deva, struct device *devb)
188{
189 pr_debug("Moving %s:%s after %s:%s\n",
190 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 /* Delete deva from dpm_list and reinsert after devb. */
193 list_move(&deva->power.entry, &devb->power.entry);
194}
195
196/**
197 * device_pm_move_last - Move device to end of the PM core's list of devices.
198 * @dev: Device to move in dpm_list.
199 */
200void device_pm_move_last(struct device *dev)
201{
202 pr_debug("Moving %s:%s to end of list\n",
203 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 list_move_tail(&dev->power.entry, &dpm_list);
205}
206
207static ktime_t initcall_debug_start(struct device *dev, void *cb)
208{
209 if (!pm_print_times_enabled)
210 return 0;
211
212 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
213 task_pid_nr(current),
214 dev->parent ? dev_name(dev->parent) : "none");
215 return ktime_get();
216}
217
218static void initcall_debug_report(struct device *dev, ktime_t calltime,
219 void *cb, int error)
220{
221 ktime_t rettime;
222 s64 nsecs;
223
224 if (!pm_print_times_enabled)
225 return;
226
227 rettime = ktime_get();
228 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
229
230 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
231 (unsigned long long)nsecs >> 10);
232}
233
234/**
235 * dpm_wait - Wait for a PM operation to complete.
236 * @dev: Device to wait for.
237 * @async: If unset, wait only if the device's power.async_suspend flag is set.
238 */
239static void dpm_wait(struct device *dev, bool async)
240{
241 if (!dev)
242 return;
243
244 if (async || (pm_async_enabled && dev->power.async_suspend))
245 wait_for_completion(&dev->power.completion);
246}
247
248static int dpm_wait_fn(struct device *dev, void *async_ptr)
249{
250 dpm_wait(dev, *((bool *)async_ptr));
251 return 0;
252}
253
254static void dpm_wait_for_children(struct device *dev, bool async)
255{
256 device_for_each_child(dev, &async, dpm_wait_fn);
257}
258
259static void dpm_wait_for_suppliers(struct device *dev, bool async)
260{
261 struct device_link *link;
262 int idx;
263
264 idx = device_links_read_lock();
265
266 /*
267 * If the supplier goes away right after we've checked the link to it,
268 * we'll wait for its completion to change the state, but that's fine,
269 * because the only things that will block as a result are the SRCU
270 * callbacks freeing the link objects for the links in the list we're
271 * walking.
272 */
273 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
274 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
275 dpm_wait(link->supplier, async);
276
277 device_links_read_unlock(idx);
278}
279
280static bool dpm_wait_for_superior(struct device *dev, bool async)
281{
282 struct device *parent;
283
284 /*
285 * If the device is resumed asynchronously and the parent's callback
286 * deletes both the device and the parent itself, the parent object may
287 * be freed while this function is running, so avoid that by reference
288 * counting the parent once more unless the device has been deleted
289 * already (in which case return right away).
290 */
291 mutex_lock(&dpm_list_mtx);
292
293 if (!device_pm_initialized(dev)) {
294 mutex_unlock(&dpm_list_mtx);
295 return false;
296 }
297
298 parent = get_device(dev->parent);
299
300 mutex_unlock(&dpm_list_mtx);
301
302 dpm_wait(parent, async);
303 put_device(parent);
304
305 dpm_wait_for_suppliers(dev, async);
306
307 /*
308 * If the parent's callback has deleted the device, attempting to resume
309 * it would be invalid, so avoid doing that then.
310 */
311 return device_pm_initialized(dev);
312}
313
314static void dpm_wait_for_consumers(struct device *dev, bool async)
315{
316 struct device_link *link;
317 int idx;
318
319 idx = device_links_read_lock();
320
321 /*
322 * The status of a device link can only be changed from "dormant" by a
323 * probe, but that cannot happen during system suspend/resume. In
324 * theory it can change to "dormant" at that time, but then it is
325 * reasonable to wait for the target device anyway (eg. if it goes
326 * away, it's better to wait for it to go away completely and then
327 * continue instead of trying to continue in parallel with its
328 * unregistration).
329 */
330 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
331 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
332 dpm_wait(link->consumer, async);
333
334 device_links_read_unlock(idx);
335}
336
337static void dpm_wait_for_subordinate(struct device *dev, bool async)
338{
339 dpm_wait_for_children(dev, async);
340 dpm_wait_for_consumers(dev, async);
341}
342
343/**
344 * pm_op - Return the PM operation appropriate for given PM event.
345 * @ops: PM operations to choose from.
346 * @state: PM transition of the system being carried out.
347 */
348static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
349{
350 switch (state.event) {
351#ifdef CONFIG_SUSPEND
352 case PM_EVENT_SUSPEND:
353 return ops->suspend;
354 case PM_EVENT_RESUME:
355 return ops->resume;
356#endif /* CONFIG_SUSPEND */
357#ifdef CONFIG_HIBERNATE_CALLBACKS
358 case PM_EVENT_FREEZE:
359 case PM_EVENT_QUIESCE:
360 return ops->freeze;
361 case PM_EVENT_HIBERNATE:
362 return ops->poweroff;
363 case PM_EVENT_THAW:
364 case PM_EVENT_RECOVER:
365 return ops->thaw;
366 break;
367 case PM_EVENT_RESTORE:
368 return ops->restore;
369#endif /* CONFIG_HIBERNATE_CALLBACKS */
370 }
371
372 return NULL;
373}
374
375/**
376 * pm_late_early_op - Return the PM operation appropriate for given PM event.
377 * @ops: PM operations to choose from.
378 * @state: PM transition of the system being carried out.
379 *
380 * Runtime PM is disabled for @dev while this function is being executed.
381 */
382static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
383 pm_message_t state)
384{
385 switch (state.event) {
386#ifdef CONFIG_SUSPEND
387 case PM_EVENT_SUSPEND:
388 return ops->suspend_late;
389 case PM_EVENT_RESUME:
390 return ops->resume_early;
391#endif /* CONFIG_SUSPEND */
392#ifdef CONFIG_HIBERNATE_CALLBACKS
393 case PM_EVENT_FREEZE:
394 case PM_EVENT_QUIESCE:
395 return ops->freeze_late;
396 case PM_EVENT_HIBERNATE:
397 return ops->poweroff_late;
398 case PM_EVENT_THAW:
399 case PM_EVENT_RECOVER:
400 return ops->thaw_early;
401 case PM_EVENT_RESTORE:
402 return ops->restore_early;
403#endif /* CONFIG_HIBERNATE_CALLBACKS */
404 }
405
406 return NULL;
407}
408
409/**
410 * pm_noirq_op - Return the PM operation appropriate for given PM event.
411 * @ops: PM operations to choose from.
412 * @state: PM transition of the system being carried out.
413 *
414 * The driver of @dev will not receive interrupts while this function is being
415 * executed.
416 */
417static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
418{
419 switch (state.event) {
420#ifdef CONFIG_SUSPEND
421 case PM_EVENT_SUSPEND:
422 return ops->suspend_noirq;
423 case PM_EVENT_RESUME:
424 return ops->resume_noirq;
425#endif /* CONFIG_SUSPEND */
426#ifdef CONFIG_HIBERNATE_CALLBACKS
427 case PM_EVENT_FREEZE:
428 case PM_EVENT_QUIESCE:
429 return ops->freeze_noirq;
430 case PM_EVENT_HIBERNATE:
431 return ops->poweroff_noirq;
432 case PM_EVENT_THAW:
433 case PM_EVENT_RECOVER:
434 return ops->thaw_noirq;
435 case PM_EVENT_RESTORE:
436 return ops->restore_noirq;
437#endif /* CONFIG_HIBERNATE_CALLBACKS */
438 }
439
440 return NULL;
441}
442
443static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
444{
445 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
446 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
447 ", may wakeup" : "");
448}
449
450static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
451 int error)
452{
453 pr_err("Device %s failed to %s%s: error %d\n",
454 dev_name(dev), pm_verb(state.event), info, error);
455}
456
457static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
458 const char *info)
459{
460 ktime_t calltime;
461 u64 usecs64;
462 int usecs;
463
464 calltime = ktime_get();
465 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
466 do_div(usecs64, NSEC_PER_USEC);
467 usecs = usecs64;
468 if (usecs == 0)
469 usecs = 1;
470
471 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
472 info ?: "", info ? " " : "", pm_verb(state.event),
473 error ? "aborted" : "complete",
474 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
475}
476
477static int dpm_run_callback(pm_callback_t cb, struct device *dev,
478 pm_message_t state, const char *info)
479{
480 ktime_t calltime;
481 int error;
482
483 if (!cb)
484 return 0;
485
486 calltime = initcall_debug_start(dev, cb);
487
488 pm_dev_dbg(dev, state, info);
489 trace_device_pm_callback_start(dev, info, state.event);
490 error = cb(dev);
491 trace_device_pm_callback_end(dev, error);
492 suspend_report_result(cb, error);
493
494 initcall_debug_report(dev, calltime, cb, error);
495
496 return error;
497}
498
499#ifdef CONFIG_DPM_WATCHDOG
500struct dpm_watchdog {
501 struct device *dev;
502 struct task_struct *tsk;
503 struct timer_list timer;
504};
505
506#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
507 struct dpm_watchdog wd
508
509/**
510 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
511 * @t: The timer that PM watchdog depends on.
512 *
513 * Called when a driver has timed out suspending or resuming.
514 * There's not much we can do here to recover so panic() to
515 * capture a crash-dump in pstore.
516 */
517static void dpm_watchdog_handler(struct timer_list *t)
518{
519 struct dpm_watchdog *wd = from_timer(wd, t, timer);
520
521 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522 show_stack(wd->tsk, NULL, KERN_EMERG);
523 panic("%s %s: unrecoverable failure\n",
524 dev_driver_string(wd->dev), dev_name(wd->dev));
525}
526
527/**
528 * dpm_watchdog_set - Enable pm watchdog for given device.
529 * @wd: Watchdog. Must be allocated on the stack.
530 * @dev: Device to handle.
531 */
532static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
533{
534 struct timer_list *timer = &wd->timer;
535
536 wd->dev = dev;
537 wd->tsk = current;
538
539 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
540 /* use same timeout value for both suspend and resume */
541 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
542 add_timer(timer);
543}
544
545/**
546 * dpm_watchdog_clear - Disable suspend/resume watchdog.
547 * @wd: Watchdog to disable.
548 */
549static void dpm_watchdog_clear(struct dpm_watchdog *wd)
550{
551 struct timer_list *timer = &wd->timer;
552
553 del_timer_sync(timer);
554 destroy_timer_on_stack(timer);
555}
556#else
557#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
558#define dpm_watchdog_set(x, y)
559#define dpm_watchdog_clear(x)
560#endif
561
562/*------------------------- Resume routines -------------------------*/
563
564/**
565 * dev_pm_skip_resume - System-wide device resume optimization check.
566 * @dev: Target device.
567 *
568 * Return:
569 * - %false if the transition under way is RESTORE.
570 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
571 * - The logical negation of %power.must_resume otherwise (that is, when the
572 * transition under way is RESUME).
573 */
574bool dev_pm_skip_resume(struct device *dev)
575{
576 if (pm_transition.event == PM_EVENT_RESTORE)
577 return false;
578
579 if (pm_transition.event == PM_EVENT_THAW)
580 return dev_pm_skip_suspend(dev);
581
582 return !dev->power.must_resume;
583}
584
585/**
586 * device_resume_noirq - Execute a "noirq resume" callback for given device.
587 * @dev: Device to handle.
588 * @state: PM transition of the system being carried out.
589 * @async: If true, the device is being resumed asynchronously.
590 *
591 * The driver of @dev will not receive interrupts while this function is being
592 * executed.
593 */
594static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
595{
596 pm_callback_t callback = NULL;
597 const char *info = NULL;
598 bool skip_resume;
599 int error = 0;
600
601 TRACE_DEVICE(dev);
602 TRACE_RESUME(0);
603
604 if (dev->power.syscore || dev->power.direct_complete)
605 goto Out;
606
607 if (!dev->power.is_noirq_suspended)
608 goto Out;
609
610 if (!dpm_wait_for_superior(dev, async))
611 goto Out;
612
613 skip_resume = dev_pm_skip_resume(dev);
614 /*
615 * If the driver callback is skipped below or by the middle layer
616 * callback and device_resume_early() also skips the driver callback for
617 * this device later, it needs to appear as "suspended" to PM-runtime,
618 * so change its status accordingly.
619 *
620 * Otherwise, the device is going to be resumed, so set its PM-runtime
621 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
622 * to avoid confusing drivers that don't use it.
623 */
624 if (skip_resume)
625 pm_runtime_set_suspended(dev);
626 else if (dev_pm_skip_suspend(dev))
627 pm_runtime_set_active(dev);
628
629 if (dev->pm_domain) {
630 info = "noirq power domain ";
631 callback = pm_noirq_op(&dev->pm_domain->ops, state);
632 } else if (dev->type && dev->type->pm) {
633 info = "noirq type ";
634 callback = pm_noirq_op(dev->type->pm, state);
635 } else if (dev->class && dev->class->pm) {
636 info = "noirq class ";
637 callback = pm_noirq_op(dev->class->pm, state);
638 } else if (dev->bus && dev->bus->pm) {
639 info = "noirq bus ";
640 callback = pm_noirq_op(dev->bus->pm, state);
641 }
642 if (callback)
643 goto Run;
644
645 if (skip_resume)
646 goto Skip;
647
648 if (dev->driver && dev->driver->pm) {
649 info = "noirq driver ";
650 callback = pm_noirq_op(dev->driver->pm, state);
651 }
652
653Run:
654 error = dpm_run_callback(callback, dev, state, info);
655
656Skip:
657 dev->power.is_noirq_suspended = false;
658
659Out:
660 complete_all(&dev->power.completion);
661 TRACE_RESUME(error);
662 return error;
663}
664
665static bool is_async(struct device *dev)
666{
667 return dev->power.async_suspend && pm_async_enabled
668 && !pm_trace_is_enabled();
669}
670
671static bool dpm_async_fn(struct device *dev, async_func_t func)
672{
673 reinit_completion(&dev->power.completion);
674
675 if (is_async(dev)) {
676 get_device(dev);
677 async_schedule_dev(func, dev);
678 return true;
679 }
680
681 return false;
682}
683
684static void async_resume_noirq(void *data, async_cookie_t cookie)
685{
686 struct device *dev = (struct device *)data;
687 int error;
688
689 error = device_resume_noirq(dev, pm_transition, true);
690 if (error)
691 pm_dev_err(dev, pm_transition, " async", error);
692
693 put_device(dev);
694}
695
696static void dpm_noirq_resume_devices(pm_message_t state)
697{
698 struct device *dev;
699 ktime_t starttime = ktime_get();
700
701 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
702 mutex_lock(&dpm_list_mtx);
703 pm_transition = state;
704
705 /*
706 * Advanced the async threads upfront,
707 * in case the starting of async threads is
708 * delayed by non-async resuming devices.
709 */
710 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
711 dpm_async_fn(dev, async_resume_noirq);
712
713 while (!list_empty(&dpm_noirq_list)) {
714 dev = to_device(dpm_noirq_list.next);
715 get_device(dev);
716 list_move_tail(&dev->power.entry, &dpm_late_early_list);
717 mutex_unlock(&dpm_list_mtx);
718
719 if (!is_async(dev)) {
720 int error;
721
722 error = device_resume_noirq(dev, state, false);
723 if (error) {
724 suspend_stats.failed_resume_noirq++;
725 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
726 dpm_save_failed_dev(dev_name(dev));
727 pm_dev_err(dev, state, " noirq", error);
728 }
729 }
730
731 mutex_lock(&dpm_list_mtx);
732 put_device(dev);
733 }
734 mutex_unlock(&dpm_list_mtx);
735 async_synchronize_full();
736 dpm_show_time(starttime, state, 0, "noirq");
737 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
738}
739
740/**
741 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
742 * @state: PM transition of the system being carried out.
743 *
744 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
745 * allow device drivers' interrupt handlers to be called.
746 */
747void dpm_resume_noirq(pm_message_t state)
748{
749 dpm_noirq_resume_devices(state);
750
751 resume_device_irqs();
752 device_wakeup_disarm_wake_irqs();
753
754 cpuidle_resume();
755}
756
757/**
758 * device_resume_early - Execute an "early resume" callback for given device.
759 * @dev: Device to handle.
760 * @state: PM transition of the system being carried out.
761 * @async: If true, the device is being resumed asynchronously.
762 *
763 * Runtime PM is disabled for @dev while this function is being executed.
764 */
765static int device_resume_early(struct device *dev, pm_message_t state, bool async)
766{
767 pm_callback_t callback = NULL;
768 const char *info = NULL;
769 int error = 0;
770
771 TRACE_DEVICE(dev);
772 TRACE_RESUME(0);
773
774 if (dev->power.syscore || dev->power.direct_complete)
775 goto Out;
776
777 if (!dev->power.is_late_suspended)
778 goto Out;
779
780 if (!dpm_wait_for_superior(dev, async))
781 goto Out;
782
783 if (dev->pm_domain) {
784 info = "early power domain ";
785 callback = pm_late_early_op(&dev->pm_domain->ops, state);
786 } else if (dev->type && dev->type->pm) {
787 info = "early type ";
788 callback = pm_late_early_op(dev->type->pm, state);
789 } else if (dev->class && dev->class->pm) {
790 info = "early class ";
791 callback = pm_late_early_op(dev->class->pm, state);
792 } else if (dev->bus && dev->bus->pm) {
793 info = "early bus ";
794 callback = pm_late_early_op(dev->bus->pm, state);
795 }
796 if (callback)
797 goto Run;
798
799 if (dev_pm_skip_resume(dev))
800 goto Skip;
801
802 if (dev->driver && dev->driver->pm) {
803 info = "early driver ";
804 callback = pm_late_early_op(dev->driver->pm, state);
805 }
806
807Run:
808 error = dpm_run_callback(callback, dev, state, info);
809
810Skip:
811 dev->power.is_late_suspended = false;
812
813Out:
814 TRACE_RESUME(error);
815
816 pm_runtime_enable(dev);
817 complete_all(&dev->power.completion);
818 return error;
819}
820
821static void async_resume_early(void *data, async_cookie_t cookie)
822{
823 struct device *dev = (struct device *)data;
824 int error;
825
826 error = device_resume_early(dev, pm_transition, true);
827 if (error)
828 pm_dev_err(dev, pm_transition, " async", error);
829
830 put_device(dev);
831}
832
833/**
834 * dpm_resume_early - Execute "early resume" callbacks for all devices.
835 * @state: PM transition of the system being carried out.
836 */
837void dpm_resume_early(pm_message_t state)
838{
839 struct device *dev;
840 ktime_t starttime = ktime_get();
841
842 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
843 mutex_lock(&dpm_list_mtx);
844 pm_transition = state;
845
846 /*
847 * Advanced the async threads upfront,
848 * in case the starting of async threads is
849 * delayed by non-async resuming devices.
850 */
851 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
852 dpm_async_fn(dev, async_resume_early);
853
854 while (!list_empty(&dpm_late_early_list)) {
855 dev = to_device(dpm_late_early_list.next);
856 get_device(dev);
857 list_move_tail(&dev->power.entry, &dpm_suspended_list);
858 mutex_unlock(&dpm_list_mtx);
859
860 if (!is_async(dev)) {
861 int error;
862
863 error = device_resume_early(dev, state, false);
864 if (error) {
865 suspend_stats.failed_resume_early++;
866 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
867 dpm_save_failed_dev(dev_name(dev));
868 pm_dev_err(dev, state, " early", error);
869 }
870 }
871 mutex_lock(&dpm_list_mtx);
872 put_device(dev);
873 }
874 mutex_unlock(&dpm_list_mtx);
875 async_synchronize_full();
876 dpm_show_time(starttime, state, 0, "early");
877 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
878}
879
880/**
881 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
882 * @state: PM transition of the system being carried out.
883 */
884void dpm_resume_start(pm_message_t state)
885{
886 dpm_resume_noirq(state);
887 dpm_resume_early(state);
888}
889EXPORT_SYMBOL_GPL(dpm_resume_start);
890
891/**
892 * device_resume - Execute "resume" callbacks for given device.
893 * @dev: Device to handle.
894 * @state: PM transition of the system being carried out.
895 * @async: If true, the device is being resumed asynchronously.
896 */
897static int device_resume(struct device *dev, pm_message_t state, bool async)
898{
899 pm_callback_t callback = NULL;
900 const char *info = NULL;
901 int error = 0;
902 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
903
904 TRACE_DEVICE(dev);
905 TRACE_RESUME(0);
906
907 if (dev->power.syscore)
908 goto Complete;
909
910 if (dev->power.direct_complete) {
911 /* Match the pm_runtime_disable() in __device_suspend(). */
912 pm_runtime_enable(dev);
913 goto Complete;
914 }
915
916 if (!dpm_wait_for_superior(dev, async))
917 goto Complete;
918
919 dpm_watchdog_set(&wd, dev);
920 device_lock(dev);
921
922 /*
923 * This is a fib. But we'll allow new children to be added below
924 * a resumed device, even if the device hasn't been completed yet.
925 */
926 dev->power.is_prepared = false;
927
928 if (!dev->power.is_suspended)
929 goto Unlock;
930
931 if (dev->pm_domain) {
932 info = "power domain ";
933 callback = pm_op(&dev->pm_domain->ops, state);
934 goto Driver;
935 }
936
937 if (dev->type && dev->type->pm) {
938 info = "type ";
939 callback = pm_op(dev->type->pm, state);
940 goto Driver;
941 }
942
943 if (dev->class && dev->class->pm) {
944 info = "class ";
945 callback = pm_op(dev->class->pm, state);
946 goto Driver;
947 }
948
949 if (dev->bus) {
950 if (dev->bus->pm) {
951 info = "bus ";
952 callback = pm_op(dev->bus->pm, state);
953 } else if (dev->bus->resume) {
954 info = "legacy bus ";
955 callback = dev->bus->resume;
956 goto End;
957 }
958 }
959
960 Driver:
961 if (!callback && dev->driver && dev->driver->pm) {
962 info = "driver ";
963 callback = pm_op(dev->driver->pm, state);
964 }
965
966 End:
967 error = dpm_run_callback(callback, dev, state, info);
968 dev->power.is_suspended = false;
969
970 Unlock:
971 device_unlock(dev);
972 dpm_watchdog_clear(&wd);
973
974 Complete:
975 complete_all(&dev->power.completion);
976
977 TRACE_RESUME(error);
978
979 return error;
980}
981
982static void async_resume(void *data, async_cookie_t cookie)
983{
984 struct device *dev = (struct device *)data;
985 int error;
986
987 error = device_resume(dev, pm_transition, true);
988 if (error)
989 pm_dev_err(dev, pm_transition, " async", error);
990 put_device(dev);
991}
992
993/**
994 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
995 * @state: PM transition of the system being carried out.
996 *
997 * Execute the appropriate "resume" callback for all devices whose status
998 * indicates that they are suspended.
999 */
1000void dpm_resume(pm_message_t state)
1001{
1002 struct device *dev;
1003 ktime_t starttime = ktime_get();
1004
1005 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1006 might_sleep();
1007
1008 mutex_lock(&dpm_list_mtx);
1009 pm_transition = state;
1010 async_error = 0;
1011
1012 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1013 dpm_async_fn(dev, async_resume);
1014
1015 while (!list_empty(&dpm_suspended_list)) {
1016 dev = to_device(dpm_suspended_list.next);
1017 get_device(dev);
1018 if (!is_async(dev)) {
1019 int error;
1020
1021 mutex_unlock(&dpm_list_mtx);
1022
1023 error = device_resume(dev, state, false);
1024 if (error) {
1025 suspend_stats.failed_resume++;
1026 dpm_save_failed_step(SUSPEND_RESUME);
1027 dpm_save_failed_dev(dev_name(dev));
1028 pm_dev_err(dev, state, "", error);
1029 }
1030
1031 mutex_lock(&dpm_list_mtx);
1032 }
1033 if (!list_empty(&dev->power.entry))
1034 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1035 put_device(dev);
1036 }
1037 mutex_unlock(&dpm_list_mtx);
1038 async_synchronize_full();
1039 dpm_show_time(starttime, state, 0, NULL);
1040
1041 cpufreq_resume();
1042 devfreq_resume();
1043 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1044}
1045
1046/**
1047 * device_complete - Complete a PM transition for given device.
1048 * @dev: Device to handle.
1049 * @state: PM transition of the system being carried out.
1050 */
1051static void device_complete(struct device *dev, pm_message_t state)
1052{
1053 void (*callback)(struct device *) = NULL;
1054 const char *info = NULL;
1055
1056 if (dev->power.syscore)
1057 return;
1058
1059 device_lock(dev);
1060
1061 if (dev->pm_domain) {
1062 info = "completing power domain ";
1063 callback = dev->pm_domain->ops.complete;
1064 } else if (dev->type && dev->type->pm) {
1065 info = "completing type ";
1066 callback = dev->type->pm->complete;
1067 } else if (dev->class && dev->class->pm) {
1068 info = "completing class ";
1069 callback = dev->class->pm->complete;
1070 } else if (dev->bus && dev->bus->pm) {
1071 info = "completing bus ";
1072 callback = dev->bus->pm->complete;
1073 }
1074
1075 if (!callback && dev->driver && dev->driver->pm) {
1076 info = "completing driver ";
1077 callback = dev->driver->pm->complete;
1078 }
1079
1080 if (callback) {
1081 pm_dev_dbg(dev, state, info);
1082 callback(dev);
1083 }
1084
1085 device_unlock(dev);
1086
1087 pm_runtime_put(dev);
1088}
1089
1090/**
1091 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1092 * @state: PM transition of the system being carried out.
1093 *
1094 * Execute the ->complete() callbacks for all devices whose PM status is not
1095 * DPM_ON (this allows new devices to be registered).
1096 */
1097void dpm_complete(pm_message_t state)
1098{
1099 struct list_head list;
1100
1101 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1102 might_sleep();
1103
1104 INIT_LIST_HEAD(&list);
1105 mutex_lock(&dpm_list_mtx);
1106 while (!list_empty(&dpm_prepared_list)) {
1107 struct device *dev = to_device(dpm_prepared_list.prev);
1108
1109 get_device(dev);
1110 dev->power.is_prepared = false;
1111 list_move(&dev->power.entry, &list);
1112 mutex_unlock(&dpm_list_mtx);
1113
1114 trace_device_pm_callback_start(dev, "", state.event);
1115 device_complete(dev, state);
1116 trace_device_pm_callback_end(dev, 0);
1117
1118 mutex_lock(&dpm_list_mtx);
1119 put_device(dev);
1120 }
1121 list_splice(&list, &dpm_list);
1122 mutex_unlock(&dpm_list_mtx);
1123
1124 /* Allow device probing and trigger re-probing of deferred devices */
1125 device_unblock_probing();
1126 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1127}
1128
1129/**
1130 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1131 * @state: PM transition of the system being carried out.
1132 *
1133 * Execute "resume" callbacks for all devices and complete the PM transition of
1134 * the system.
1135 */
1136void dpm_resume_end(pm_message_t state)
1137{
1138 dpm_resume(state);
1139 dpm_complete(state);
1140}
1141EXPORT_SYMBOL_GPL(dpm_resume_end);
1142
1143
1144/*------------------------- Suspend routines -------------------------*/
1145
1146/**
1147 * resume_event - Return a "resume" message for given "suspend" sleep state.
1148 * @sleep_state: PM message representing a sleep state.
1149 *
1150 * Return a PM message representing the resume event corresponding to given
1151 * sleep state.
1152 */
1153static pm_message_t resume_event(pm_message_t sleep_state)
1154{
1155 switch (sleep_state.event) {
1156 case PM_EVENT_SUSPEND:
1157 return PMSG_RESUME;
1158 case PM_EVENT_FREEZE:
1159 case PM_EVENT_QUIESCE:
1160 return PMSG_RECOVER;
1161 case PM_EVENT_HIBERNATE:
1162 return PMSG_RESTORE;
1163 }
1164 return PMSG_ON;
1165}
1166
1167static void dpm_superior_set_must_resume(struct device *dev)
1168{
1169 struct device_link *link;
1170 int idx;
1171
1172 if (dev->parent)
1173 dev->parent->power.must_resume = true;
1174
1175 idx = device_links_read_lock();
1176
1177 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1178 link->supplier->power.must_resume = true;
1179
1180 device_links_read_unlock(idx);
1181}
1182
1183/**
1184 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1185 * @dev: Device to handle.
1186 * @state: PM transition of the system being carried out.
1187 * @async: If true, the device is being suspended asynchronously.
1188 *
1189 * The driver of @dev will not receive interrupts while this function is being
1190 * executed.
1191 */
1192static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193{
1194 pm_callback_t callback = NULL;
1195 const char *info = NULL;
1196 int error = 0;
1197
1198 TRACE_DEVICE(dev);
1199 TRACE_SUSPEND(0);
1200
1201 dpm_wait_for_subordinate(dev, async);
1202
1203 if (async_error)
1204 goto Complete;
1205
1206 if (dev->power.syscore || dev->power.direct_complete)
1207 goto Complete;
1208
1209 if (dev->pm_domain) {
1210 info = "noirq power domain ";
1211 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1212 } else if (dev->type && dev->type->pm) {
1213 info = "noirq type ";
1214 callback = pm_noirq_op(dev->type->pm, state);
1215 } else if (dev->class && dev->class->pm) {
1216 info = "noirq class ";
1217 callback = pm_noirq_op(dev->class->pm, state);
1218 } else if (dev->bus && dev->bus->pm) {
1219 info = "noirq bus ";
1220 callback = pm_noirq_op(dev->bus->pm, state);
1221 }
1222 if (callback)
1223 goto Run;
1224
1225 if (dev_pm_skip_suspend(dev))
1226 goto Skip;
1227
1228 if (dev->driver && dev->driver->pm) {
1229 info = "noirq driver ";
1230 callback = pm_noirq_op(dev->driver->pm, state);
1231 }
1232
1233Run:
1234 error = dpm_run_callback(callback, dev, state, info);
1235 if (error) {
1236 async_error = error;
1237 goto Complete;
1238 }
1239
1240Skip:
1241 dev->power.is_noirq_suspended = true;
1242
1243 /*
1244 * Skipping the resume of devices that were in use right before the
1245 * system suspend (as indicated by their PM-runtime usage counters)
1246 * would be suboptimal. Also resume them if doing that is not allowed
1247 * to be skipped.
1248 */
1249 if (atomic_read(&dev->power.usage_count) > 1 ||
1250 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1251 dev->power.may_skip_resume))
1252 dev->power.must_resume = true;
1253
1254 if (dev->power.must_resume)
1255 dpm_superior_set_must_resume(dev);
1256
1257Complete:
1258 complete_all(&dev->power.completion);
1259 TRACE_SUSPEND(error);
1260 return error;
1261}
1262
1263static void async_suspend_noirq(void *data, async_cookie_t cookie)
1264{
1265 struct device *dev = (struct device *)data;
1266 int error;
1267
1268 error = __device_suspend_noirq(dev, pm_transition, true);
1269 if (error) {
1270 dpm_save_failed_dev(dev_name(dev));
1271 pm_dev_err(dev, pm_transition, " async", error);
1272 }
1273
1274 put_device(dev);
1275}
1276
1277static int device_suspend_noirq(struct device *dev)
1278{
1279 if (dpm_async_fn(dev, async_suspend_noirq))
1280 return 0;
1281
1282 return __device_suspend_noirq(dev, pm_transition, false);
1283}
1284
1285static int dpm_noirq_suspend_devices(pm_message_t state)
1286{
1287 ktime_t starttime = ktime_get();
1288 int error = 0;
1289
1290 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1291 mutex_lock(&dpm_list_mtx);
1292 pm_transition = state;
1293 async_error = 0;
1294
1295 while (!list_empty(&dpm_late_early_list)) {
1296 struct device *dev = to_device(dpm_late_early_list.prev);
1297
1298 get_device(dev);
1299 mutex_unlock(&dpm_list_mtx);
1300
1301 error = device_suspend_noirq(dev);
1302
1303 mutex_lock(&dpm_list_mtx);
1304 if (error) {
1305 pm_dev_err(dev, state, " noirq", error);
1306 dpm_save_failed_dev(dev_name(dev));
1307 put_device(dev);
1308 break;
1309 }
1310 if (!list_empty(&dev->power.entry))
1311 list_move(&dev->power.entry, &dpm_noirq_list);
1312 put_device(dev);
1313
1314 if (async_error)
1315 break;
1316 }
1317 mutex_unlock(&dpm_list_mtx);
1318 async_synchronize_full();
1319 if (!error)
1320 error = async_error;
1321
1322 if (error) {
1323 suspend_stats.failed_suspend_noirq++;
1324 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1325 }
1326 dpm_show_time(starttime, state, error, "noirq");
1327 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1328 return error;
1329}
1330
1331/**
1332 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1333 * @state: PM transition of the system being carried out.
1334 *
1335 * Prevent device drivers' interrupt handlers from being called and invoke
1336 * "noirq" suspend callbacks for all non-sysdev devices.
1337 */
1338int dpm_suspend_noirq(pm_message_t state)
1339{
1340 int ret;
1341
1342 cpuidle_pause();
1343
1344 device_wakeup_arm_wake_irqs();
1345 suspend_device_irqs();
1346
1347 ret = dpm_noirq_suspend_devices(state);
1348 if (ret)
1349 dpm_resume_noirq(resume_event(state));
1350
1351 return ret;
1352}
1353
1354static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355{
1356 struct device *parent = dev->parent;
1357
1358 if (!parent)
1359 return;
1360
1361 spin_lock_irq(&parent->power.lock);
1362
1363 if (dev->power.wakeup_path && !parent->power.ignore_children)
1364 parent->power.wakeup_path = true;
1365
1366 spin_unlock_irq(&parent->power.lock);
1367}
1368
1369/**
1370 * __device_suspend_late - Execute a "late suspend" callback for given device.
1371 * @dev: Device to handle.
1372 * @state: PM transition of the system being carried out.
1373 * @async: If true, the device is being suspended asynchronously.
1374 *
1375 * Runtime PM is disabled for @dev while this function is being executed.
1376 */
1377static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1378{
1379 pm_callback_t callback = NULL;
1380 const char *info = NULL;
1381 int error = 0;
1382
1383 TRACE_DEVICE(dev);
1384 TRACE_SUSPEND(0);
1385
1386 __pm_runtime_disable(dev, false);
1387
1388 dpm_wait_for_subordinate(dev, async);
1389
1390 if (async_error)
1391 goto Complete;
1392
1393 if (pm_wakeup_pending()) {
1394 async_error = -EBUSY;
1395 goto Complete;
1396 }
1397
1398 if (dev->power.syscore || dev->power.direct_complete)
1399 goto Complete;
1400
1401 if (dev->pm_domain) {
1402 info = "late power domain ";
1403 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1404 } else if (dev->type && dev->type->pm) {
1405 info = "late type ";
1406 callback = pm_late_early_op(dev->type->pm, state);
1407 } else if (dev->class && dev->class->pm) {
1408 info = "late class ";
1409 callback = pm_late_early_op(dev->class->pm, state);
1410 } else if (dev->bus && dev->bus->pm) {
1411 info = "late bus ";
1412 callback = pm_late_early_op(dev->bus->pm, state);
1413 }
1414 if (callback)
1415 goto Run;
1416
1417 if (dev_pm_skip_suspend(dev))
1418 goto Skip;
1419
1420 if (dev->driver && dev->driver->pm) {
1421 info = "late driver ";
1422 callback = pm_late_early_op(dev->driver->pm, state);
1423 }
1424
1425Run:
1426 error = dpm_run_callback(callback, dev, state, info);
1427 if (error) {
1428 async_error = error;
1429 goto Complete;
1430 }
1431 dpm_propagate_wakeup_to_parent(dev);
1432
1433Skip:
1434 dev->power.is_late_suspended = true;
1435
1436Complete:
1437 TRACE_SUSPEND(error);
1438 complete_all(&dev->power.completion);
1439 return error;
1440}
1441
1442static void async_suspend_late(void *data, async_cookie_t cookie)
1443{
1444 struct device *dev = (struct device *)data;
1445 int error;
1446
1447 error = __device_suspend_late(dev, pm_transition, true);
1448 if (error) {
1449 dpm_save_failed_dev(dev_name(dev));
1450 pm_dev_err(dev, pm_transition, " async", error);
1451 }
1452 put_device(dev);
1453}
1454
1455static int device_suspend_late(struct device *dev)
1456{
1457 if (dpm_async_fn(dev, async_suspend_late))
1458 return 0;
1459
1460 return __device_suspend_late(dev, pm_transition, false);
1461}
1462
1463/**
1464 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1465 * @state: PM transition of the system being carried out.
1466 */
1467int dpm_suspend_late(pm_message_t state)
1468{
1469 ktime_t starttime = ktime_get();
1470 int error = 0;
1471
1472 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1473 mutex_lock(&dpm_list_mtx);
1474 pm_transition = state;
1475 async_error = 0;
1476
1477 while (!list_empty(&dpm_suspended_list)) {
1478 struct device *dev = to_device(dpm_suspended_list.prev);
1479
1480 get_device(dev);
1481 mutex_unlock(&dpm_list_mtx);
1482
1483 error = device_suspend_late(dev);
1484
1485 mutex_lock(&dpm_list_mtx);
1486 if (!list_empty(&dev->power.entry))
1487 list_move(&dev->power.entry, &dpm_late_early_list);
1488
1489 if (error) {
1490 pm_dev_err(dev, state, " late", error);
1491 dpm_save_failed_dev(dev_name(dev));
1492 put_device(dev);
1493 break;
1494 }
1495 put_device(dev);
1496
1497 if (async_error)
1498 break;
1499 }
1500 mutex_unlock(&dpm_list_mtx);
1501 async_synchronize_full();
1502 if (!error)
1503 error = async_error;
1504 if (error) {
1505 suspend_stats.failed_suspend_late++;
1506 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1507 dpm_resume_early(resume_event(state));
1508 }
1509 dpm_show_time(starttime, state, error, "late");
1510 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1511 return error;
1512}
1513
1514/**
1515 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1516 * @state: PM transition of the system being carried out.
1517 */
1518int dpm_suspend_end(pm_message_t state)
1519{
1520 ktime_t starttime = ktime_get();
1521 int error;
1522
1523 error = dpm_suspend_late(state);
1524 if (error)
1525 goto out;
1526
1527 error = dpm_suspend_noirq(state);
1528 if (error)
1529 dpm_resume_early(resume_event(state));
1530
1531out:
1532 dpm_show_time(starttime, state, error, "end");
1533 return error;
1534}
1535EXPORT_SYMBOL_GPL(dpm_suspend_end);
1536
1537/**
1538 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539 * @dev: Device to suspend.
1540 * @state: PM transition of the system being carried out.
1541 * @cb: Suspend callback to execute.
1542 * @info: string description of caller.
1543 */
1544static int legacy_suspend(struct device *dev, pm_message_t state,
1545 int (*cb)(struct device *dev, pm_message_t state),
1546 const char *info)
1547{
1548 int error;
1549 ktime_t calltime;
1550
1551 calltime = initcall_debug_start(dev, cb);
1552
1553 trace_device_pm_callback_start(dev, info, state.event);
1554 error = cb(dev, state);
1555 trace_device_pm_callback_end(dev, error);
1556 suspend_report_result(cb, error);
1557
1558 initcall_debug_report(dev, calltime, cb, error);
1559
1560 return error;
1561}
1562
1563static void dpm_clear_superiors_direct_complete(struct device *dev)
1564{
1565 struct device_link *link;
1566 int idx;
1567
1568 if (dev->parent) {
1569 spin_lock_irq(&dev->parent->power.lock);
1570 dev->parent->power.direct_complete = false;
1571 spin_unlock_irq(&dev->parent->power.lock);
1572 }
1573
1574 idx = device_links_read_lock();
1575
1576 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577 spin_lock_irq(&link->supplier->power.lock);
1578 link->supplier->power.direct_complete = false;
1579 spin_unlock_irq(&link->supplier->power.lock);
1580 }
1581
1582 device_links_read_unlock(idx);
1583}
1584
1585/**
1586 * __device_suspend - Execute "suspend" callbacks for given device.
1587 * @dev: Device to handle.
1588 * @state: PM transition of the system being carried out.
1589 * @async: If true, the device is being suspended asynchronously.
1590 */
1591static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592{
1593 pm_callback_t callback = NULL;
1594 const char *info = NULL;
1595 int error = 0;
1596 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598 TRACE_DEVICE(dev);
1599 TRACE_SUSPEND(0);
1600
1601 dpm_wait_for_subordinate(dev, async);
1602
1603 if (async_error) {
1604 dev->power.direct_complete = false;
1605 goto Complete;
1606 }
1607
1608 /*
1609 * Wait for possible runtime PM transitions of the device in progress
1610 * to complete and if there's a runtime resume request pending for it,
1611 * resume it before proceeding with invoking the system-wide suspend
1612 * callbacks for it.
1613 *
1614 * If the system-wide suspend callbacks below change the configuration
1615 * of the device, they must disable runtime PM for it or otherwise
1616 * ensure that its runtime-resume callbacks will not be confused by that
1617 * change in case they are invoked going forward.
1618 */
1619 pm_runtime_barrier(dev);
1620
1621 if (pm_wakeup_pending()) {
1622 dev->power.direct_complete = false;
1623 async_error = -EBUSY;
1624 goto Complete;
1625 }
1626
1627 if (dev->power.syscore)
1628 goto Complete;
1629
1630 /* Avoid direct_complete to let wakeup_path propagate. */
1631 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1632 dev->power.direct_complete = false;
1633
1634 if (dev->power.direct_complete) {
1635 if (pm_runtime_status_suspended(dev)) {
1636 pm_runtime_disable(dev);
1637 if (pm_runtime_status_suspended(dev)) {
1638 pm_dev_dbg(dev, state, "direct-complete ");
1639 goto Complete;
1640 }
1641
1642 pm_runtime_enable(dev);
1643 }
1644 dev->power.direct_complete = false;
1645 }
1646
1647 dev->power.may_skip_resume = true;
1648 dev->power.must_resume = false;
1649
1650 dpm_watchdog_set(&wd, dev);
1651 device_lock(dev);
1652
1653 if (dev->pm_domain) {
1654 info = "power domain ";
1655 callback = pm_op(&dev->pm_domain->ops, state);
1656 goto Run;
1657 }
1658
1659 if (dev->type && dev->type->pm) {
1660 info = "type ";
1661 callback = pm_op(dev->type->pm, state);
1662 goto Run;
1663 }
1664
1665 if (dev->class && dev->class->pm) {
1666 info = "class ";
1667 callback = pm_op(dev->class->pm, state);
1668 goto Run;
1669 }
1670
1671 if (dev->bus) {
1672 if (dev->bus->pm) {
1673 info = "bus ";
1674 callback = pm_op(dev->bus->pm, state);
1675 } else if (dev->bus->suspend) {
1676 pm_dev_dbg(dev, state, "legacy bus ");
1677 error = legacy_suspend(dev, state, dev->bus->suspend,
1678 "legacy bus ");
1679 goto End;
1680 }
1681 }
1682
1683 Run:
1684 if (!callback && dev->driver && dev->driver->pm) {
1685 info = "driver ";
1686 callback = pm_op(dev->driver->pm, state);
1687 }
1688
1689 error = dpm_run_callback(callback, dev, state, info);
1690
1691 End:
1692 if (!error) {
1693 dev->power.is_suspended = true;
1694 if (device_may_wakeup(dev))
1695 dev->power.wakeup_path = true;
1696
1697 dpm_propagate_wakeup_to_parent(dev);
1698 dpm_clear_superiors_direct_complete(dev);
1699 }
1700
1701 device_unlock(dev);
1702 dpm_watchdog_clear(&wd);
1703
1704 Complete:
1705 if (error)
1706 async_error = error;
1707
1708 complete_all(&dev->power.completion);
1709 TRACE_SUSPEND(error);
1710 return error;
1711}
1712
1713static void async_suspend(void *data, async_cookie_t cookie)
1714{
1715 struct device *dev = (struct device *)data;
1716 int error;
1717
1718 error = __device_suspend(dev, pm_transition, true);
1719 if (error) {
1720 dpm_save_failed_dev(dev_name(dev));
1721 pm_dev_err(dev, pm_transition, " async", error);
1722 }
1723
1724 put_device(dev);
1725}
1726
1727static int device_suspend(struct device *dev)
1728{
1729 if (dpm_async_fn(dev, async_suspend))
1730 return 0;
1731
1732 return __device_suspend(dev, pm_transition, false);
1733}
1734
1735/**
1736 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1737 * @state: PM transition of the system being carried out.
1738 */
1739int dpm_suspend(pm_message_t state)
1740{
1741 ktime_t starttime = ktime_get();
1742 int error = 0;
1743
1744 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1745 might_sleep();
1746
1747 devfreq_suspend();
1748 cpufreq_suspend();
1749
1750 mutex_lock(&dpm_list_mtx);
1751 pm_transition = state;
1752 async_error = 0;
1753 while (!list_empty(&dpm_prepared_list)) {
1754 struct device *dev = to_device(dpm_prepared_list.prev);
1755
1756 get_device(dev);
1757 mutex_unlock(&dpm_list_mtx);
1758
1759 error = device_suspend(dev);
1760
1761 mutex_lock(&dpm_list_mtx);
1762 if (error) {
1763 pm_dev_err(dev, state, "", error);
1764 dpm_save_failed_dev(dev_name(dev));
1765 put_device(dev);
1766 break;
1767 }
1768 if (!list_empty(&dev->power.entry))
1769 list_move(&dev->power.entry, &dpm_suspended_list);
1770 put_device(dev);
1771 if (async_error)
1772 break;
1773 }
1774 mutex_unlock(&dpm_list_mtx);
1775 async_synchronize_full();
1776 if (!error)
1777 error = async_error;
1778 if (error) {
1779 suspend_stats.failed_suspend++;
1780 dpm_save_failed_step(SUSPEND_SUSPEND);
1781 }
1782 dpm_show_time(starttime, state, error, NULL);
1783 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1784 return error;
1785}
1786
1787/**
1788 * device_prepare - Prepare a device for system power transition.
1789 * @dev: Device to handle.
1790 * @state: PM transition of the system being carried out.
1791 *
1792 * Execute the ->prepare() callback(s) for given device. No new children of the
1793 * device may be registered after this function has returned.
1794 */
1795static int device_prepare(struct device *dev, pm_message_t state)
1796{
1797 int (*callback)(struct device *) = NULL;
1798 int ret = 0;
1799
1800 if (dev->power.syscore)
1801 return 0;
1802
1803 /*
1804 * If a device's parent goes into runtime suspend at the wrong time,
1805 * it won't be possible to resume the device. To prevent this we
1806 * block runtime suspend here, during the prepare phase, and allow
1807 * it again during the complete phase.
1808 */
1809 pm_runtime_get_noresume(dev);
1810
1811 device_lock(dev);
1812
1813 dev->power.wakeup_path = false;
1814
1815 if (dev->power.no_pm_callbacks)
1816 goto unlock;
1817
1818 if (dev->pm_domain)
1819 callback = dev->pm_domain->ops.prepare;
1820 else if (dev->type && dev->type->pm)
1821 callback = dev->type->pm->prepare;
1822 else if (dev->class && dev->class->pm)
1823 callback = dev->class->pm->prepare;
1824 else if (dev->bus && dev->bus->pm)
1825 callback = dev->bus->pm->prepare;
1826
1827 if (!callback && dev->driver && dev->driver->pm)
1828 callback = dev->driver->pm->prepare;
1829
1830 if (callback)
1831 ret = callback(dev);
1832
1833unlock:
1834 device_unlock(dev);
1835
1836 if (ret < 0) {
1837 suspend_report_result(callback, ret);
1838 pm_runtime_put(dev);
1839 return ret;
1840 }
1841 /*
1842 * A positive return value from ->prepare() means "this device appears
1843 * to be runtime-suspended and its state is fine, so if it really is
1844 * runtime-suspended, you can leave it in that state provided that you
1845 * will do the same thing with all of its descendants". This only
1846 * applies to suspend transitions, however.
1847 */
1848 spin_lock_irq(&dev->power.lock);
1849 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1850 (ret > 0 || dev->power.no_pm_callbacks) &&
1851 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1852 spin_unlock_irq(&dev->power.lock);
1853 return 0;
1854}
1855
1856/**
1857 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1858 * @state: PM transition of the system being carried out.
1859 *
1860 * Execute the ->prepare() callback(s) for all devices.
1861 */
1862int dpm_prepare(pm_message_t state)
1863{
1864 int error = 0;
1865
1866 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1867 might_sleep();
1868
1869 /*
1870 * Give a chance for the known devices to complete their probes, before
1871 * disable probing of devices. This sync point is important at least
1872 * at boot time + hibernation restore.
1873 */
1874 wait_for_device_probe();
1875 /*
1876 * It is unsafe if probing of devices will happen during suspend or
1877 * hibernation and system behavior will be unpredictable in this case.
1878 * So, let's prohibit device's probing here and defer their probes
1879 * instead. The normal behavior will be restored in dpm_complete().
1880 */
1881 device_block_probing();
1882
1883 mutex_lock(&dpm_list_mtx);
1884 while (!list_empty(&dpm_list)) {
1885 struct device *dev = to_device(dpm_list.next);
1886
1887 get_device(dev);
1888 mutex_unlock(&dpm_list_mtx);
1889
1890 trace_device_pm_callback_start(dev, "", state.event);
1891 error = device_prepare(dev, state);
1892 trace_device_pm_callback_end(dev, error);
1893
1894 mutex_lock(&dpm_list_mtx);
1895 if (error) {
1896 if (error == -EAGAIN) {
1897 put_device(dev);
1898 error = 0;
1899 continue;
1900 }
1901 pr_info("Device %s not prepared for power transition: code %d\n",
1902 dev_name(dev), error);
1903 put_device(dev);
1904 break;
1905 }
1906 dev->power.is_prepared = true;
1907 if (!list_empty(&dev->power.entry))
1908 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1909 put_device(dev);
1910 }
1911 mutex_unlock(&dpm_list_mtx);
1912 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1913 return error;
1914}
1915
1916/**
1917 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1918 * @state: PM transition of the system being carried out.
1919 *
1920 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1921 * callbacks for them.
1922 */
1923int dpm_suspend_start(pm_message_t state)
1924{
1925 ktime_t starttime = ktime_get();
1926 int error;
1927
1928 error = dpm_prepare(state);
1929 if (error) {
1930 suspend_stats.failed_prepare++;
1931 dpm_save_failed_step(SUSPEND_PREPARE);
1932 } else
1933 error = dpm_suspend(state);
1934 dpm_show_time(starttime, state, error, "start");
1935 return error;
1936}
1937EXPORT_SYMBOL_GPL(dpm_suspend_start);
1938
1939void __suspend_report_result(const char *function, void *fn, int ret)
1940{
1941 if (ret)
1942 pr_err("%s(): %pS returns %d\n", function, fn, ret);
1943}
1944EXPORT_SYMBOL_GPL(__suspend_report_result);
1945
1946/**
1947 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1948 * @subordinate: Device that needs to wait for @dev.
1949 * @dev: Device to wait for.
1950 */
1951int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1952{
1953 dpm_wait(dev, subordinate->power.async_suspend);
1954 return async_error;
1955}
1956EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1957
1958/**
1959 * dpm_for_each_dev - device iterator.
1960 * @data: data for the callback.
1961 * @fn: function to be called for each device.
1962 *
1963 * Iterate over devices in dpm_list, and call @fn for each device,
1964 * passing it @data.
1965 */
1966void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1967{
1968 struct device *dev;
1969
1970 if (!fn)
1971 return;
1972
1973 device_pm_lock();
1974 list_for_each_entry(dev, &dpm_list, power.entry)
1975 fn(dev, data);
1976 device_pm_unlock();
1977}
1978EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1979
1980static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1981{
1982 if (!ops)
1983 return true;
1984
1985 return !ops->prepare &&
1986 !ops->suspend &&
1987 !ops->suspend_late &&
1988 !ops->suspend_noirq &&
1989 !ops->resume_noirq &&
1990 !ops->resume_early &&
1991 !ops->resume &&
1992 !ops->complete;
1993}
1994
1995void device_pm_check_callbacks(struct device *dev)
1996{
1997 spin_lock_irq(&dev->power.lock);
1998 dev->power.no_pm_callbacks =
1999 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2000 !dev->bus->suspend && !dev->bus->resume)) &&
2001 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2002 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2003 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2004 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2005 !dev->driver->suspend && !dev->driver->resume));
2006 spin_unlock_irq(&dev->power.lock);
2007}
2008
2009bool dev_pm_skip_suspend(struct device *dev)
2010{
2011 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2012 pm_runtime_status_suspended(dev);
2013}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18#define pr_fmt(fmt) "PM: " fmt
19#define dev_fmt pr_fmt
20
21#include <linux/device.h>
22#include <linux/export.h>
23#include <linux/mutex.h>
24#include <linux/pm.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm-trace.h>
27#include <linux/pm_wakeirq.h>
28#include <linux/interrupt.h>
29#include <linux/sched.h>
30#include <linux/sched/debug.h>
31#include <linux/async.h>
32#include <linux/suspend.h>
33#include <trace/events/power.h>
34#include <linux/cpufreq.h>
35#include <linux/devfreq.h>
36#include <linux/timer.h>
37
38#include "../base.h"
39#include "power.h"
40
41typedef int (*pm_callback_t)(struct device *);
42
43#define list_for_each_entry_rcu_locked(pos, head, member) \
44 list_for_each_entry_rcu(pos, head, member, \
45 device_links_read_lock_held())
46
47/*
48 * The entries in the dpm_list list are in a depth first order, simply
49 * because children are guaranteed to be discovered after parents, and
50 * are inserted at the back of the list on discovery.
51 *
52 * Since device_pm_add() may be called with a device lock held,
53 * we must never try to acquire a device lock while holding
54 * dpm_list_mutex.
55 */
56
57LIST_HEAD(dpm_list);
58static LIST_HEAD(dpm_prepared_list);
59static LIST_HEAD(dpm_suspended_list);
60static LIST_HEAD(dpm_late_early_list);
61static LIST_HEAD(dpm_noirq_list);
62
63static DEFINE_MUTEX(dpm_list_mtx);
64static pm_message_t pm_transition;
65
66static int async_error;
67
68static const char *pm_verb(int event)
69{
70 switch (event) {
71 case PM_EVENT_SUSPEND:
72 return "suspend";
73 case PM_EVENT_RESUME:
74 return "resume";
75 case PM_EVENT_FREEZE:
76 return "freeze";
77 case PM_EVENT_QUIESCE:
78 return "quiesce";
79 case PM_EVENT_HIBERNATE:
80 return "hibernate";
81 case PM_EVENT_THAW:
82 return "thaw";
83 case PM_EVENT_RESTORE:
84 return "restore";
85 case PM_EVENT_RECOVER:
86 return "recover";
87 default:
88 return "(unknown PM event)";
89 }
90}
91
92/**
93 * device_pm_sleep_init - Initialize system suspend-related device fields.
94 * @dev: Device object being initialized.
95 */
96void device_pm_sleep_init(struct device *dev)
97{
98 dev->power.is_prepared = false;
99 dev->power.is_suspended = false;
100 dev->power.is_noirq_suspended = false;
101 dev->power.is_late_suspended = false;
102 init_completion(&dev->power.completion);
103 complete_all(&dev->power.completion);
104 dev->power.wakeup = NULL;
105 INIT_LIST_HEAD(&dev->power.entry);
106}
107
108/**
109 * device_pm_lock - Lock the list of active devices used by the PM core.
110 */
111void device_pm_lock(void)
112{
113 mutex_lock(&dpm_list_mtx);
114}
115
116/**
117 * device_pm_unlock - Unlock the list of active devices used by the PM core.
118 */
119void device_pm_unlock(void)
120{
121 mutex_unlock(&dpm_list_mtx);
122}
123
124/**
125 * device_pm_add - Add a device to the PM core's list of active devices.
126 * @dev: Device to add to the list.
127 */
128void device_pm_add(struct device *dev)
129{
130 /* Skip PM setup/initialization. */
131 if (device_pm_not_required(dev))
132 return;
133
134 pr_debug("Adding info for %s:%s\n",
135 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
136 device_pm_check_callbacks(dev);
137 mutex_lock(&dpm_list_mtx);
138 if (dev->parent && dev->parent->power.is_prepared)
139 dev_warn(dev, "parent %s should not be sleeping\n",
140 dev_name(dev->parent));
141 list_add_tail(&dev->power.entry, &dpm_list);
142 dev->power.in_dpm_list = true;
143 mutex_unlock(&dpm_list_mtx);
144}
145
146/**
147 * device_pm_remove - Remove a device from the PM core's list of active devices.
148 * @dev: Device to be removed from the list.
149 */
150void device_pm_remove(struct device *dev)
151{
152 if (device_pm_not_required(dev))
153 return;
154
155 pr_debug("Removing info for %s:%s\n",
156 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
157 complete_all(&dev->power.completion);
158 mutex_lock(&dpm_list_mtx);
159 list_del_init(&dev->power.entry);
160 dev->power.in_dpm_list = false;
161 mutex_unlock(&dpm_list_mtx);
162 device_wakeup_disable(dev);
163 pm_runtime_remove(dev);
164 device_pm_check_callbacks(dev);
165}
166
167/**
168 * device_pm_move_before - Move device in the PM core's list of active devices.
169 * @deva: Device to move in dpm_list.
170 * @devb: Device @deva should come before.
171 */
172void device_pm_move_before(struct device *deva, struct device *devb)
173{
174 pr_debug("Moving %s:%s before %s:%s\n",
175 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
176 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
177 /* Delete deva from dpm_list and reinsert before devb. */
178 list_move_tail(&deva->power.entry, &devb->power.entry);
179}
180
181/**
182 * device_pm_move_after - Move device in the PM core's list of active devices.
183 * @deva: Device to move in dpm_list.
184 * @devb: Device @deva should come after.
185 */
186void device_pm_move_after(struct device *deva, struct device *devb)
187{
188 pr_debug("Moving %s:%s after %s:%s\n",
189 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
190 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
191 /* Delete deva from dpm_list and reinsert after devb. */
192 list_move(&deva->power.entry, &devb->power.entry);
193}
194
195/**
196 * device_pm_move_last - Move device to end of the PM core's list of devices.
197 * @dev: Device to move in dpm_list.
198 */
199void device_pm_move_last(struct device *dev)
200{
201 pr_debug("Moving %s:%s to end of list\n",
202 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
203 list_move_tail(&dev->power.entry, &dpm_list);
204}
205
206static ktime_t initcall_debug_start(struct device *dev, void *cb)
207{
208 if (!pm_print_times_enabled)
209 return 0;
210
211 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
212 task_pid_nr(current),
213 dev->parent ? dev_name(dev->parent) : "none");
214 return ktime_get();
215}
216
217static void initcall_debug_report(struct device *dev, ktime_t calltime,
218 void *cb, int error)
219{
220 ktime_t rettime;
221
222 if (!pm_print_times_enabled)
223 return;
224
225 rettime = ktime_get();
226 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
227 (unsigned long long)ktime_us_delta(rettime, calltime));
228}
229
230/**
231 * dpm_wait - Wait for a PM operation to complete.
232 * @dev: Device to wait for.
233 * @async: If unset, wait only if the device's power.async_suspend flag is set.
234 */
235static void dpm_wait(struct device *dev, bool async)
236{
237 if (!dev)
238 return;
239
240 if (async || (pm_async_enabled && dev->power.async_suspend))
241 wait_for_completion(&dev->power.completion);
242}
243
244static int dpm_wait_fn(struct device *dev, void *async_ptr)
245{
246 dpm_wait(dev, *((bool *)async_ptr));
247 return 0;
248}
249
250static void dpm_wait_for_children(struct device *dev, bool async)
251{
252 device_for_each_child(dev, &async, dpm_wait_fn);
253}
254
255static void dpm_wait_for_suppliers(struct device *dev, bool async)
256{
257 struct device_link *link;
258 int idx;
259
260 idx = device_links_read_lock();
261
262 /*
263 * If the supplier goes away right after we've checked the link to it,
264 * we'll wait for its completion to change the state, but that's fine,
265 * because the only things that will block as a result are the SRCU
266 * callbacks freeing the link objects for the links in the list we're
267 * walking.
268 */
269 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
270 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271 dpm_wait(link->supplier, async);
272
273 device_links_read_unlock(idx);
274}
275
276static bool dpm_wait_for_superior(struct device *dev, bool async)
277{
278 struct device *parent;
279
280 /*
281 * If the device is resumed asynchronously and the parent's callback
282 * deletes both the device and the parent itself, the parent object may
283 * be freed while this function is running, so avoid that by reference
284 * counting the parent once more unless the device has been deleted
285 * already (in which case return right away).
286 */
287 mutex_lock(&dpm_list_mtx);
288
289 if (!device_pm_initialized(dev)) {
290 mutex_unlock(&dpm_list_mtx);
291 return false;
292 }
293
294 parent = get_device(dev->parent);
295
296 mutex_unlock(&dpm_list_mtx);
297
298 dpm_wait(parent, async);
299 put_device(parent);
300
301 dpm_wait_for_suppliers(dev, async);
302
303 /*
304 * If the parent's callback has deleted the device, attempting to resume
305 * it would be invalid, so avoid doing that then.
306 */
307 return device_pm_initialized(dev);
308}
309
310static void dpm_wait_for_consumers(struct device *dev, bool async)
311{
312 struct device_link *link;
313 int idx;
314
315 idx = device_links_read_lock();
316
317 /*
318 * The status of a device link can only be changed from "dormant" by a
319 * probe, but that cannot happen during system suspend/resume. In
320 * theory it can change to "dormant" at that time, but then it is
321 * reasonable to wait for the target device anyway (eg. if it goes
322 * away, it's better to wait for it to go away completely and then
323 * continue instead of trying to continue in parallel with its
324 * unregistration).
325 */
326 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
327 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
328 dpm_wait(link->consumer, async);
329
330 device_links_read_unlock(idx);
331}
332
333static void dpm_wait_for_subordinate(struct device *dev, bool async)
334{
335 dpm_wait_for_children(dev, async);
336 dpm_wait_for_consumers(dev, async);
337}
338
339/**
340 * pm_op - Return the PM operation appropriate for given PM event.
341 * @ops: PM operations to choose from.
342 * @state: PM transition of the system being carried out.
343 */
344static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
345{
346 switch (state.event) {
347#ifdef CONFIG_SUSPEND
348 case PM_EVENT_SUSPEND:
349 return ops->suspend;
350 case PM_EVENT_RESUME:
351 return ops->resume;
352#endif /* CONFIG_SUSPEND */
353#ifdef CONFIG_HIBERNATE_CALLBACKS
354 case PM_EVENT_FREEZE:
355 case PM_EVENT_QUIESCE:
356 return ops->freeze;
357 case PM_EVENT_HIBERNATE:
358 return ops->poweroff;
359 case PM_EVENT_THAW:
360 case PM_EVENT_RECOVER:
361 return ops->thaw;
362 case PM_EVENT_RESTORE:
363 return ops->restore;
364#endif /* CONFIG_HIBERNATE_CALLBACKS */
365 }
366
367 return NULL;
368}
369
370/**
371 * pm_late_early_op - Return the PM operation appropriate for given PM event.
372 * @ops: PM operations to choose from.
373 * @state: PM transition of the system being carried out.
374 *
375 * Runtime PM is disabled for @dev while this function is being executed.
376 */
377static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
378 pm_message_t state)
379{
380 switch (state.event) {
381#ifdef CONFIG_SUSPEND
382 case PM_EVENT_SUSPEND:
383 return ops->suspend_late;
384 case PM_EVENT_RESUME:
385 return ops->resume_early;
386#endif /* CONFIG_SUSPEND */
387#ifdef CONFIG_HIBERNATE_CALLBACKS
388 case PM_EVENT_FREEZE:
389 case PM_EVENT_QUIESCE:
390 return ops->freeze_late;
391 case PM_EVENT_HIBERNATE:
392 return ops->poweroff_late;
393 case PM_EVENT_THAW:
394 case PM_EVENT_RECOVER:
395 return ops->thaw_early;
396 case PM_EVENT_RESTORE:
397 return ops->restore_early;
398#endif /* CONFIG_HIBERNATE_CALLBACKS */
399 }
400
401 return NULL;
402}
403
404/**
405 * pm_noirq_op - Return the PM operation appropriate for given PM event.
406 * @ops: PM operations to choose from.
407 * @state: PM transition of the system being carried out.
408 *
409 * The driver of @dev will not receive interrupts while this function is being
410 * executed.
411 */
412static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
413{
414 switch (state.event) {
415#ifdef CONFIG_SUSPEND
416 case PM_EVENT_SUSPEND:
417 return ops->suspend_noirq;
418 case PM_EVENT_RESUME:
419 return ops->resume_noirq;
420#endif /* CONFIG_SUSPEND */
421#ifdef CONFIG_HIBERNATE_CALLBACKS
422 case PM_EVENT_FREEZE:
423 case PM_EVENT_QUIESCE:
424 return ops->freeze_noirq;
425 case PM_EVENT_HIBERNATE:
426 return ops->poweroff_noirq;
427 case PM_EVENT_THAW:
428 case PM_EVENT_RECOVER:
429 return ops->thaw_noirq;
430 case PM_EVENT_RESTORE:
431 return ops->restore_noirq;
432#endif /* CONFIG_HIBERNATE_CALLBACKS */
433 }
434
435 return NULL;
436}
437
438static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
439{
440 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
441 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
442 ", may wakeup" : "", dev->power.driver_flags);
443}
444
445static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
446 int error)
447{
448 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
449 error);
450}
451
452static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
453 const char *info)
454{
455 ktime_t calltime;
456 u64 usecs64;
457 int usecs;
458
459 calltime = ktime_get();
460 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
461 do_div(usecs64, NSEC_PER_USEC);
462 usecs = usecs64;
463 if (usecs == 0)
464 usecs = 1;
465
466 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
467 info ?: "", info ? " " : "", pm_verb(state.event),
468 error ? "aborted" : "complete",
469 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
470}
471
472static int dpm_run_callback(pm_callback_t cb, struct device *dev,
473 pm_message_t state, const char *info)
474{
475 ktime_t calltime;
476 int error;
477
478 if (!cb)
479 return 0;
480
481 calltime = initcall_debug_start(dev, cb);
482
483 pm_dev_dbg(dev, state, info);
484 trace_device_pm_callback_start(dev, info, state.event);
485 error = cb(dev);
486 trace_device_pm_callback_end(dev, error);
487 suspend_report_result(dev, cb, error);
488
489 initcall_debug_report(dev, calltime, cb, error);
490
491 return error;
492}
493
494#ifdef CONFIG_DPM_WATCHDOG
495struct dpm_watchdog {
496 struct device *dev;
497 struct task_struct *tsk;
498 struct timer_list timer;
499};
500
501#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
502 struct dpm_watchdog wd
503
504/**
505 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
506 * @t: The timer that PM watchdog depends on.
507 *
508 * Called when a driver has timed out suspending or resuming.
509 * There's not much we can do here to recover so panic() to
510 * capture a crash-dump in pstore.
511 */
512static void dpm_watchdog_handler(struct timer_list *t)
513{
514 struct dpm_watchdog *wd = from_timer(wd, t, timer);
515
516 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
517 show_stack(wd->tsk, NULL, KERN_EMERG);
518 panic("%s %s: unrecoverable failure\n",
519 dev_driver_string(wd->dev), dev_name(wd->dev));
520}
521
522/**
523 * dpm_watchdog_set - Enable pm watchdog for given device.
524 * @wd: Watchdog. Must be allocated on the stack.
525 * @dev: Device to handle.
526 */
527static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
528{
529 struct timer_list *timer = &wd->timer;
530
531 wd->dev = dev;
532 wd->tsk = current;
533
534 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
535 /* use same timeout value for both suspend and resume */
536 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
537 add_timer(timer);
538}
539
540/**
541 * dpm_watchdog_clear - Disable suspend/resume watchdog.
542 * @wd: Watchdog to disable.
543 */
544static void dpm_watchdog_clear(struct dpm_watchdog *wd)
545{
546 struct timer_list *timer = &wd->timer;
547
548 del_timer_sync(timer);
549 destroy_timer_on_stack(timer);
550}
551#else
552#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
553#define dpm_watchdog_set(x, y)
554#define dpm_watchdog_clear(x)
555#endif
556
557/*------------------------- Resume routines -------------------------*/
558
559/**
560 * dev_pm_skip_resume - System-wide device resume optimization check.
561 * @dev: Target device.
562 *
563 * Return:
564 * - %false if the transition under way is RESTORE.
565 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
566 * - The logical negation of %power.must_resume otherwise (that is, when the
567 * transition under way is RESUME).
568 */
569bool dev_pm_skip_resume(struct device *dev)
570{
571 if (pm_transition.event == PM_EVENT_RESTORE)
572 return false;
573
574 if (pm_transition.event == PM_EVENT_THAW)
575 return dev_pm_skip_suspend(dev);
576
577 return !dev->power.must_resume;
578}
579
580static bool is_async(struct device *dev)
581{
582 return dev->power.async_suspend && pm_async_enabled
583 && !pm_trace_is_enabled();
584}
585
586static bool dpm_async_fn(struct device *dev, async_func_t func)
587{
588 reinit_completion(&dev->power.completion);
589
590 if (is_async(dev)) {
591 dev->power.async_in_progress = true;
592
593 get_device(dev);
594
595 if (async_schedule_dev_nocall(func, dev))
596 return true;
597
598 put_device(dev);
599 }
600 /*
601 * Because async_schedule_dev_nocall() above has returned false or it
602 * has not been called at all, func() is not running and it is safe to
603 * update the async_in_progress flag without extra synchronization.
604 */
605 dev->power.async_in_progress = false;
606 return false;
607}
608
609/**
610 * device_resume_noirq - Execute a "noirq resume" callback for given device.
611 * @dev: Device to handle.
612 * @state: PM transition of the system being carried out.
613 * @async: If true, the device is being resumed asynchronously.
614 *
615 * The driver of @dev will not receive interrupts while this function is being
616 * executed.
617 */
618static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
619{
620 pm_callback_t callback = NULL;
621 const char *info = NULL;
622 bool skip_resume;
623 int error = 0;
624
625 TRACE_DEVICE(dev);
626 TRACE_RESUME(0);
627
628 if (dev->power.syscore || dev->power.direct_complete)
629 goto Out;
630
631 if (!dev->power.is_noirq_suspended)
632 goto Out;
633
634 if (!dpm_wait_for_superior(dev, async))
635 goto Out;
636
637 skip_resume = dev_pm_skip_resume(dev);
638 /*
639 * If the driver callback is skipped below or by the middle layer
640 * callback and device_resume_early() also skips the driver callback for
641 * this device later, it needs to appear as "suspended" to PM-runtime,
642 * so change its status accordingly.
643 *
644 * Otherwise, the device is going to be resumed, so set its PM-runtime
645 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
646 * to avoid confusing drivers that don't use it.
647 */
648 if (skip_resume)
649 pm_runtime_set_suspended(dev);
650 else if (dev_pm_skip_suspend(dev))
651 pm_runtime_set_active(dev);
652
653 if (dev->pm_domain) {
654 info = "noirq power domain ";
655 callback = pm_noirq_op(&dev->pm_domain->ops, state);
656 } else if (dev->type && dev->type->pm) {
657 info = "noirq type ";
658 callback = pm_noirq_op(dev->type->pm, state);
659 } else if (dev->class && dev->class->pm) {
660 info = "noirq class ";
661 callback = pm_noirq_op(dev->class->pm, state);
662 } else if (dev->bus && dev->bus->pm) {
663 info = "noirq bus ";
664 callback = pm_noirq_op(dev->bus->pm, state);
665 }
666 if (callback)
667 goto Run;
668
669 if (skip_resume)
670 goto Skip;
671
672 if (dev->driver && dev->driver->pm) {
673 info = "noirq driver ";
674 callback = pm_noirq_op(dev->driver->pm, state);
675 }
676
677Run:
678 error = dpm_run_callback(callback, dev, state, info);
679
680Skip:
681 dev->power.is_noirq_suspended = false;
682
683Out:
684 complete_all(&dev->power.completion);
685 TRACE_RESUME(error);
686
687 if (error) {
688 async_error = error;
689 dpm_save_failed_dev(dev_name(dev));
690 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
691 }
692}
693
694static void async_resume_noirq(void *data, async_cookie_t cookie)
695{
696 struct device *dev = data;
697
698 device_resume_noirq(dev, pm_transition, true);
699 put_device(dev);
700}
701
702static void dpm_noirq_resume_devices(pm_message_t state)
703{
704 struct device *dev;
705 ktime_t starttime = ktime_get();
706
707 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
708
709 async_error = 0;
710 pm_transition = state;
711
712 mutex_lock(&dpm_list_mtx);
713
714 /*
715 * Trigger the resume of "async" devices upfront so they don't have to
716 * wait for the "non-async" ones they don't depend on.
717 */
718 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
719 dpm_async_fn(dev, async_resume_noirq);
720
721 while (!list_empty(&dpm_noirq_list)) {
722 dev = to_device(dpm_noirq_list.next);
723 list_move_tail(&dev->power.entry, &dpm_late_early_list);
724
725 if (!dev->power.async_in_progress) {
726 get_device(dev);
727
728 mutex_unlock(&dpm_list_mtx);
729
730 device_resume_noirq(dev, state, false);
731
732 put_device(dev);
733
734 mutex_lock(&dpm_list_mtx);
735 }
736 }
737 mutex_unlock(&dpm_list_mtx);
738 async_synchronize_full();
739 dpm_show_time(starttime, state, 0, "noirq");
740 if (async_error)
741 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
742
743 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
744}
745
746/**
747 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
748 * @state: PM transition of the system being carried out.
749 *
750 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
751 * allow device drivers' interrupt handlers to be called.
752 */
753void dpm_resume_noirq(pm_message_t state)
754{
755 dpm_noirq_resume_devices(state);
756
757 resume_device_irqs();
758 device_wakeup_disarm_wake_irqs();
759}
760
761/**
762 * device_resume_early - Execute an "early resume" callback for given device.
763 * @dev: Device to handle.
764 * @state: PM transition of the system being carried out.
765 * @async: If true, the device is being resumed asynchronously.
766 *
767 * Runtime PM is disabled for @dev while this function is being executed.
768 */
769static void device_resume_early(struct device *dev, pm_message_t state, bool async)
770{
771 pm_callback_t callback = NULL;
772 const char *info = NULL;
773 int error = 0;
774
775 TRACE_DEVICE(dev);
776 TRACE_RESUME(0);
777
778 if (dev->power.syscore || dev->power.direct_complete)
779 goto Out;
780
781 if (!dev->power.is_late_suspended)
782 goto Out;
783
784 if (!dpm_wait_for_superior(dev, async))
785 goto Out;
786
787 if (dev->pm_domain) {
788 info = "early power domain ";
789 callback = pm_late_early_op(&dev->pm_domain->ops, state);
790 } else if (dev->type && dev->type->pm) {
791 info = "early type ";
792 callback = pm_late_early_op(dev->type->pm, state);
793 } else if (dev->class && dev->class->pm) {
794 info = "early class ";
795 callback = pm_late_early_op(dev->class->pm, state);
796 } else if (dev->bus && dev->bus->pm) {
797 info = "early bus ";
798 callback = pm_late_early_op(dev->bus->pm, state);
799 }
800 if (callback)
801 goto Run;
802
803 if (dev_pm_skip_resume(dev))
804 goto Skip;
805
806 if (dev->driver && dev->driver->pm) {
807 info = "early driver ";
808 callback = pm_late_early_op(dev->driver->pm, state);
809 }
810
811Run:
812 error = dpm_run_callback(callback, dev, state, info);
813
814Skip:
815 dev->power.is_late_suspended = false;
816
817Out:
818 TRACE_RESUME(error);
819
820 pm_runtime_enable(dev);
821 complete_all(&dev->power.completion);
822
823 if (error) {
824 async_error = error;
825 dpm_save_failed_dev(dev_name(dev));
826 pm_dev_err(dev, state, async ? " async early" : " early", error);
827 }
828}
829
830static void async_resume_early(void *data, async_cookie_t cookie)
831{
832 struct device *dev = data;
833
834 device_resume_early(dev, pm_transition, true);
835 put_device(dev);
836}
837
838/**
839 * dpm_resume_early - Execute "early resume" callbacks for all devices.
840 * @state: PM transition of the system being carried out.
841 */
842void dpm_resume_early(pm_message_t state)
843{
844 struct device *dev;
845 ktime_t starttime = ktime_get();
846
847 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
848
849 async_error = 0;
850 pm_transition = state;
851
852 mutex_lock(&dpm_list_mtx);
853
854 /*
855 * Trigger the resume of "async" devices upfront so they don't have to
856 * wait for the "non-async" ones they don't depend on.
857 */
858 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
859 dpm_async_fn(dev, async_resume_early);
860
861 while (!list_empty(&dpm_late_early_list)) {
862 dev = to_device(dpm_late_early_list.next);
863 list_move_tail(&dev->power.entry, &dpm_suspended_list);
864
865 if (!dev->power.async_in_progress) {
866 get_device(dev);
867
868 mutex_unlock(&dpm_list_mtx);
869
870 device_resume_early(dev, state, false);
871
872 put_device(dev);
873
874 mutex_lock(&dpm_list_mtx);
875 }
876 }
877 mutex_unlock(&dpm_list_mtx);
878 async_synchronize_full();
879 dpm_show_time(starttime, state, 0, "early");
880 if (async_error)
881 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
882
883 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
884}
885
886/**
887 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
888 * @state: PM transition of the system being carried out.
889 */
890void dpm_resume_start(pm_message_t state)
891{
892 dpm_resume_noirq(state);
893 dpm_resume_early(state);
894}
895EXPORT_SYMBOL_GPL(dpm_resume_start);
896
897/**
898 * device_resume - Execute "resume" callbacks for given device.
899 * @dev: Device to handle.
900 * @state: PM transition of the system being carried out.
901 * @async: If true, the device is being resumed asynchronously.
902 */
903static void device_resume(struct device *dev, pm_message_t state, bool async)
904{
905 pm_callback_t callback = NULL;
906 const char *info = NULL;
907 int error = 0;
908 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
909
910 TRACE_DEVICE(dev);
911 TRACE_RESUME(0);
912
913 if (dev->power.syscore)
914 goto Complete;
915
916 if (dev->power.direct_complete) {
917 /* Match the pm_runtime_disable() in __device_suspend(). */
918 pm_runtime_enable(dev);
919 goto Complete;
920 }
921
922 if (!dpm_wait_for_superior(dev, async))
923 goto Complete;
924
925 dpm_watchdog_set(&wd, dev);
926 device_lock(dev);
927
928 /*
929 * This is a fib. But we'll allow new children to be added below
930 * a resumed device, even if the device hasn't been completed yet.
931 */
932 dev->power.is_prepared = false;
933
934 if (!dev->power.is_suspended)
935 goto Unlock;
936
937 if (dev->pm_domain) {
938 info = "power domain ";
939 callback = pm_op(&dev->pm_domain->ops, state);
940 goto Driver;
941 }
942
943 if (dev->type && dev->type->pm) {
944 info = "type ";
945 callback = pm_op(dev->type->pm, state);
946 goto Driver;
947 }
948
949 if (dev->class && dev->class->pm) {
950 info = "class ";
951 callback = pm_op(dev->class->pm, state);
952 goto Driver;
953 }
954
955 if (dev->bus) {
956 if (dev->bus->pm) {
957 info = "bus ";
958 callback = pm_op(dev->bus->pm, state);
959 } else if (dev->bus->resume) {
960 info = "legacy bus ";
961 callback = dev->bus->resume;
962 goto End;
963 }
964 }
965
966 Driver:
967 if (!callback && dev->driver && dev->driver->pm) {
968 info = "driver ";
969 callback = pm_op(dev->driver->pm, state);
970 }
971
972 End:
973 error = dpm_run_callback(callback, dev, state, info);
974 dev->power.is_suspended = false;
975
976 Unlock:
977 device_unlock(dev);
978 dpm_watchdog_clear(&wd);
979
980 Complete:
981 complete_all(&dev->power.completion);
982
983 TRACE_RESUME(error);
984
985 if (error) {
986 async_error = error;
987 dpm_save_failed_dev(dev_name(dev));
988 pm_dev_err(dev, state, async ? " async" : "", error);
989 }
990}
991
992static void async_resume(void *data, async_cookie_t cookie)
993{
994 struct device *dev = data;
995
996 device_resume(dev, pm_transition, true);
997 put_device(dev);
998}
999
1000/**
1001 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1002 * @state: PM transition of the system being carried out.
1003 *
1004 * Execute the appropriate "resume" callback for all devices whose status
1005 * indicates that they are suspended.
1006 */
1007void dpm_resume(pm_message_t state)
1008{
1009 struct device *dev;
1010 ktime_t starttime = ktime_get();
1011
1012 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1013 might_sleep();
1014
1015 pm_transition = state;
1016 async_error = 0;
1017
1018 mutex_lock(&dpm_list_mtx);
1019
1020 /*
1021 * Trigger the resume of "async" devices upfront so they don't have to
1022 * wait for the "non-async" ones they don't depend on.
1023 */
1024 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1025 dpm_async_fn(dev, async_resume);
1026
1027 while (!list_empty(&dpm_suspended_list)) {
1028 dev = to_device(dpm_suspended_list.next);
1029 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1030
1031 if (!dev->power.async_in_progress) {
1032 get_device(dev);
1033
1034 mutex_unlock(&dpm_list_mtx);
1035
1036 device_resume(dev, state, false);
1037
1038 put_device(dev);
1039
1040 mutex_lock(&dpm_list_mtx);
1041 }
1042 }
1043 mutex_unlock(&dpm_list_mtx);
1044 async_synchronize_full();
1045 dpm_show_time(starttime, state, 0, NULL);
1046 if (async_error)
1047 dpm_save_failed_step(SUSPEND_RESUME);
1048
1049 cpufreq_resume();
1050 devfreq_resume();
1051 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1052}
1053
1054/**
1055 * device_complete - Complete a PM transition for given device.
1056 * @dev: Device to handle.
1057 * @state: PM transition of the system being carried out.
1058 */
1059static void device_complete(struct device *dev, pm_message_t state)
1060{
1061 void (*callback)(struct device *) = NULL;
1062 const char *info = NULL;
1063
1064 if (dev->power.syscore)
1065 goto out;
1066
1067 device_lock(dev);
1068
1069 if (dev->pm_domain) {
1070 info = "completing power domain ";
1071 callback = dev->pm_domain->ops.complete;
1072 } else if (dev->type && dev->type->pm) {
1073 info = "completing type ";
1074 callback = dev->type->pm->complete;
1075 } else if (dev->class && dev->class->pm) {
1076 info = "completing class ";
1077 callback = dev->class->pm->complete;
1078 } else if (dev->bus && dev->bus->pm) {
1079 info = "completing bus ";
1080 callback = dev->bus->pm->complete;
1081 }
1082
1083 if (!callback && dev->driver && dev->driver->pm) {
1084 info = "completing driver ";
1085 callback = dev->driver->pm->complete;
1086 }
1087
1088 if (callback) {
1089 pm_dev_dbg(dev, state, info);
1090 callback(dev);
1091 }
1092
1093 device_unlock(dev);
1094
1095out:
1096 pm_runtime_put(dev);
1097}
1098
1099/**
1100 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1101 * @state: PM transition of the system being carried out.
1102 *
1103 * Execute the ->complete() callbacks for all devices whose PM status is not
1104 * DPM_ON (this allows new devices to be registered).
1105 */
1106void dpm_complete(pm_message_t state)
1107{
1108 struct list_head list;
1109
1110 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1111 might_sleep();
1112
1113 INIT_LIST_HEAD(&list);
1114 mutex_lock(&dpm_list_mtx);
1115 while (!list_empty(&dpm_prepared_list)) {
1116 struct device *dev = to_device(dpm_prepared_list.prev);
1117
1118 get_device(dev);
1119 dev->power.is_prepared = false;
1120 list_move(&dev->power.entry, &list);
1121
1122 mutex_unlock(&dpm_list_mtx);
1123
1124 trace_device_pm_callback_start(dev, "", state.event);
1125 device_complete(dev, state);
1126 trace_device_pm_callback_end(dev, 0);
1127
1128 put_device(dev);
1129
1130 mutex_lock(&dpm_list_mtx);
1131 }
1132 list_splice(&list, &dpm_list);
1133 mutex_unlock(&dpm_list_mtx);
1134
1135 /* Allow device probing and trigger re-probing of deferred devices */
1136 device_unblock_probing();
1137 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1138}
1139
1140/**
1141 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1142 * @state: PM transition of the system being carried out.
1143 *
1144 * Execute "resume" callbacks for all devices and complete the PM transition of
1145 * the system.
1146 */
1147void dpm_resume_end(pm_message_t state)
1148{
1149 dpm_resume(state);
1150 dpm_complete(state);
1151}
1152EXPORT_SYMBOL_GPL(dpm_resume_end);
1153
1154
1155/*------------------------- Suspend routines -------------------------*/
1156
1157/**
1158 * resume_event - Return a "resume" message for given "suspend" sleep state.
1159 * @sleep_state: PM message representing a sleep state.
1160 *
1161 * Return a PM message representing the resume event corresponding to given
1162 * sleep state.
1163 */
1164static pm_message_t resume_event(pm_message_t sleep_state)
1165{
1166 switch (sleep_state.event) {
1167 case PM_EVENT_SUSPEND:
1168 return PMSG_RESUME;
1169 case PM_EVENT_FREEZE:
1170 case PM_EVENT_QUIESCE:
1171 return PMSG_RECOVER;
1172 case PM_EVENT_HIBERNATE:
1173 return PMSG_RESTORE;
1174 }
1175 return PMSG_ON;
1176}
1177
1178static void dpm_superior_set_must_resume(struct device *dev)
1179{
1180 struct device_link *link;
1181 int idx;
1182
1183 if (dev->parent)
1184 dev->parent->power.must_resume = true;
1185
1186 idx = device_links_read_lock();
1187
1188 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1189 link->supplier->power.must_resume = true;
1190
1191 device_links_read_unlock(idx);
1192}
1193
1194/**
1195 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1196 * @dev: Device to handle.
1197 * @state: PM transition of the system being carried out.
1198 * @async: If true, the device is being suspended asynchronously.
1199 *
1200 * The driver of @dev will not receive interrupts while this function is being
1201 * executed.
1202 */
1203static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1204{
1205 pm_callback_t callback = NULL;
1206 const char *info = NULL;
1207 int error = 0;
1208
1209 TRACE_DEVICE(dev);
1210 TRACE_SUSPEND(0);
1211
1212 dpm_wait_for_subordinate(dev, async);
1213
1214 if (async_error)
1215 goto Complete;
1216
1217 if (dev->power.syscore || dev->power.direct_complete)
1218 goto Complete;
1219
1220 if (dev->pm_domain) {
1221 info = "noirq power domain ";
1222 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1223 } else if (dev->type && dev->type->pm) {
1224 info = "noirq type ";
1225 callback = pm_noirq_op(dev->type->pm, state);
1226 } else if (dev->class && dev->class->pm) {
1227 info = "noirq class ";
1228 callback = pm_noirq_op(dev->class->pm, state);
1229 } else if (dev->bus && dev->bus->pm) {
1230 info = "noirq bus ";
1231 callback = pm_noirq_op(dev->bus->pm, state);
1232 }
1233 if (callback)
1234 goto Run;
1235
1236 if (dev_pm_skip_suspend(dev))
1237 goto Skip;
1238
1239 if (dev->driver && dev->driver->pm) {
1240 info = "noirq driver ";
1241 callback = pm_noirq_op(dev->driver->pm, state);
1242 }
1243
1244Run:
1245 error = dpm_run_callback(callback, dev, state, info);
1246 if (error) {
1247 async_error = error;
1248 dpm_save_failed_dev(dev_name(dev));
1249 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1250 goto Complete;
1251 }
1252
1253Skip:
1254 dev->power.is_noirq_suspended = true;
1255
1256 /*
1257 * Skipping the resume of devices that were in use right before the
1258 * system suspend (as indicated by their PM-runtime usage counters)
1259 * would be suboptimal. Also resume them if doing that is not allowed
1260 * to be skipped.
1261 */
1262 if (atomic_read(&dev->power.usage_count) > 1 ||
1263 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1264 dev->power.may_skip_resume))
1265 dev->power.must_resume = true;
1266
1267 if (dev->power.must_resume)
1268 dpm_superior_set_must_resume(dev);
1269
1270Complete:
1271 complete_all(&dev->power.completion);
1272 TRACE_SUSPEND(error);
1273 return error;
1274}
1275
1276static void async_suspend_noirq(void *data, async_cookie_t cookie)
1277{
1278 struct device *dev = data;
1279
1280 device_suspend_noirq(dev, pm_transition, true);
1281 put_device(dev);
1282}
1283
1284static int dpm_noirq_suspend_devices(pm_message_t state)
1285{
1286 ktime_t starttime = ktime_get();
1287 int error = 0;
1288
1289 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1290
1291 pm_transition = state;
1292 async_error = 0;
1293
1294 mutex_lock(&dpm_list_mtx);
1295
1296 while (!list_empty(&dpm_late_early_list)) {
1297 struct device *dev = to_device(dpm_late_early_list.prev);
1298
1299 list_move(&dev->power.entry, &dpm_noirq_list);
1300
1301 if (dpm_async_fn(dev, async_suspend_noirq))
1302 continue;
1303
1304 get_device(dev);
1305
1306 mutex_unlock(&dpm_list_mtx);
1307
1308 error = device_suspend_noirq(dev, state, false);
1309
1310 put_device(dev);
1311
1312 mutex_lock(&dpm_list_mtx);
1313
1314 if (error || async_error)
1315 break;
1316 }
1317
1318 mutex_unlock(&dpm_list_mtx);
1319
1320 async_synchronize_full();
1321 if (!error)
1322 error = async_error;
1323
1324 if (error)
1325 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1326
1327 dpm_show_time(starttime, state, error, "noirq");
1328 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1329 return error;
1330}
1331
1332/**
1333 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1334 * @state: PM transition of the system being carried out.
1335 *
1336 * Prevent device drivers' interrupt handlers from being called and invoke
1337 * "noirq" suspend callbacks for all non-sysdev devices.
1338 */
1339int dpm_suspend_noirq(pm_message_t state)
1340{
1341 int ret;
1342
1343 device_wakeup_arm_wake_irqs();
1344 suspend_device_irqs();
1345
1346 ret = dpm_noirq_suspend_devices(state);
1347 if (ret)
1348 dpm_resume_noirq(resume_event(state));
1349
1350 return ret;
1351}
1352
1353static void dpm_propagate_wakeup_to_parent(struct device *dev)
1354{
1355 struct device *parent = dev->parent;
1356
1357 if (!parent)
1358 return;
1359
1360 spin_lock_irq(&parent->power.lock);
1361
1362 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1363 parent->power.wakeup_path = true;
1364
1365 spin_unlock_irq(&parent->power.lock);
1366}
1367
1368/**
1369 * device_suspend_late - Execute a "late suspend" callback for given device.
1370 * @dev: Device to handle.
1371 * @state: PM transition of the system being carried out.
1372 * @async: If true, the device is being suspended asynchronously.
1373 *
1374 * Runtime PM is disabled for @dev while this function is being executed.
1375 */
1376static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1377{
1378 pm_callback_t callback = NULL;
1379 const char *info = NULL;
1380 int error = 0;
1381
1382 TRACE_DEVICE(dev);
1383 TRACE_SUSPEND(0);
1384
1385 __pm_runtime_disable(dev, false);
1386
1387 dpm_wait_for_subordinate(dev, async);
1388
1389 if (async_error)
1390 goto Complete;
1391
1392 if (pm_wakeup_pending()) {
1393 async_error = -EBUSY;
1394 goto Complete;
1395 }
1396
1397 if (dev->power.syscore || dev->power.direct_complete)
1398 goto Complete;
1399
1400 if (dev->pm_domain) {
1401 info = "late power domain ";
1402 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1403 } else if (dev->type && dev->type->pm) {
1404 info = "late type ";
1405 callback = pm_late_early_op(dev->type->pm, state);
1406 } else if (dev->class && dev->class->pm) {
1407 info = "late class ";
1408 callback = pm_late_early_op(dev->class->pm, state);
1409 } else if (dev->bus && dev->bus->pm) {
1410 info = "late bus ";
1411 callback = pm_late_early_op(dev->bus->pm, state);
1412 }
1413 if (callback)
1414 goto Run;
1415
1416 if (dev_pm_skip_suspend(dev))
1417 goto Skip;
1418
1419 if (dev->driver && dev->driver->pm) {
1420 info = "late driver ";
1421 callback = pm_late_early_op(dev->driver->pm, state);
1422 }
1423
1424Run:
1425 error = dpm_run_callback(callback, dev, state, info);
1426 if (error) {
1427 async_error = error;
1428 dpm_save_failed_dev(dev_name(dev));
1429 pm_dev_err(dev, state, async ? " async late" : " late", error);
1430 goto Complete;
1431 }
1432 dpm_propagate_wakeup_to_parent(dev);
1433
1434Skip:
1435 dev->power.is_late_suspended = true;
1436
1437Complete:
1438 TRACE_SUSPEND(error);
1439 complete_all(&dev->power.completion);
1440 return error;
1441}
1442
1443static void async_suspend_late(void *data, async_cookie_t cookie)
1444{
1445 struct device *dev = data;
1446
1447 device_suspend_late(dev, pm_transition, true);
1448 put_device(dev);
1449}
1450
1451/**
1452 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1453 * @state: PM transition of the system being carried out.
1454 */
1455int dpm_suspend_late(pm_message_t state)
1456{
1457 ktime_t starttime = ktime_get();
1458 int error = 0;
1459
1460 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1461
1462 pm_transition = state;
1463 async_error = 0;
1464
1465 wake_up_all_idle_cpus();
1466
1467 mutex_lock(&dpm_list_mtx);
1468
1469 while (!list_empty(&dpm_suspended_list)) {
1470 struct device *dev = to_device(dpm_suspended_list.prev);
1471
1472 list_move(&dev->power.entry, &dpm_late_early_list);
1473
1474 if (dpm_async_fn(dev, async_suspend_late))
1475 continue;
1476
1477 get_device(dev);
1478
1479 mutex_unlock(&dpm_list_mtx);
1480
1481 error = device_suspend_late(dev, state, false);
1482
1483 put_device(dev);
1484
1485 mutex_lock(&dpm_list_mtx);
1486
1487 if (error || async_error)
1488 break;
1489 }
1490
1491 mutex_unlock(&dpm_list_mtx);
1492
1493 async_synchronize_full();
1494 if (!error)
1495 error = async_error;
1496
1497 if (error) {
1498 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1499 dpm_resume_early(resume_event(state));
1500 }
1501 dpm_show_time(starttime, state, error, "late");
1502 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1503 return error;
1504}
1505
1506/**
1507 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1508 * @state: PM transition of the system being carried out.
1509 */
1510int dpm_suspend_end(pm_message_t state)
1511{
1512 ktime_t starttime = ktime_get();
1513 int error;
1514
1515 error = dpm_suspend_late(state);
1516 if (error)
1517 goto out;
1518
1519 error = dpm_suspend_noirq(state);
1520 if (error)
1521 dpm_resume_early(resume_event(state));
1522
1523out:
1524 dpm_show_time(starttime, state, error, "end");
1525 return error;
1526}
1527EXPORT_SYMBOL_GPL(dpm_suspend_end);
1528
1529/**
1530 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1531 * @dev: Device to suspend.
1532 * @state: PM transition of the system being carried out.
1533 * @cb: Suspend callback to execute.
1534 * @info: string description of caller.
1535 */
1536static int legacy_suspend(struct device *dev, pm_message_t state,
1537 int (*cb)(struct device *dev, pm_message_t state),
1538 const char *info)
1539{
1540 int error;
1541 ktime_t calltime;
1542
1543 calltime = initcall_debug_start(dev, cb);
1544
1545 trace_device_pm_callback_start(dev, info, state.event);
1546 error = cb(dev, state);
1547 trace_device_pm_callback_end(dev, error);
1548 suspend_report_result(dev, cb, error);
1549
1550 initcall_debug_report(dev, calltime, cb, error);
1551
1552 return error;
1553}
1554
1555static void dpm_clear_superiors_direct_complete(struct device *dev)
1556{
1557 struct device_link *link;
1558 int idx;
1559
1560 if (dev->parent) {
1561 spin_lock_irq(&dev->parent->power.lock);
1562 dev->parent->power.direct_complete = false;
1563 spin_unlock_irq(&dev->parent->power.lock);
1564 }
1565
1566 idx = device_links_read_lock();
1567
1568 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1569 spin_lock_irq(&link->supplier->power.lock);
1570 link->supplier->power.direct_complete = false;
1571 spin_unlock_irq(&link->supplier->power.lock);
1572 }
1573
1574 device_links_read_unlock(idx);
1575}
1576
1577/**
1578 * device_suspend - Execute "suspend" callbacks for given device.
1579 * @dev: Device to handle.
1580 * @state: PM transition of the system being carried out.
1581 * @async: If true, the device is being suspended asynchronously.
1582 */
1583static int device_suspend(struct device *dev, pm_message_t state, bool async)
1584{
1585 pm_callback_t callback = NULL;
1586 const char *info = NULL;
1587 int error = 0;
1588 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1589
1590 TRACE_DEVICE(dev);
1591 TRACE_SUSPEND(0);
1592
1593 dpm_wait_for_subordinate(dev, async);
1594
1595 if (async_error) {
1596 dev->power.direct_complete = false;
1597 goto Complete;
1598 }
1599
1600 /*
1601 * Wait for possible runtime PM transitions of the device in progress
1602 * to complete and if there's a runtime resume request pending for it,
1603 * resume it before proceeding with invoking the system-wide suspend
1604 * callbacks for it.
1605 *
1606 * If the system-wide suspend callbacks below change the configuration
1607 * of the device, they must disable runtime PM for it or otherwise
1608 * ensure that its runtime-resume callbacks will not be confused by that
1609 * change in case they are invoked going forward.
1610 */
1611 pm_runtime_barrier(dev);
1612
1613 if (pm_wakeup_pending()) {
1614 dev->power.direct_complete = false;
1615 async_error = -EBUSY;
1616 goto Complete;
1617 }
1618
1619 if (dev->power.syscore)
1620 goto Complete;
1621
1622 /* Avoid direct_complete to let wakeup_path propagate. */
1623 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1624 dev->power.direct_complete = false;
1625
1626 if (dev->power.direct_complete) {
1627 if (pm_runtime_status_suspended(dev)) {
1628 pm_runtime_disable(dev);
1629 if (pm_runtime_status_suspended(dev)) {
1630 pm_dev_dbg(dev, state, "direct-complete ");
1631 goto Complete;
1632 }
1633
1634 pm_runtime_enable(dev);
1635 }
1636 dev->power.direct_complete = false;
1637 }
1638
1639 dev->power.may_skip_resume = true;
1640 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1641
1642 dpm_watchdog_set(&wd, dev);
1643 device_lock(dev);
1644
1645 if (dev->pm_domain) {
1646 info = "power domain ";
1647 callback = pm_op(&dev->pm_domain->ops, state);
1648 goto Run;
1649 }
1650
1651 if (dev->type && dev->type->pm) {
1652 info = "type ";
1653 callback = pm_op(dev->type->pm, state);
1654 goto Run;
1655 }
1656
1657 if (dev->class && dev->class->pm) {
1658 info = "class ";
1659 callback = pm_op(dev->class->pm, state);
1660 goto Run;
1661 }
1662
1663 if (dev->bus) {
1664 if (dev->bus->pm) {
1665 info = "bus ";
1666 callback = pm_op(dev->bus->pm, state);
1667 } else if (dev->bus->suspend) {
1668 pm_dev_dbg(dev, state, "legacy bus ");
1669 error = legacy_suspend(dev, state, dev->bus->suspend,
1670 "legacy bus ");
1671 goto End;
1672 }
1673 }
1674
1675 Run:
1676 if (!callback && dev->driver && dev->driver->pm) {
1677 info = "driver ";
1678 callback = pm_op(dev->driver->pm, state);
1679 }
1680
1681 error = dpm_run_callback(callback, dev, state, info);
1682
1683 End:
1684 if (!error) {
1685 dev->power.is_suspended = true;
1686 if (device_may_wakeup(dev))
1687 dev->power.wakeup_path = true;
1688
1689 dpm_propagate_wakeup_to_parent(dev);
1690 dpm_clear_superiors_direct_complete(dev);
1691 }
1692
1693 device_unlock(dev);
1694 dpm_watchdog_clear(&wd);
1695
1696 Complete:
1697 if (error) {
1698 async_error = error;
1699 dpm_save_failed_dev(dev_name(dev));
1700 pm_dev_err(dev, state, async ? " async" : "", error);
1701 }
1702
1703 complete_all(&dev->power.completion);
1704 TRACE_SUSPEND(error);
1705 return error;
1706}
1707
1708static void async_suspend(void *data, async_cookie_t cookie)
1709{
1710 struct device *dev = data;
1711
1712 device_suspend(dev, pm_transition, true);
1713 put_device(dev);
1714}
1715
1716/**
1717 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1718 * @state: PM transition of the system being carried out.
1719 */
1720int dpm_suspend(pm_message_t state)
1721{
1722 ktime_t starttime = ktime_get();
1723 int error = 0;
1724
1725 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1726 might_sleep();
1727
1728 devfreq_suspend();
1729 cpufreq_suspend();
1730
1731 pm_transition = state;
1732 async_error = 0;
1733
1734 mutex_lock(&dpm_list_mtx);
1735
1736 while (!list_empty(&dpm_prepared_list)) {
1737 struct device *dev = to_device(dpm_prepared_list.prev);
1738
1739 list_move(&dev->power.entry, &dpm_suspended_list);
1740
1741 if (dpm_async_fn(dev, async_suspend))
1742 continue;
1743
1744 get_device(dev);
1745
1746 mutex_unlock(&dpm_list_mtx);
1747
1748 error = device_suspend(dev, state, false);
1749
1750 put_device(dev);
1751
1752 mutex_lock(&dpm_list_mtx);
1753
1754 if (error || async_error)
1755 break;
1756 }
1757
1758 mutex_unlock(&dpm_list_mtx);
1759
1760 async_synchronize_full();
1761 if (!error)
1762 error = async_error;
1763
1764 if (error)
1765 dpm_save_failed_step(SUSPEND_SUSPEND);
1766
1767 dpm_show_time(starttime, state, error, NULL);
1768 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1769 return error;
1770}
1771
1772/**
1773 * device_prepare - Prepare a device for system power transition.
1774 * @dev: Device to handle.
1775 * @state: PM transition of the system being carried out.
1776 *
1777 * Execute the ->prepare() callback(s) for given device. No new children of the
1778 * device may be registered after this function has returned.
1779 */
1780static int device_prepare(struct device *dev, pm_message_t state)
1781{
1782 int (*callback)(struct device *) = NULL;
1783 int ret = 0;
1784
1785 /*
1786 * If a device's parent goes into runtime suspend at the wrong time,
1787 * it won't be possible to resume the device. To prevent this we
1788 * block runtime suspend here, during the prepare phase, and allow
1789 * it again during the complete phase.
1790 */
1791 pm_runtime_get_noresume(dev);
1792
1793 if (dev->power.syscore)
1794 return 0;
1795
1796 device_lock(dev);
1797
1798 dev->power.wakeup_path = false;
1799
1800 if (dev->power.no_pm_callbacks)
1801 goto unlock;
1802
1803 if (dev->pm_domain)
1804 callback = dev->pm_domain->ops.prepare;
1805 else if (dev->type && dev->type->pm)
1806 callback = dev->type->pm->prepare;
1807 else if (dev->class && dev->class->pm)
1808 callback = dev->class->pm->prepare;
1809 else if (dev->bus && dev->bus->pm)
1810 callback = dev->bus->pm->prepare;
1811
1812 if (!callback && dev->driver && dev->driver->pm)
1813 callback = dev->driver->pm->prepare;
1814
1815 if (callback)
1816 ret = callback(dev);
1817
1818unlock:
1819 device_unlock(dev);
1820
1821 if (ret < 0) {
1822 suspend_report_result(dev, callback, ret);
1823 pm_runtime_put(dev);
1824 return ret;
1825 }
1826 /*
1827 * A positive return value from ->prepare() means "this device appears
1828 * to be runtime-suspended and its state is fine, so if it really is
1829 * runtime-suspended, you can leave it in that state provided that you
1830 * will do the same thing with all of its descendants". This only
1831 * applies to suspend transitions, however.
1832 */
1833 spin_lock_irq(&dev->power.lock);
1834 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1835 (ret > 0 || dev->power.no_pm_callbacks) &&
1836 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1837 spin_unlock_irq(&dev->power.lock);
1838 return 0;
1839}
1840
1841/**
1842 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1843 * @state: PM transition of the system being carried out.
1844 *
1845 * Execute the ->prepare() callback(s) for all devices.
1846 */
1847int dpm_prepare(pm_message_t state)
1848{
1849 int error = 0;
1850
1851 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1852 might_sleep();
1853
1854 /*
1855 * Give a chance for the known devices to complete their probes, before
1856 * disable probing of devices. This sync point is important at least
1857 * at boot time + hibernation restore.
1858 */
1859 wait_for_device_probe();
1860 /*
1861 * It is unsafe if probing of devices will happen during suspend or
1862 * hibernation and system behavior will be unpredictable in this case.
1863 * So, let's prohibit device's probing here and defer their probes
1864 * instead. The normal behavior will be restored in dpm_complete().
1865 */
1866 device_block_probing();
1867
1868 mutex_lock(&dpm_list_mtx);
1869 while (!list_empty(&dpm_list) && !error) {
1870 struct device *dev = to_device(dpm_list.next);
1871
1872 get_device(dev);
1873
1874 mutex_unlock(&dpm_list_mtx);
1875
1876 trace_device_pm_callback_start(dev, "", state.event);
1877 error = device_prepare(dev, state);
1878 trace_device_pm_callback_end(dev, error);
1879
1880 mutex_lock(&dpm_list_mtx);
1881
1882 if (!error) {
1883 dev->power.is_prepared = true;
1884 if (!list_empty(&dev->power.entry))
1885 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1886 } else if (error == -EAGAIN) {
1887 error = 0;
1888 } else {
1889 dev_info(dev, "not prepared for power transition: code %d\n",
1890 error);
1891 }
1892
1893 mutex_unlock(&dpm_list_mtx);
1894
1895 put_device(dev);
1896
1897 mutex_lock(&dpm_list_mtx);
1898 }
1899 mutex_unlock(&dpm_list_mtx);
1900 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1901 return error;
1902}
1903
1904/**
1905 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1906 * @state: PM transition of the system being carried out.
1907 *
1908 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1909 * callbacks for them.
1910 */
1911int dpm_suspend_start(pm_message_t state)
1912{
1913 ktime_t starttime = ktime_get();
1914 int error;
1915
1916 error = dpm_prepare(state);
1917 if (error)
1918 dpm_save_failed_step(SUSPEND_PREPARE);
1919 else
1920 error = dpm_suspend(state);
1921
1922 dpm_show_time(starttime, state, error, "start");
1923 return error;
1924}
1925EXPORT_SYMBOL_GPL(dpm_suspend_start);
1926
1927void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1928{
1929 if (ret)
1930 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1931}
1932EXPORT_SYMBOL_GPL(__suspend_report_result);
1933
1934/**
1935 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1936 * @subordinate: Device that needs to wait for @dev.
1937 * @dev: Device to wait for.
1938 */
1939int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1940{
1941 dpm_wait(dev, subordinate->power.async_suspend);
1942 return async_error;
1943}
1944EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1945
1946/**
1947 * dpm_for_each_dev - device iterator.
1948 * @data: data for the callback.
1949 * @fn: function to be called for each device.
1950 *
1951 * Iterate over devices in dpm_list, and call @fn for each device,
1952 * passing it @data.
1953 */
1954void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1955{
1956 struct device *dev;
1957
1958 if (!fn)
1959 return;
1960
1961 device_pm_lock();
1962 list_for_each_entry(dev, &dpm_list, power.entry)
1963 fn(dev, data);
1964 device_pm_unlock();
1965}
1966EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1967
1968static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1969{
1970 if (!ops)
1971 return true;
1972
1973 return !ops->prepare &&
1974 !ops->suspend &&
1975 !ops->suspend_late &&
1976 !ops->suspend_noirq &&
1977 !ops->resume_noirq &&
1978 !ops->resume_early &&
1979 !ops->resume &&
1980 !ops->complete;
1981}
1982
1983void device_pm_check_callbacks(struct device *dev)
1984{
1985 unsigned long flags;
1986
1987 spin_lock_irqsave(&dev->power.lock, flags);
1988 dev->power.no_pm_callbacks =
1989 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1990 !dev->bus->suspend && !dev->bus->resume)) &&
1991 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1992 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1993 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1994 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1995 !dev->driver->suspend && !dev->driver->resume));
1996 spin_unlock_irqrestore(&dev->power.lock, flags);
1997}
1998
1999bool dev_pm_skip_suspend(struct device *dev)
2000{
2001 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2002 pm_runtime_status_suspended(dev);
2003}