// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
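
/*
 * During a suspend transition, devices move through these lists in order as
 * each phase completes: dpm_list -> dpm_prepared_list (after "prepare") ->
 * dpm_suspended_list (after "suspend") -> dpm_late_early_list (after "late
 * suspend") -> dpm_noirq_list (after "noirq suspend"). Resume walks them
 * back in the opposite direction.
 */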

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}
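
/*
 * Note: pm_print_times_enabled is controlled from user space through the
 * /sys/power/pm_print_times attribute and defaults on when booting with
 * initcall_debug.
 */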

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume. In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
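
/*
 * Illustrative sketch (not part of this file): a driver typically populates
 * the callbacks matched above through struct dev_pm_ops, e.g.:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * where foo_suspend() and foo_resume() are hypothetical driver callbacks.
 * SET_SYSTEM_SLEEP_PM_OPS() assigns them to the suspend/freeze/poweroff and
 * resume/thaw/restore slots that pm_op() selects from.
 */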

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}
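
/*
 * Illustrative sketch (not part of this file): a driver opts into the
 * optimization checked above by setting the relevant flags at probe time,
 * e.g.:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * With DPM_FLAG_MAY_SKIP_RESUME set (and power.may_skip_resume preserved
 * through the suspend phases), a device left runtime-suspended may have its
 * "noirq" and "early" resume driver callbacks skipped.
 */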

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		suspend_stats.failed_resume_noirq++;
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		dev->power.async_in_progress = true;

		get_device(dev);

		if (async_schedule_dev_nocall(func, dev))
			return true;

		put_device(dev);
	}
	/*
	 * Because async_schedule_dev_nocall() above has returned false or it
	 * has not been called at all, func() is not running and it is safe to
	 * update the async_in_progress flag without extra synchronization.
	 */
	dev->power.async_in_progress = false;
	return false;
}
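
/*
 * Illustrative note: devices are opted into async suspend/resume by their
 * subsystem or driver via device_enable_async_suspend(dev), and the
 * mechanism as a whole can be switched off through /sys/power/pm_async.
 */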

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);

	if (error) {
		suspend_stats.failed_resume_early++;
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		suspend_stats.failed_resume++;
		dpm_save_failed_step(SUSPEND_RESUME);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);

		get_device(dev);

		if (!dev->power.async_in_progress) {
			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
		}

		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
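
/*
 * Taken together, a full system transition runs the device phases in this
 * order: prepare -> suspend -> suspend_late -> suspend_noirq on the way
 * down, and resume_noirq -> resume_early -> resume -> complete on the way
 * back up; dpm_suspend_start()/dpm_suspend_end() and
 * dpm_resume_start()/dpm_resume_end() bracket these for the system core.
 */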


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal. Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_noirq_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	wake_up_all_idle_cpus();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);

		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
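
/*
 * Note: the legacy bus->suspend()/bus->resume() callbacks handled here are
 * deprecated; new code is expected to provide struct dev_pm_ops instead.
 */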

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
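
/*
 * Illustrative sketch (not part of this file): a fully runtime-PM-centric
 * driver can enable the direct-complete path above with a prepare callback
 * along the lines of:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * foo_prepare() is hypothetical; returning a positive value tells the core
 * the device may be left runtime-suspended across the transition.
 */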
1875
1876/**
1877 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1878 * @state: PM transition of the system being carried out.
1879 *
1880 * Execute the ->prepare() callback(s) for all devices.
1881 */
1882int dpm_prepare(pm_message_t state)
1883{
1884 int error = 0;
1885
1886 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1887 might_sleep();
1888
1889 /*
1890 * Give a chance for the known devices to complete their probes, before
1891 * disable probing of devices. This sync point is important at least
1892 * at boot time + hibernation restore.
1893 */
1894 wait_for_device_probe();
1895 /*
1896 * It is unsafe if probing of devices will happen during suspend or
1897 * hibernation and system behavior will be unpredictable in this case.
1898 * So, let's prohibit device's probing here and defer their probes
1899 * instead. The normal behavior will be restored in dpm_complete().
1900 */
1901 device_block_probing();
1902
1903 mutex_lock(&dpm_list_mtx);
1904 while (!list_empty(&dpm_list) && !error) {
1905 struct device *dev = to_device(dpm_list.next);
1906
1907 get_device(dev);
1908
1909 mutex_unlock(&dpm_list_mtx);
1910
1911 trace_device_pm_callback_start(dev, "", state.event);
1912 error = device_prepare(dev, state);
1913 trace_device_pm_callback_end(dev, error);
1914
1915 mutex_lock(&dpm_list_mtx);
1916
1917 if (!error) {
1918 dev->power.is_prepared = true;
1919 if (!list_empty(&dev->power.entry))
1920 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1921 } else if (error == -EAGAIN) {
1922 error = 0;
1923 } else {
1924 dev_info(dev, "not prepared for power transition: code %d\n",
1925 error);
1926 }
1927
1928 mutex_unlock(&dpm_list_mtx);
1929
1930 put_device(dev);
1931
1932 mutex_lock(&dpm_list_mtx);
1933 }
1934 mutex_unlock(&dpm_list_mtx);
1935 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1936 return error;
1937}
1938
1939/**
1940 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1941 * @state: PM transition of the system being carried out.
1942 *
1943 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1944 * callbacks for them.
1945 */
1946int dpm_suspend_start(pm_message_t state)
1947{
1948 ktime_t starttime = ktime_get();
1949 int error;
1950
1951 error = dpm_prepare(state);
1952 if (error) {
1953 suspend_stats.failed_prepare++;
1954 dpm_save_failed_step(SUSPEND_PREPARE);
1955 } else
1956 error = dpm_suspend(state);
1957 dpm_show_time(starttime, state, error, "start");
1958 return error;
1959}
1960EXPORT_SYMBOL_GPL(dpm_suspend_start);
1961
1962void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1963{
1964 if (ret)
1965 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1966}
1967EXPORT_SYMBOL_GPL(__suspend_report_result);
1968
1969/**
1970 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1971 * @subordinate: Device that needs to wait for @dev.
1972 * @dev: Device to wait for.
1973 */
1974int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1975{
1976 dpm_wait(dev, subordinate->power.async_suspend);
1977 return async_error;
1978}
1979EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1980
1981/**
1982 * dpm_for_each_dev - device iterator.
1983 * @data: data for the callback.
1984 * @fn: function to be called for each device.
1985 *
1986 * Iterate over devices in dpm_list, and call @fn for each device,
1987 * passing it @data.
1988 */
1989void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1990{
1991 struct device *dev;
1992
1993 if (!fn)
1994 return;
1995
1996 device_pm_lock();
1997 list_for_each_entry(dev, &dpm_list, power.entry)
1998 fn(dev, data);
1999 device_pm_unlock();
2000}
2001EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

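/*
 * Cache whether @dev has any system-wide PM callbacks at all, so the PM core
 * can skip callback-less devices during transitions. Expected to be called
 * whenever the set of callback sources may change, e.g. on driver bind or
 * unbind.
 */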
void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

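/**
 * dev_pm_skip_suspend - System-wide device suspend optimization check.
 * @dev: Target device.
 *
 * Return %true if the device's driver has set DPM_FLAG_SMART_SUSPEND and the
 * device is runtime-suspended, in which case its system-wide suspend callbacks
 * may be skipped during the transition.
 */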
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
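
/*
 * A hypothetical sketch of how a driver opts in to the optimization above:
 * setting DPM_FLAG_SMART_SUSPEND at probe time lets the core leave a
 * runtime-suspended device alone during system-wide suspend:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// ... ordinary probe work ...
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);
 *		return 0;
 *	}
 */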