v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19#define dev_fmt pr_fmt
  20
  21#include <linux/device.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/sched/debug.h>
  31#include <linux/async.h>
  32#include <linux/suspend.h>
  33#include <trace/events/power.h>
  34#include <linux/cpufreq.h>
  35#include <linux/cpuidle.h>
  36#include <linux/devfreq.h>
  37#include <linux/timer.h>
  38
  39#include "../base.h"
  40#include "power.h"
  41
  42typedef int (*pm_callback_t)(struct device *);
  43
  44#define list_for_each_entry_rcu_locked(pos, head, member) \
  45	list_for_each_entry_rcu(pos, head, member, \
  46			device_links_read_lock_held())
  47
  48/*
   49 * The entries in dpm_list are in depth-first order, simply
  50 * because children are guaranteed to be discovered after parents, and
  51 * are inserted at the back of the list on discovery.
  52 *
  53 * Since device_pm_add() may be called with a device lock held,
  54 * we must never try to acquire a device lock while holding
  55 * dpm_list_mutex.
  56 */
  57
  58LIST_HEAD(dpm_list);
  59static LIST_HEAD(dpm_prepared_list);
  60static LIST_HEAD(dpm_suspended_list);
  61static LIST_HEAD(dpm_late_early_list);
  62static LIST_HEAD(dpm_noirq_list);
  63
  64struct suspend_stats suspend_stats;
  65static DEFINE_MUTEX(dpm_list_mtx);
  66static pm_message_t pm_transition;
  67
  68static int async_error;
  69
  70static const char *pm_verb(int event)
  71{
  72	switch (event) {
  73	case PM_EVENT_SUSPEND:
  74		return "suspend";
  75	case PM_EVENT_RESUME:
  76		return "resume";
  77	case PM_EVENT_FREEZE:
  78		return "freeze";
  79	case PM_EVENT_QUIESCE:
  80		return "quiesce";
  81	case PM_EVENT_HIBERNATE:
  82		return "hibernate";
  83	case PM_EVENT_THAW:
  84		return "thaw";
  85	case PM_EVENT_RESTORE:
  86		return "restore";
  87	case PM_EVENT_RECOVER:
  88		return "recover";
  89	default:
  90		return "(unknown PM event)";
  91	}
  92}
  93
  94/**
  95 * device_pm_sleep_init - Initialize system suspend-related device fields.
  96 * @dev: Device object being initialized.
  97 */
  98void device_pm_sleep_init(struct device *dev)
  99{
 100	dev->power.is_prepared = false;
 101	dev->power.is_suspended = false;
 102	dev->power.is_noirq_suspended = false;
 103	dev->power.is_late_suspended = false;
 104	init_completion(&dev->power.completion);
 105	complete_all(&dev->power.completion);
 106	dev->power.wakeup = NULL;
 107	INIT_LIST_HEAD(&dev->power.entry);
 108}
 109
 110/**
 111 * device_pm_lock - Lock the list of active devices used by the PM core.
 112 */
 113void device_pm_lock(void)
 114{
 115	mutex_lock(&dpm_list_mtx);
 116}
 117
 118/**
 119 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 120 */
 121void device_pm_unlock(void)
 122{
 123	mutex_unlock(&dpm_list_mtx);
 124}
 125
 126/**
 127 * device_pm_add - Add a device to the PM core's list of active devices.
 128 * @dev: Device to add to the list.
 129 */
 130void device_pm_add(struct device *dev)
 131{
 132	/* Skip PM setup/initialization. */
 133	if (device_pm_not_required(dev))
 134		return;
 135
 136	pr_debug("Adding info for %s:%s\n",
 137		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 138	device_pm_check_callbacks(dev);
 139	mutex_lock(&dpm_list_mtx);
 140	if (dev->parent && dev->parent->power.is_prepared)
 141		dev_warn(dev, "parent %s should not be sleeping\n",
 142			dev_name(dev->parent));
 143	list_add_tail(&dev->power.entry, &dpm_list);
 144	dev->power.in_dpm_list = true;
 145	mutex_unlock(&dpm_list_mtx);
 146}
 147
 148/**
 149 * device_pm_remove - Remove a device from the PM core's list of active devices.
 150 * @dev: Device to be removed from the list.
 151 */
 152void device_pm_remove(struct device *dev)
 153{
 154	if (device_pm_not_required(dev))
 155		return;
 156
 157	pr_debug("Removing info for %s:%s\n",
 158		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 159	complete_all(&dev->power.completion);
 160	mutex_lock(&dpm_list_mtx);
 161	list_del_init(&dev->power.entry);
 162	dev->power.in_dpm_list = false;
 163	mutex_unlock(&dpm_list_mtx);
 164	device_wakeup_disable(dev);
 165	pm_runtime_remove(dev);
 166	device_pm_check_callbacks(dev);
 167}
 168
 169/**
 170 * device_pm_move_before - Move device in the PM core's list of active devices.
 171 * @deva: Device to move in dpm_list.
 172 * @devb: Device @deva should come before.
 173 */
 174void device_pm_move_before(struct device *deva, struct device *devb)
 175{
 176	pr_debug("Moving %s:%s before %s:%s\n",
 177		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 178		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 179	/* Delete deva from dpm_list and reinsert before devb. */
 180	list_move_tail(&deva->power.entry, &devb->power.entry);
 181}
 182
 183/**
 184 * device_pm_move_after - Move device in the PM core's list of active devices.
 185 * @deva: Device to move in dpm_list.
 186 * @devb: Device @deva should come after.
 187 */
 188void device_pm_move_after(struct device *deva, struct device *devb)
 189{
 190	pr_debug("Moving %s:%s after %s:%s\n",
 191		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 192		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 193	/* Delete deva from dpm_list and reinsert after devb. */
 194	list_move(&deva->power.entry, &devb->power.entry);
 195}
 196
 197/**
 198 * device_pm_move_last - Move device to end of the PM core's list of devices.
 199 * @dev: Device to move in dpm_list.
 200 */
 201void device_pm_move_last(struct device *dev)
 202{
 203	pr_debug("Moving %s:%s to end of list\n",
 204		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 205	list_move_tail(&dev->power.entry, &dpm_list);
 206}
 207
 208static ktime_t initcall_debug_start(struct device *dev, void *cb)
 209{
 210	if (!pm_print_times_enabled)
 211		return 0;
 212
 213	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 214		 task_pid_nr(current),
 215		 dev->parent ? dev_name(dev->parent) : "none");
 216	return ktime_get();
 217}
 218
 219static void initcall_debug_report(struct device *dev, ktime_t calltime,
 220				  void *cb, int error)
 221{
  222	ktime_t rettime;
  223
 224	if (!pm_print_times_enabled)
 225		return;
 226
  227	rettime = ktime_get();
  228	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 229		 (unsigned long long)ktime_us_delta(rettime, calltime));
 230}
 231
 232/**
 233 * dpm_wait - Wait for a PM operation to complete.
 234 * @dev: Device to wait for.
 235 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 236 */
 237static void dpm_wait(struct device *dev, bool async)
 238{
 239	if (!dev)
 240		return;
 241
 242	if (async || (pm_async_enabled && dev->power.async_suspend))
 243		wait_for_completion(&dev->power.completion);
 244}
 245
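/*
 * Editorial note: pm_async_enabled mirrors /sys/power/pm_async.  With it
 * cleared, dpm_wait() only waits when its @async argument is set, and
 * is_async() further below declines to schedule asynchronous
 * suspend/resume for any device.
 */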
 246static int dpm_wait_fn(struct device *dev, void *async_ptr)
 247{
 248	dpm_wait(dev, *((bool *)async_ptr));
 249	return 0;
 250}
 251
 252static void dpm_wait_for_children(struct device *dev, bool async)
 253{
  254	device_for_each_child(dev, &async, dpm_wait_fn);
 255}
 256
 257static void dpm_wait_for_suppliers(struct device *dev, bool async)
 258{
 259	struct device_link *link;
 260	int idx;
 261
 262	idx = device_links_read_lock();
 263
 264	/*
 265	 * If the supplier goes away right after we've checked the link to it,
 266	 * we'll wait for its completion to change the state, but that's fine,
 267	 * because the only things that will block as a result are the SRCU
 268	 * callbacks freeing the link objects for the links in the list we're
 269	 * walking.
 270	 */
 271	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 272		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 273			dpm_wait(link->supplier, async);
 274
 275	device_links_read_unlock(idx);
 276}
 277
 278static bool dpm_wait_for_superior(struct device *dev, bool async)
 279{
 280	struct device *parent;
 281
 282	/*
 283	 * If the device is resumed asynchronously and the parent's callback
 284	 * deletes both the device and the parent itself, the parent object may
 285	 * be freed while this function is running, so avoid that by reference
 286	 * counting the parent once more unless the device has been deleted
 287	 * already (in which case return right away).
 288	 */
 289	mutex_lock(&dpm_list_mtx);
 290
 291	if (!device_pm_initialized(dev)) {
 292		mutex_unlock(&dpm_list_mtx);
 293		return false;
 294	}
 295
 296	parent = get_device(dev->parent);
 297
 298	mutex_unlock(&dpm_list_mtx);
 299
 300	dpm_wait(parent, async);
 301	put_device(parent);
 302
 303	dpm_wait_for_suppliers(dev, async);
 304
 305	/*
 306	 * If the parent's callback has deleted the device, attempting to resume
 307	 * it would be invalid, so avoid doing that then.
 308	 */
 309	return device_pm_initialized(dev);
 310}
 311
 312static void dpm_wait_for_consumers(struct device *dev, bool async)
 313{
 314	struct device_link *link;
 315	int idx;
 316
 317	idx = device_links_read_lock();
 318
 319	/*
 320	 * The status of a device link can only be changed from "dormant" by a
 321	 * probe, but that cannot happen during system suspend/resume.  In
 322	 * theory it can change to "dormant" at that time, but then it is
  323	 * reasonable to wait for the target device anyway (e.g. if it goes
 324	 * away, it's better to wait for it to go away completely and then
 325	 * continue instead of trying to continue in parallel with its
 326	 * unregistration).
 327	 */
 328	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 329		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 330			dpm_wait(link->consumer, async);
 331
 332	device_links_read_unlock(idx);
 333}
 334
 335static void dpm_wait_for_subordinate(struct device *dev, bool async)
 336{
 337	dpm_wait_for_children(dev, async);
 338	dpm_wait_for_consumers(dev, async);
 339}
 340
 341/**
 342 * pm_op - Return the PM operation appropriate for given PM event.
 343 * @ops: PM operations to choose from.
 344 * @state: PM transition of the system being carried out.
 345 */
 346static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 347{
 348	switch (state.event) {
 349#ifdef CONFIG_SUSPEND
 350	case PM_EVENT_SUSPEND:
 351		return ops->suspend;
 352	case PM_EVENT_RESUME:
 353		return ops->resume;
 354#endif /* CONFIG_SUSPEND */
 355#ifdef CONFIG_HIBERNATE_CALLBACKS
 356	case PM_EVENT_FREEZE:
 357	case PM_EVENT_QUIESCE:
 358		return ops->freeze;
 359	case PM_EVENT_HIBERNATE:
 360		return ops->poweroff;
 361	case PM_EVENT_THAW:
 362	case PM_EVENT_RECOVER:
  363		return ops->thaw;
  364	case PM_EVENT_RESTORE:
 365		return ops->restore;
 366#endif /* CONFIG_HIBERNATE_CALLBACKS */
 367	}
 368
 369	return NULL;
 370}
 371
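/*
 * Editorial sketch (not part of the original file): a hypothetical
 * driver's dev_pm_ops as pm_op() consumes it.  SET_SYSTEM_SLEEP_PM_OPS()
 * assigns foo_suspend/foo_resume to the suspend/resume slots and reuses
 * the same pair for the freeze/thaw/poweroff/restore hibernation slots,
 * so pm_op() finds a callback for every event handled above.  The
 * foo_* names are assumptions used only for illustration.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */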
 372/**
 373 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 374 * @ops: PM operations to choose from.
 375 * @state: PM transition of the system being carried out.
 376 *
 377 * Runtime PM is disabled for @dev while this function is being executed.
 378 */
 379static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 380				      pm_message_t state)
 381{
 382	switch (state.event) {
 383#ifdef CONFIG_SUSPEND
 384	case PM_EVENT_SUSPEND:
 385		return ops->suspend_late;
 386	case PM_EVENT_RESUME:
 387		return ops->resume_early;
 388#endif /* CONFIG_SUSPEND */
 389#ifdef CONFIG_HIBERNATE_CALLBACKS
 390	case PM_EVENT_FREEZE:
 391	case PM_EVENT_QUIESCE:
 392		return ops->freeze_late;
 393	case PM_EVENT_HIBERNATE:
 394		return ops->poweroff_late;
 395	case PM_EVENT_THAW:
 396	case PM_EVENT_RECOVER:
 397		return ops->thaw_early;
 398	case PM_EVENT_RESTORE:
 399		return ops->restore_early;
 400#endif /* CONFIG_HIBERNATE_CALLBACKS */
 401	}
 402
 403	return NULL;
 404}
 405
 406/**
 407 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 408 * @ops: PM operations to choose from.
 409 * @state: PM transition of the system being carried out.
 410 *
 411 * The driver of @dev will not receive interrupts while this function is being
 412 * executed.
 413 */
 414static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 415{
 416	switch (state.event) {
 417#ifdef CONFIG_SUSPEND
 418	case PM_EVENT_SUSPEND:
 419		return ops->suspend_noirq;
 420	case PM_EVENT_RESUME:
 421		return ops->resume_noirq;
 422#endif /* CONFIG_SUSPEND */
 423#ifdef CONFIG_HIBERNATE_CALLBACKS
 424	case PM_EVENT_FREEZE:
 425	case PM_EVENT_QUIESCE:
 426		return ops->freeze_noirq;
 427	case PM_EVENT_HIBERNATE:
 428		return ops->poweroff_noirq;
 429	case PM_EVENT_THAW:
 430	case PM_EVENT_RECOVER:
 431		return ops->thaw_noirq;
 432	case PM_EVENT_RESTORE:
 433		return ops->restore_noirq;
 434#endif /* CONFIG_HIBERNATE_CALLBACKS */
 435	}
 436
 437	return NULL;
 438}
 439
 440static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 441{
 442	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
 443		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 444		", may wakeup" : "", dev->power.driver_flags);
 445}
 446
 447static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 448			int error)
 449{
 450	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
 451		error);
 452}
 453
 454static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 455			  const char *info)
 456{
 457	ktime_t calltime;
 458	u64 usecs64;
 459	int usecs;
 460
 461	calltime = ktime_get();
 462	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 463	do_div(usecs64, NSEC_PER_USEC);
 464	usecs = usecs64;
 465	if (usecs == 0)
 466		usecs = 1;
 467
 468	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 469		  info ?: "", info ? " " : "", pm_verb(state.event),
 470		  error ? "aborted" : "complete",
 471		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 472}
 473
 474static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 475			    pm_message_t state, const char *info)
 476{
 477	ktime_t calltime;
 478	int error;
 479
 480	if (!cb)
 481		return 0;
 482
 483	calltime = initcall_debug_start(dev, cb);
 484
 485	pm_dev_dbg(dev, state, info);
 486	trace_device_pm_callback_start(dev, info, state.event);
 487	error = cb(dev);
 488	trace_device_pm_callback_end(dev, error);
 489	suspend_report_result(cb, error);
 490
 491	initcall_debug_report(dev, calltime, cb, error);
 492
 493	return error;
 494}
 495
 496#ifdef CONFIG_DPM_WATCHDOG
 497struct dpm_watchdog {
 498	struct device		*dev;
 499	struct task_struct	*tsk;
 500	struct timer_list	timer;
 501};
 502
 503#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 504	struct dpm_watchdog wd
 505
 506/**
 507 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 508 * @t: The timer that PM watchdog depends on.
 509 *
 510 * Called when a driver has timed out suspending or resuming.
 511 * There's not much we can do here to recover so panic() to
 512 * capture a crash-dump in pstore.
 513 */
 514static void dpm_watchdog_handler(struct timer_list *t)
 515{
 516	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 517
 518	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 519	show_stack(wd->tsk, NULL, KERN_EMERG);
 520	panic("%s %s: unrecoverable failure\n",
 521		dev_driver_string(wd->dev), dev_name(wd->dev));
 522}
 523
 524/**
 525 * dpm_watchdog_set - Enable pm watchdog for given device.
 526 * @wd: Watchdog. Must be allocated on the stack.
 527 * @dev: Device to handle.
 528 */
 529static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 530{
 531	struct timer_list *timer = &wd->timer;
 532
 533	wd->dev = dev;
 534	wd->tsk = current;
 535
 536	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 537	/* use same timeout value for both suspend and resume */
 538	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 539	add_timer(timer);
 540}
 541
 542/**
 543 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 544 * @wd: Watchdog to disable.
 545 */
 546static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 547{
 548	struct timer_list *timer = &wd->timer;
 549
 550	del_timer_sync(timer);
 551	destroy_timer_on_stack(timer);
 552}
 553#else
 554#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 555#define dpm_watchdog_set(x, y)
 556#define dpm_watchdog_clear(x)
 557#endif
 558
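/*
 * Editorial note: with CONFIG_DPM_WATCHDOG=y, the timer armed in
 * dpm_watchdog_set() fires HZ * CONFIG_DPM_WATCHDOG_TIMEOUT jiffies
 * later, so e.g. CONFIG_DPM_WATCHDOG_TIMEOUT=120 panics the system if a
 * single device callback is still running after 120 seconds.
 */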
 559/*------------------------- Resume routines -------------------------*/
 560
 561/**
  562 * dev_pm_skip_resume - System-wide device resume optimization check.
  563 * @dev: Target device.
 564 *
 565 * Return:
 566 * - %false if the transition under way is RESTORE.
 567 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 568 * - The logical negation of %power.must_resume otherwise (that is, when the
 569 *   transition under way is RESUME).
 570 */
 571bool dev_pm_skip_resume(struct device *dev)
 572{
 573	if (pm_transition.event == PM_EVENT_RESTORE)
 574		return false;
 575
 576	if (pm_transition.event == PM_EVENT_THAW)
  577		return dev_pm_skip_suspend(dev);
  578
  579	return !dev->power.must_resume;
  580}
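/*
 * Editorial sketch: a driver opts into the optimization checked above by
 * setting both "skip" flags, typically at probe time; the PM core then
 * maintains power.may_skip_resume and power.must_resume during the noirq
 * suspend phase (see __device_suspend_noirq()).  foo_probe() is a
 * hypothetical name used only for illustration.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 */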
  581
  582/**
 583 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 584 * @dev: Device to handle.
 585 * @state: PM transition of the system being carried out.
 586 * @async: If true, the device is being resumed asynchronously.
 587 *
 588 * The driver of @dev will not receive interrupts while this function is being
 589 * executed.
 590 */
 591static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 592{
 593	pm_callback_t callback = NULL;
 594	const char *info = NULL;
 595	bool skip_resume;
 596	int error = 0;
 597
 598	TRACE_DEVICE(dev);
 599	TRACE_RESUME(0);
 600
 601	if (dev->power.syscore || dev->power.direct_complete)
 602		goto Out;
 603
 604	if (!dev->power.is_noirq_suspended)
 605		goto Out;
 606
 607	if (!dpm_wait_for_superior(dev, async))
 608		goto Out;
 609
 610	skip_resume = dev_pm_skip_resume(dev);
 611	/*
 612	 * If the driver callback is skipped below or by the middle layer
 613	 * callback and device_resume_early() also skips the driver callback for
 614	 * this device later, it needs to appear as "suspended" to PM-runtime,
 615	 * so change its status accordingly.
 616	 *
 617	 * Otherwise, the device is going to be resumed, so set its PM-runtime
 618	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 619	 * to avoid confusing drivers that don't use it.
 620	 */
 621	if (skip_resume)
 622		pm_runtime_set_suspended(dev);
 623	else if (dev_pm_skip_suspend(dev))
 624		pm_runtime_set_active(dev);
 625
 626	if (dev->pm_domain) {
 627		info = "noirq power domain ";
 628		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 629	} else if (dev->type && dev->type->pm) {
 630		info = "noirq type ";
 631		callback = pm_noirq_op(dev->type->pm, state);
 632	} else if (dev->class && dev->class->pm) {
 633		info = "noirq class ";
 634		callback = pm_noirq_op(dev->class->pm, state);
 635	} else if (dev->bus && dev->bus->pm) {
 636		info = "noirq bus ";
 637		callback = pm_noirq_op(dev->bus->pm, state);
 638	}
 639	if (callback)
 640		goto Run;
 641
 642	if (skip_resume)
 643		goto Skip;
  644
  645	if (dev->driver && dev->driver->pm) {
 646		info = "noirq driver ";
 647		callback = pm_noirq_op(dev->driver->pm, state);
 648	}
 649
 650Run:
 651	error = dpm_run_callback(callback, dev, state, info);
 652
 653Skip:
 654	dev->power.is_noirq_suspended = false;
  655
  656Out:
 657	complete_all(&dev->power.completion);
 658	TRACE_RESUME(error);
 659	return error;
 660}
 661
 662static bool is_async(struct device *dev)
 663{
 664	return dev->power.async_suspend && pm_async_enabled
 665		&& !pm_trace_is_enabled();
 666}
 667
 668static bool dpm_async_fn(struct device *dev, async_func_t func)
 669{
 670	reinit_completion(&dev->power.completion);
 671
 672	if (is_async(dev)) {
 673		get_device(dev);
 674		async_schedule_dev(func, dev);
 675		return true;
 676	}
 677
 678	return false;
 679}
 680
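/*
 * Editorial sketch: dpm_async_fn() only schedules @func when pm_async is
 * enabled and the device has power.async_suspend set.  A driver or bus
 * marks a device for asynchronous handling with the helper from
 * <linux/pm.h>, e.g. once at probe time:
 *
 *	device_enable_async_suspend(dev);
 */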
 681static void async_resume_noirq(void *data, async_cookie_t cookie)
 682{
 683	struct device *dev = (struct device *)data;
 684	int error;
 685
 686	error = device_resume_noirq(dev, pm_transition, true);
 687	if (error)
 688		pm_dev_err(dev, pm_transition, " async", error);
 689
 690	put_device(dev);
 691}
 692
 693static void dpm_noirq_resume_devices(pm_message_t state)
 694{
 695	struct device *dev;
 696	ktime_t starttime = ktime_get();
 697
 698	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 699	mutex_lock(&dpm_list_mtx);
 700	pm_transition = state;
 701
 702	/*
  703	 * Advance the async threads upfront, in case the starting
  704	 * of the async threads is delayed by devices that resume
  705	 * synchronously.
 706	 */
 707	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 708		dpm_async_fn(dev, async_resume_noirq);
 709
 710	while (!list_empty(&dpm_noirq_list)) {
 711		dev = to_device(dpm_noirq_list.next);
 712		get_device(dev);
 713		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 714		mutex_unlock(&dpm_list_mtx);
 715
 716		if (!is_async(dev)) {
 717			int error;
 718
 719			error = device_resume_noirq(dev, state, false);
 720			if (error) {
 721				suspend_stats.failed_resume_noirq++;
 722				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 723				dpm_save_failed_dev(dev_name(dev));
 724				pm_dev_err(dev, state, " noirq", error);
 725			}
 726		}
 727
 728		mutex_lock(&dpm_list_mtx);
 729		put_device(dev);
 730	}
 731	mutex_unlock(&dpm_list_mtx);
 732	async_synchronize_full();
 733	dpm_show_time(starttime, state, 0, "noirq");
 734	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 735}
 736
 737/**
 738 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 739 * @state: PM transition of the system being carried out.
 740 *
 741 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 742 * allow device drivers' interrupt handlers to be called.
 743 */
 744void dpm_resume_noirq(pm_message_t state)
 745{
 746	dpm_noirq_resume_devices(state);
 747
 748	resume_device_irqs();
 749	device_wakeup_disarm_wake_irqs();
 750
 751	cpuidle_resume();
 752}
  753
  754/**
 755 * device_resume_early - Execute an "early resume" callback for given device.
 756 * @dev: Device to handle.
 757 * @state: PM transition of the system being carried out.
 758 * @async: If true, the device is being resumed asynchronously.
 759 *
 760 * Runtime PM is disabled for @dev while this function is being executed.
 761 */
 762static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 763{
 764	pm_callback_t callback = NULL;
 765	const char *info = NULL;
 766	int error = 0;
 767
 768	TRACE_DEVICE(dev);
 769	TRACE_RESUME(0);
 770
 771	if (dev->power.syscore || dev->power.direct_complete)
 772		goto Out;
 773
 774	if (!dev->power.is_late_suspended)
 775		goto Out;
 776
 777	if (!dpm_wait_for_superior(dev, async))
 778		goto Out;
 779
 780	if (dev->pm_domain) {
 781		info = "early power domain ";
 782		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 783	} else if (dev->type && dev->type->pm) {
 784		info = "early type ";
 785		callback = pm_late_early_op(dev->type->pm, state);
 786	} else if (dev->class && dev->class->pm) {
 787		info = "early class ";
 788		callback = pm_late_early_op(dev->class->pm, state);
 789	} else if (dev->bus && dev->bus->pm) {
 790		info = "early bus ";
 791		callback = pm_late_early_op(dev->bus->pm, state);
 792	}
 793	if (callback)
 794		goto Run;
 795
 796	if (dev_pm_skip_resume(dev))
 797		goto Skip;
 798
 799	if (dev->driver && dev->driver->pm) {
 800		info = "early driver ";
 801		callback = pm_late_early_op(dev->driver->pm, state);
 802	}
 803
 804Run:
 805	error = dpm_run_callback(callback, dev, state, info);
 806
 807Skip:
 808	dev->power.is_late_suspended = false;
 809
 810Out:
 811	TRACE_RESUME(error);
 812
 813	pm_runtime_enable(dev);
 814	complete_all(&dev->power.completion);
 815	return error;
 816}
 817
 818static void async_resume_early(void *data, async_cookie_t cookie)
 819{
 820	struct device *dev = (struct device *)data;
 821	int error;
 822
 823	error = device_resume_early(dev, pm_transition, true);
 824	if (error)
 825		pm_dev_err(dev, pm_transition, " async", error);
 826
 827	put_device(dev);
 828}
 829
 830/**
 831 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 832 * @state: PM transition of the system being carried out.
 833 */
 834void dpm_resume_early(pm_message_t state)
 835{
 836	struct device *dev;
 837	ktime_t starttime = ktime_get();
 838
 839	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 840	mutex_lock(&dpm_list_mtx);
 841	pm_transition = state;
 842
 843	/*
  844	 * Advance the async threads upfront, in case the starting
  845	 * of the async threads is delayed by devices that resume
  846	 * synchronously.
 847	 */
 848	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 849		dpm_async_fn(dev, async_resume_early);
 850
 851	while (!list_empty(&dpm_late_early_list)) {
 852		dev = to_device(dpm_late_early_list.next);
 853		get_device(dev);
 854		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 855		mutex_unlock(&dpm_list_mtx);
 856
 857		if (!is_async(dev)) {
 858			int error;
 859
 860			error = device_resume_early(dev, state, false);
 861			if (error) {
 862				suspend_stats.failed_resume_early++;
 863				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 864				dpm_save_failed_dev(dev_name(dev));
 865				pm_dev_err(dev, state, " early", error);
 866			}
 867		}
 868		mutex_lock(&dpm_list_mtx);
 869		put_device(dev);
 870	}
 871	mutex_unlock(&dpm_list_mtx);
 872	async_synchronize_full();
 873	dpm_show_time(starttime, state, 0, "early");
 874	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 875}
 876
 877/**
 878 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 879 * @state: PM transition of the system being carried out.
 880 */
 881void dpm_resume_start(pm_message_t state)
 882{
 883	dpm_resume_noirq(state);
 884	dpm_resume_early(state);
 885}
 886EXPORT_SYMBOL_GPL(dpm_resume_start);
 887
 888/**
 889 * device_resume - Execute "resume" callbacks for given device.
 890 * @dev: Device to handle.
 891 * @state: PM transition of the system being carried out.
 892 * @async: If true, the device is being resumed asynchronously.
 893 */
 894static int device_resume(struct device *dev, pm_message_t state, bool async)
 895{
 896	pm_callback_t callback = NULL;
 897	const char *info = NULL;
 898	int error = 0;
 899	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 900
 901	TRACE_DEVICE(dev);
 902	TRACE_RESUME(0);
 903
 904	if (dev->power.syscore)
 905		goto Complete;
 906
 907	if (dev->power.direct_complete) {
 908		/* Match the pm_runtime_disable() in __device_suspend(). */
 909		pm_runtime_enable(dev);
 910		goto Complete;
 911	}
 912
 913	if (!dpm_wait_for_superior(dev, async))
 914		goto Complete;
 915
 916	dpm_watchdog_set(&wd, dev);
 917	device_lock(dev);
 918
 919	/*
 920	 * This is a fib.  But we'll allow new children to be added below
 921	 * a resumed device, even if the device hasn't been completed yet.
 922	 */
 923	dev->power.is_prepared = false;
 924
 925	if (!dev->power.is_suspended)
 926		goto Unlock;
 927
 928	if (dev->pm_domain) {
 929		info = "power domain ";
 930		callback = pm_op(&dev->pm_domain->ops, state);
 931		goto Driver;
 932	}
 933
 934	if (dev->type && dev->type->pm) {
 935		info = "type ";
 936		callback = pm_op(dev->type->pm, state);
 937		goto Driver;
 938	}
 939
 940	if (dev->class && dev->class->pm) {
 941		info = "class ";
 942		callback = pm_op(dev->class->pm, state);
 943		goto Driver;
 944	}
 945
 946	if (dev->bus) {
 947		if (dev->bus->pm) {
 948			info = "bus ";
 949			callback = pm_op(dev->bus->pm, state);
 950		} else if (dev->bus->resume) {
 951			info = "legacy bus ";
 952			callback = dev->bus->resume;
 953			goto End;
 954		}
 955	}
 956
 957 Driver:
 958	if (!callback && dev->driver && dev->driver->pm) {
 959		info = "driver ";
 960		callback = pm_op(dev->driver->pm, state);
 961	}
 962
 963 End:
 964	error = dpm_run_callback(callback, dev, state, info);
 965	dev->power.is_suspended = false;
 966
 967 Unlock:
 968	device_unlock(dev);
 969	dpm_watchdog_clear(&wd);
 970
 971 Complete:
 972	complete_all(&dev->power.completion);
 973
 974	TRACE_RESUME(error);
 975
 976	return error;
 977}
 978
 979static void async_resume(void *data, async_cookie_t cookie)
 980{
 981	struct device *dev = (struct device *)data;
 982	int error;
 983
 984	error = device_resume(dev, pm_transition, true);
 985	if (error)
 986		pm_dev_err(dev, pm_transition, " async", error);
 987	put_device(dev);
 988}
 989
 990/**
 991 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 992 * @state: PM transition of the system being carried out.
 993 *
 994 * Execute the appropriate "resume" callback for all devices whose status
 995 * indicates that they are suspended.
 996 */
 997void dpm_resume(pm_message_t state)
 998{
 999	struct device *dev;
1000	ktime_t starttime = ktime_get();
1001
1002	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1003	might_sleep();
1004
1005	mutex_lock(&dpm_list_mtx);
1006	pm_transition = state;
1007	async_error = 0;
1008
1009	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1010		dpm_async_fn(dev, async_resume);
1011
1012	while (!list_empty(&dpm_suspended_list)) {
1013		dev = to_device(dpm_suspended_list.next);
1014		get_device(dev);
1015		if (!is_async(dev)) {
1016			int error;
1017
1018			mutex_unlock(&dpm_list_mtx);
1019
1020			error = device_resume(dev, state, false);
1021			if (error) {
1022				suspend_stats.failed_resume++;
1023				dpm_save_failed_step(SUSPEND_RESUME);
1024				dpm_save_failed_dev(dev_name(dev));
1025				pm_dev_err(dev, state, "", error);
1026			}
1027
1028			mutex_lock(&dpm_list_mtx);
1029		}
1030		if (!list_empty(&dev->power.entry))
1031			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1032		put_device(dev);
1033	}
1034	mutex_unlock(&dpm_list_mtx);
1035	async_synchronize_full();
1036	dpm_show_time(starttime, state, 0, NULL);
1037
1038	cpufreq_resume();
1039	devfreq_resume();
1040	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1041}
1042
1043/**
1044 * device_complete - Complete a PM transition for given device.
1045 * @dev: Device to handle.
1046 * @state: PM transition of the system being carried out.
1047 */
1048static void device_complete(struct device *dev, pm_message_t state)
1049{
1050	void (*callback)(struct device *) = NULL;
1051	const char *info = NULL;
1052
1053	if (dev->power.syscore)
1054		return;
1055
1056	device_lock(dev);
1057
1058	if (dev->pm_domain) {
1059		info = "completing power domain ";
1060		callback = dev->pm_domain->ops.complete;
1061	} else if (dev->type && dev->type->pm) {
1062		info = "completing type ";
1063		callback = dev->type->pm->complete;
1064	} else if (dev->class && dev->class->pm) {
1065		info = "completing class ";
1066		callback = dev->class->pm->complete;
1067	} else if (dev->bus && dev->bus->pm) {
1068		info = "completing bus ";
1069		callback = dev->bus->pm->complete;
1070	}
1071
1072	if (!callback && dev->driver && dev->driver->pm) {
1073		info = "completing driver ";
1074		callback = dev->driver->pm->complete;
1075	}
1076
1077	if (callback) {
1078		pm_dev_dbg(dev, state, info);
1079		callback(dev);
1080	}
1081
1082	device_unlock(dev);
1083
1084	pm_runtime_put(dev);
1085}
1086
1087/**
1088 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1089 * @state: PM transition of the system being carried out.
1090 *
1091 * Execute the ->complete() callbacks for all devices whose PM status is not
1092 * DPM_ON (this allows new devices to be registered).
1093 */
1094void dpm_complete(pm_message_t state)
1095{
1096	struct list_head list;
1097
1098	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1099	might_sleep();
1100
1101	INIT_LIST_HEAD(&list);
1102	mutex_lock(&dpm_list_mtx);
1103	while (!list_empty(&dpm_prepared_list)) {
1104		struct device *dev = to_device(dpm_prepared_list.prev);
1105
1106		get_device(dev);
1107		dev->power.is_prepared = false;
1108		list_move(&dev->power.entry, &list);
1109		mutex_unlock(&dpm_list_mtx);
1110
1111		trace_device_pm_callback_start(dev, "", state.event);
1112		device_complete(dev, state);
1113		trace_device_pm_callback_end(dev, 0);
1114
1115		mutex_lock(&dpm_list_mtx);
1116		put_device(dev);
1117	}
1118	list_splice(&list, &dpm_list);
1119	mutex_unlock(&dpm_list_mtx);
1120
1121	/* Allow device probing and trigger re-probing of deferred devices */
1122	device_unblock_probing();
1123	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1124}
1125
1126/**
1127 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1128 * @state: PM transition of the system being carried out.
1129 *
1130 * Execute "resume" callbacks for all devices and complete the PM transition of
1131 * the system.
1132 */
1133void dpm_resume_end(pm_message_t state)
1134{
1135	dpm_resume(state);
1136	dpm_complete(state);
1137}
1138EXPORT_SYMBOL_GPL(dpm_resume_end);
1139
1140
1141/*------------------------- Suspend routines -------------------------*/
1142
1143/**
1144 * resume_event - Return a "resume" message for given "suspend" sleep state.
1145 * @sleep_state: PM message representing a sleep state.
1146 *
1147 * Return a PM message representing the resume event corresponding to given
1148 * sleep state.
1149 */
1150static pm_message_t resume_event(pm_message_t sleep_state)
1151{
1152	switch (sleep_state.event) {
1153	case PM_EVENT_SUSPEND:
1154		return PMSG_RESUME;
1155	case PM_EVENT_FREEZE:
1156	case PM_EVENT_QUIESCE:
1157		return PMSG_RECOVER;
1158	case PM_EVENT_HIBERNATE:
1159		return PMSG_RESTORE;
1160	}
1161	return PMSG_ON;
1162}
1163
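/*
 * Editorial example: if dpm_suspend_late(PMSG_FREEZE) fails partway
 * through, the devices that were already suspended are woken back up via
 * dpm_resume_early(resume_event(PMSG_FREEZE)), i.e. with PMSG_RECOVER,
 * so their drivers see ->thaw_early() rather than ->resume_early().
 */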
1164static void dpm_superior_set_must_resume(struct device *dev)
1165{
1166	struct device_link *link;
1167	int idx;
1168
1169	if (dev->parent)
1170		dev->parent->power.must_resume = true;
1171
1172	idx = device_links_read_lock();
1173
1174	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1175		link->supplier->power.must_resume = true;
1176
1177	device_links_read_unlock(idx);
1178}
 1179
 1180/**
1181 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1182 * @dev: Device to handle.
1183 * @state: PM transition of the system being carried out.
1184 * @async: If true, the device is being suspended asynchronously.
1185 *
1186 * The driver of @dev will not receive interrupts while this function is being
1187 * executed.
1188 */
1189static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1190{
1191	pm_callback_t callback = NULL;
 1192	const char *info = NULL;
 1193	int error = 0;
1194
1195	TRACE_DEVICE(dev);
1196	TRACE_SUSPEND(0);
1197
1198	dpm_wait_for_subordinate(dev, async);
1199
1200	if (async_error)
1201		goto Complete;
1202
1203	if (dev->power.syscore || dev->power.direct_complete)
1204		goto Complete;
1205
1206	if (dev->pm_domain) {
1207		info = "noirq power domain ";
1208		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1209	} else if (dev->type && dev->type->pm) {
1210		info = "noirq type ";
1211		callback = pm_noirq_op(dev->type->pm, state);
1212	} else if (dev->class && dev->class->pm) {
1213		info = "noirq class ";
1214		callback = pm_noirq_op(dev->class->pm, state);
1215	} else if (dev->bus && dev->bus->pm) {
1216		info = "noirq bus ";
1217		callback = pm_noirq_op(dev->bus->pm, state);
1218	}
1219	if (callback)
1220		goto Run;
1221
 1222	if (dev_pm_skip_suspend(dev))
 1223		goto Skip;
1224
1225	if (dev->driver && dev->driver->pm) {
1226		info = "noirq driver ";
1227		callback = pm_noirq_op(dev->driver->pm, state);
1228	}
1229
1230Run:
1231	error = dpm_run_callback(callback, dev, state, info);
1232	if (error) {
1233		async_error = error;
1234		goto Complete;
1235	}
1236
1237Skip:
1238	dev->power.is_noirq_suspended = true;
1239
1240	/*
1241	 * Skipping the resume of devices that were in use right before the
1242	 * system suspend (as indicated by their PM-runtime usage counters)
 1243	 * would be suboptimal.  Also resume them if skipping their resume is
 1244	 * not allowed.
1245	 */
1246	if (atomic_read(&dev->power.usage_count) > 1 ||
1247	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1248	      dev->power.may_skip_resume))
 1249		dev->power.must_resume = true;
 1250
1251	if (dev->power.must_resume)
1252		dpm_superior_set_must_resume(dev);
1253
1254Complete:
1255	complete_all(&dev->power.completion);
1256	TRACE_SUSPEND(error);
1257	return error;
1258}
1259
1260static void async_suspend_noirq(void *data, async_cookie_t cookie)
1261{
1262	struct device *dev = (struct device *)data;
1263	int error;
1264
1265	error = __device_suspend_noirq(dev, pm_transition, true);
1266	if (error) {
1267		dpm_save_failed_dev(dev_name(dev));
1268		pm_dev_err(dev, pm_transition, " async", error);
1269	}
1270
1271	put_device(dev);
1272}
1273
1274static int device_suspend_noirq(struct device *dev)
1275{
1276	if (dpm_async_fn(dev, async_suspend_noirq))
1277		return 0;
1278
1279	return __device_suspend_noirq(dev, pm_transition, false);
1280}
1281
1282static int dpm_noirq_suspend_devices(pm_message_t state)
1283{
1284	ktime_t starttime = ktime_get();
1285	int error = 0;
1286
1287	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1288	mutex_lock(&dpm_list_mtx);
1289	pm_transition = state;
1290	async_error = 0;
1291
1292	while (!list_empty(&dpm_late_early_list)) {
1293		struct device *dev = to_device(dpm_late_early_list.prev);
1294
1295		get_device(dev);
1296		mutex_unlock(&dpm_list_mtx);
1297
1298		error = device_suspend_noirq(dev);
1299
1300		mutex_lock(&dpm_list_mtx);
1301		if (error) {
1302			pm_dev_err(dev, state, " noirq", error);
1303			dpm_save_failed_dev(dev_name(dev));
1304			put_device(dev);
1305			break;
1306		}
1307		if (!list_empty(&dev->power.entry))
1308			list_move(&dev->power.entry, &dpm_noirq_list);
1309		put_device(dev);
1310
1311		if (async_error)
1312			break;
1313	}
1314	mutex_unlock(&dpm_list_mtx);
1315	async_synchronize_full();
1316	if (!error)
1317		error = async_error;
1318
1319	if (error) {
1320		suspend_stats.failed_suspend_noirq++;
1321		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1322	}
1323	dpm_show_time(starttime, state, error, "noirq");
1324	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1325	return error;
1326}
1327
1328/**
1329 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1330 * @state: PM transition of the system being carried out.
1331 *
1332 * Prevent device drivers' interrupt handlers from being called and invoke
1333 * "noirq" suspend callbacks for all non-sysdev devices.
1334 */
1335int dpm_suspend_noirq(pm_message_t state)
1336{
1337	int ret;
1338
1339	cpuidle_pause();
1340
1341	device_wakeup_arm_wake_irqs();
1342	suspend_device_irqs();
1343
1344	ret = dpm_noirq_suspend_devices(state);
1345	if (ret)
1346		dpm_resume_noirq(resume_event(state));
1347
1348	return ret;
1349}
1350
1351static void dpm_propagate_wakeup_to_parent(struct device *dev)
1352{
1353	struct device *parent = dev->parent;
1354
1355	if (!parent)
1356		return;
1357
1358	spin_lock_irq(&parent->power.lock);
1359
1360	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1361		parent->power.wakeup_path = true;
1362
1363	spin_unlock_irq(&parent->power.lock);
1364}
 1365
 1366/**
1367 * __device_suspend_late - Execute a "late suspend" callback for given device.
1368 * @dev: Device to handle.
1369 * @state: PM transition of the system being carried out.
1370 * @async: If true, the device is being suspended asynchronously.
1371 *
1372 * Runtime PM is disabled for @dev while this function is being executed.
1373 */
1374static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1375{
1376	pm_callback_t callback = NULL;
1377	const char *info = NULL;
1378	int error = 0;
1379
1380	TRACE_DEVICE(dev);
1381	TRACE_SUSPEND(0);
1382
1383	__pm_runtime_disable(dev, false);
1384
1385	dpm_wait_for_subordinate(dev, async);
1386
1387	if (async_error)
1388		goto Complete;
1389
1390	if (pm_wakeup_pending()) {
1391		async_error = -EBUSY;
1392		goto Complete;
1393	}
1394
1395	if (dev->power.syscore || dev->power.direct_complete)
1396		goto Complete;
1397
1398	if (dev->pm_domain) {
1399		info = "late power domain ";
1400		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1401	} else if (dev->type && dev->type->pm) {
1402		info = "late type ";
1403		callback = pm_late_early_op(dev->type->pm, state);
1404	} else if (dev->class && dev->class->pm) {
1405		info = "late class ";
1406		callback = pm_late_early_op(dev->class->pm, state);
1407	} else if (dev->bus && dev->bus->pm) {
1408		info = "late bus ";
1409		callback = pm_late_early_op(dev->bus->pm, state);
1410	}
1411	if (callback)
1412		goto Run;
1413
 1414	if (dev_pm_skip_suspend(dev))
 1415		goto Skip;
1416
1417	if (dev->driver && dev->driver->pm) {
1418		info = "late driver ";
1419		callback = pm_late_early_op(dev->driver->pm, state);
1420	}
1421
1422Run:
1423	error = dpm_run_callback(callback, dev, state, info);
1424	if (error) {
1425		async_error = error;
1426		goto Complete;
1427	}
1428	dpm_propagate_wakeup_to_parent(dev);
1429
1430Skip:
1431	dev->power.is_late_suspended = true;
1432
1433Complete:
1434	TRACE_SUSPEND(error);
1435	complete_all(&dev->power.completion);
1436	return error;
1437}
1438
1439static void async_suspend_late(void *data, async_cookie_t cookie)
1440{
1441	struct device *dev = (struct device *)data;
1442	int error;
1443
1444	error = __device_suspend_late(dev, pm_transition, true);
1445	if (error) {
1446		dpm_save_failed_dev(dev_name(dev));
1447		pm_dev_err(dev, pm_transition, " async", error);
1448	}
1449	put_device(dev);
1450}
1451
1452static int device_suspend_late(struct device *dev)
1453{
1454	if (dpm_async_fn(dev, async_suspend_late))
1455		return 0;
1456
1457	return __device_suspend_late(dev, pm_transition, false);
1458}
1459
1460/**
1461 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1462 * @state: PM transition of the system being carried out.
1463 */
1464int dpm_suspend_late(pm_message_t state)
1465{
1466	ktime_t starttime = ktime_get();
1467	int error = 0;
1468
1469	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1470	mutex_lock(&dpm_list_mtx);
1471	pm_transition = state;
1472	async_error = 0;
1473
1474	while (!list_empty(&dpm_suspended_list)) {
1475		struct device *dev = to_device(dpm_suspended_list.prev);
1476
1477		get_device(dev);
1478		mutex_unlock(&dpm_list_mtx);
1479
1480		error = device_suspend_late(dev);
1481
1482		mutex_lock(&dpm_list_mtx);
1483		if (!list_empty(&dev->power.entry))
1484			list_move(&dev->power.entry, &dpm_late_early_list);
1485
1486		if (error) {
1487			pm_dev_err(dev, state, " late", error);
1488			dpm_save_failed_dev(dev_name(dev));
1489			put_device(dev);
1490			break;
1491		}
1492		put_device(dev);
1493
1494		if (async_error)
1495			break;
1496	}
1497	mutex_unlock(&dpm_list_mtx);
1498	async_synchronize_full();
1499	if (!error)
1500		error = async_error;
1501	if (error) {
1502		suspend_stats.failed_suspend_late++;
1503		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1504		dpm_resume_early(resume_event(state));
1505	}
1506	dpm_show_time(starttime, state, error, "late");
1507	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1508	return error;
1509}
1510
1511/**
1512 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1513 * @state: PM transition of the system being carried out.
1514 */
1515int dpm_suspend_end(pm_message_t state)
1516{
1517	ktime_t starttime = ktime_get();
1518	int error;
1519
1520	error = dpm_suspend_late(state);
1521	if (error)
1522		goto out;
1523
1524	error = dpm_suspend_noirq(state);
1525	if (error)
1526		dpm_resume_early(resume_event(state));
1527
1528out:
1529	dpm_show_time(starttime, state, error, "end");
1530	return error;
1531}
1532EXPORT_SYMBOL_GPL(dpm_suspend_end);
1533
1534/**
1535 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1536 * @dev: Device to suspend.
1537 * @state: PM transition of the system being carried out.
1538 * @cb: Suspend callback to execute.
1539 * @info: string description of caller.
1540 */
1541static int legacy_suspend(struct device *dev, pm_message_t state,
1542			  int (*cb)(struct device *dev, pm_message_t state),
1543			  const char *info)
1544{
1545	int error;
1546	ktime_t calltime;
1547
1548	calltime = initcall_debug_start(dev, cb);
1549
1550	trace_device_pm_callback_start(dev, info, state.event);
1551	error = cb(dev, state);
1552	trace_device_pm_callback_end(dev, error);
1553	suspend_report_result(cb, error);
1554
1555	initcall_debug_report(dev, calltime, cb, error);
1556
1557	return error;
1558}
1559
1560static void dpm_clear_superiors_direct_complete(struct device *dev)
1561{
1562	struct device_link *link;
1563	int idx;
1564
1565	if (dev->parent) {
1566		spin_lock_irq(&dev->parent->power.lock);
1567		dev->parent->power.direct_complete = false;
1568		spin_unlock_irq(&dev->parent->power.lock);
1569	}
1570
1571	idx = device_links_read_lock();
1572
1573	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1574		spin_lock_irq(&link->supplier->power.lock);
1575		link->supplier->power.direct_complete = false;
1576		spin_unlock_irq(&link->supplier->power.lock);
1577	}
1578
1579	device_links_read_unlock(idx);
1580}
1581
1582/**
1583 * __device_suspend - Execute "suspend" callbacks for given device.
1584 * @dev: Device to handle.
1585 * @state: PM transition of the system being carried out.
1586 * @async: If true, the device is being suspended asynchronously.
1587 */
1588static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1589{
1590	pm_callback_t callback = NULL;
1591	const char *info = NULL;
1592	int error = 0;
1593	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1594
1595	TRACE_DEVICE(dev);
1596	TRACE_SUSPEND(0);
1597
1598	dpm_wait_for_subordinate(dev, async);
1599
1600	if (async_error) {
1601		dev->power.direct_complete = false;
1602		goto Complete;
1603	}
1604
1605	/*
1606	 * Wait for possible runtime PM transitions of the device in progress
1607	 * to complete and if there's a runtime resume request pending for it,
1608	 * resume it before proceeding with invoking the system-wide suspend
1609	 * callbacks for it.
1610	 *
1611	 * If the system-wide suspend callbacks below change the configuration
1612	 * of the device, they must disable runtime PM for it or otherwise
1613	 * ensure that its runtime-resume callbacks will not be confused by that
1614	 * change in case they are invoked going forward.
1615	 */
 1616	pm_runtime_barrier(dev);
 1617
1618	if (pm_wakeup_pending()) {
1619		dev->power.direct_complete = false;
1620		async_error = -EBUSY;
1621		goto Complete;
1622	}
1623
1624	if (dev->power.syscore)
1625		goto Complete;
1626
1627	/* Avoid direct_complete to let wakeup_path propagate. */
1628	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1629		dev->power.direct_complete = false;
1630
1631	if (dev->power.direct_complete) {
1632		if (pm_runtime_status_suspended(dev)) {
1633			pm_runtime_disable(dev);
1634			if (pm_runtime_status_suspended(dev)) {
1635				pm_dev_dbg(dev, state, "direct-complete ");
1636				goto Complete;
1637			}
1638
1639			pm_runtime_enable(dev);
1640		}
1641		dev->power.direct_complete = false;
1642	}
1643
1644	dev->power.may_skip_resume = true;
1645	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1646
1647	dpm_watchdog_set(&wd, dev);
1648	device_lock(dev);
1649
1650	if (dev->pm_domain) {
1651		info = "power domain ";
1652		callback = pm_op(&dev->pm_domain->ops, state);
1653		goto Run;
1654	}
1655
1656	if (dev->type && dev->type->pm) {
1657		info = "type ";
1658		callback = pm_op(dev->type->pm, state);
1659		goto Run;
1660	}
1661
1662	if (dev->class && dev->class->pm) {
1663		info = "class ";
1664		callback = pm_op(dev->class->pm, state);
1665		goto Run;
1666	}
1667
1668	if (dev->bus) {
1669		if (dev->bus->pm) {
1670			info = "bus ";
1671			callback = pm_op(dev->bus->pm, state);
1672		} else if (dev->bus->suspend) {
1673			pm_dev_dbg(dev, state, "legacy bus ");
1674			error = legacy_suspend(dev, state, dev->bus->suspend,
1675						"legacy bus ");
1676			goto End;
1677		}
1678	}
1679
1680 Run:
1681	if (!callback && dev->driver && dev->driver->pm) {
1682		info = "driver ";
1683		callback = pm_op(dev->driver->pm, state);
1684	}
1685
1686	error = dpm_run_callback(callback, dev, state, info);
1687
1688 End:
1689	if (!error) {
1690		dev->power.is_suspended = true;
1691		if (device_may_wakeup(dev))
1692			dev->power.wakeup_path = true;
1693
1694		dpm_propagate_wakeup_to_parent(dev);
1695		dpm_clear_superiors_direct_complete(dev);
1696	}
1697
1698	device_unlock(dev);
1699	dpm_watchdog_clear(&wd);
1700
1701 Complete:
1702	if (error)
1703		async_error = error;
1704
1705	complete_all(&dev->power.completion);
1706	TRACE_SUSPEND(error);
1707	return error;
1708}
1709
1710static void async_suspend(void *data, async_cookie_t cookie)
1711{
1712	struct device *dev = (struct device *)data;
1713	int error;
1714
1715	error = __device_suspend(dev, pm_transition, true);
1716	if (error) {
1717		dpm_save_failed_dev(dev_name(dev));
1718		pm_dev_err(dev, pm_transition, " async", error);
1719	}
1720
1721	put_device(dev);
1722}
1723
1724static int device_suspend(struct device *dev)
1725{
1726	if (dpm_async_fn(dev, async_suspend))
1727		return 0;
1728
1729	return __device_suspend(dev, pm_transition, false);
1730}
1731
1732/**
1733 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1734 * @state: PM transition of the system being carried out.
1735 */
1736int dpm_suspend(pm_message_t state)
1737{
1738	ktime_t starttime = ktime_get();
1739	int error = 0;
1740
1741	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1742	might_sleep();
1743
1744	devfreq_suspend();
1745	cpufreq_suspend();
1746
1747	mutex_lock(&dpm_list_mtx);
1748	pm_transition = state;
1749	async_error = 0;
1750	while (!list_empty(&dpm_prepared_list)) {
1751		struct device *dev = to_device(dpm_prepared_list.prev);
1752
1753		get_device(dev);
1754		mutex_unlock(&dpm_list_mtx);
1755
1756		error = device_suspend(dev);
1757
1758		mutex_lock(&dpm_list_mtx);
1759		if (error) {
1760			pm_dev_err(dev, state, "", error);
1761			dpm_save_failed_dev(dev_name(dev));
1762			put_device(dev);
1763			break;
1764		}
1765		if (!list_empty(&dev->power.entry))
1766			list_move(&dev->power.entry, &dpm_suspended_list);
1767		put_device(dev);
1768		if (async_error)
1769			break;
1770	}
1771	mutex_unlock(&dpm_list_mtx);
1772	async_synchronize_full();
1773	if (!error)
1774		error = async_error;
1775	if (error) {
1776		suspend_stats.failed_suspend++;
1777		dpm_save_failed_step(SUSPEND_SUSPEND);
1778	}
1779	dpm_show_time(starttime, state, error, NULL);
1780	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1781	return error;
1782}
1783
1784/**
1785 * device_prepare - Prepare a device for system power transition.
1786 * @dev: Device to handle.
1787 * @state: PM transition of the system being carried out.
1788 *
1789 * Execute the ->prepare() callback(s) for given device.  No new children of the
1790 * device may be registered after this function has returned.
1791 */
1792static int device_prepare(struct device *dev, pm_message_t state)
1793{
1794	int (*callback)(struct device *) = NULL;
1795	int ret = 0;
1796
1797	if (dev->power.syscore)
1798		return 0;
 1799
 1800	/*
1801	 * If a device's parent goes into runtime suspend at the wrong time,
1802	 * it won't be possible to resume the device.  To prevent this we
1803	 * block runtime suspend here, during the prepare phase, and allow
1804	 * it again during the complete phase.
1805	 */
1806	pm_runtime_get_noresume(dev);
1807
1808	device_lock(dev);
1809
1810	dev->power.wakeup_path = false;
1811
1812	if (dev->power.no_pm_callbacks)
1813		goto unlock;
1814
1815	if (dev->pm_domain)
1816		callback = dev->pm_domain->ops.prepare;
1817	else if (dev->type && dev->type->pm)
1818		callback = dev->type->pm->prepare;
1819	else if (dev->class && dev->class->pm)
1820		callback = dev->class->pm->prepare;
1821	else if (dev->bus && dev->bus->pm)
1822		callback = dev->bus->pm->prepare;
1823
1824	if (!callback && dev->driver && dev->driver->pm)
1825		callback = dev->driver->pm->prepare;
1826
1827	if (callback)
1828		ret = callback(dev);
1829
1830unlock:
1831	device_unlock(dev);
1832
1833	if (ret < 0) {
1834		suspend_report_result(callback, ret);
1835		pm_runtime_put(dev);
1836		return ret;
1837	}
1838	/*
1839	 * A positive return value from ->prepare() means "this device appears
1840	 * to be runtime-suspended and its state is fine, so if it really is
1841	 * runtime-suspended, you can leave it in that state provided that you
1842	 * will do the same thing with all of its descendants".  This only
1843	 * applies to suspend transitions, however.
1844	 */
1845	spin_lock_irq(&dev->power.lock);
1846	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1847		(ret > 0 || dev->power.no_pm_callbacks) &&
 1848		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
 1849	spin_unlock_irq(&dev->power.lock);
1850	return 0;
1851}
1852
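/*
 * Editorial sketch: a hypothetical ->prepare() callback that opts into
 * the direct_complete path described above by returning a positive value
 * whenever the device is runtime-suspended and its state is known to be
 * fine for the whole transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 */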
1853/**
1854 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1855 * @state: PM transition of the system being carried out.
1856 *
1857 * Execute the ->prepare() callback(s) for all devices.
1858 */
1859int dpm_prepare(pm_message_t state)
1860{
1861	int error = 0;
1862
1863	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1864	might_sleep();
1865
1866	/*
 1867	 * Give the known devices a chance to complete their probes before
 1868	 * device probing is disabled. This synchronization point matters at
 1869	 * least at boot time and on hibernation restore.
1870	 */
1871	wait_for_device_probe();
1872	/*
 1873	 * Probing devices during suspend or hibernation is unsafe and makes
 1874	 * system behavior unpredictable. So prohibit device probing here and
 1875	 * defer any probes instead. The normal behavior will be restored in
 1876	 * dpm_complete().
1877	 */
1878	device_block_probing();
1879
1880	mutex_lock(&dpm_list_mtx);
1881	while (!list_empty(&dpm_list)) {
1882		struct device *dev = to_device(dpm_list.next);
1883
1884		get_device(dev);
1885		mutex_unlock(&dpm_list_mtx);
1886
1887		trace_device_pm_callback_start(dev, "", state.event);
1888		error = device_prepare(dev, state);
1889		trace_device_pm_callback_end(dev, error);
1890
1891		mutex_lock(&dpm_list_mtx);
1892		if (error) {
1893			if (error == -EAGAIN) {
1894				put_device(dev);
1895				error = 0;
1896				continue;
1897			}
1898			dev_info(dev, "not prepared for power transition: code %d\n",
1899				 error);
1900			put_device(dev);
1901			break;
1902		}
1903		dev->power.is_prepared = true;
1904		if (!list_empty(&dev->power.entry))
1905			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1906		put_device(dev);
1907	}
1908	mutex_unlock(&dpm_list_mtx);
1909	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1910	return error;
1911}
1912
1913/**
1914 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1915 * @state: PM transition of the system being carried out.
1916 *
1917 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1918 * callbacks for them.
1919 */
1920int dpm_suspend_start(pm_message_t state)
1921{
1922	ktime_t starttime = ktime_get();
1923	int error;
1924
1925	error = dpm_prepare(state);
1926	if (error) {
1927		suspend_stats.failed_prepare++;
1928		dpm_save_failed_step(SUSPEND_PREPARE);
1929	} else
1930		error = dpm_suspend(state);
1931	dpm_show_time(starttime, state, error, "start");
1932	return error;
1933}
1934EXPORT_SYMBOL_GPL(dpm_suspend_start);
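
/*
 * Editor's illustrative sketch (not part of main.c): how a system sleep
 * path might pair dpm_suspend_start() with dpm_resume_end(), loosely
 * modeled on kernel/power/suspend.c; error handling is simplified.
 */
static int example_enter_sleep(void)
{
	int error = dpm_suspend_start(PMSG_SUSPEND);

	if (error) {
		pr_err("Some devices failed to suspend\n");
		goto recover;
	}

	/* ... enter the sleep state via platform-specific code ... */

recover:
	dpm_resume_end(PMSG_RESUME);
	return error;
}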
1935
1936void __suspend_report_result(const char *function, void *fn, int ret)
1937{
1938	if (ret)
1939		pr_err("%s(): %pS returns %d\n", function, fn, ret);
1940}
1941EXPORT_SYMBOL_GPL(__suspend_report_result);
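
/*
 * Editor's note: callers invoke this through the suspend_report_result()
 * wrapper from <linux/pm.h>, which supplies __func__ automatically,
 * roughly:
 *
 *	#define suspend_report_result(fn, ret)				\
 *		do {							\
 *			__suspend_report_result(__func__, fn, ret);	\
 *		} while (0)
 */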
1942
1943/**
1944 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1945 * @subordinate: Device that needs to wait for @dev.
1946 * @dev: Device to wait for.
1947 */
1948int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1949{
1950	dpm_wait(dev, subordinate->power.async_suspend);
1951	return async_error;
1952}
1953EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
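
/*
 * Editor's illustrative sketch (not part of main.c): a hypothetical
 * driver whose device must not suspend before a partner device has
 * finished suspending.  The "bar" structure, its partner pointer and
 * the bar_quiesce() stub are invented for illustration.
 */
struct bar {
	struct device *partner;
};

static int bar_quiesce(struct bar *priv) { return 0; }	/* stub */

static int bar_suspend(struct device *dev)
{
	struct bar *priv = dev_get_drvdata(dev);
	int error;

	/* Order this suspend after the partner's, even when async. */
	error = device_pm_wait_for_dev(dev, priv->partner);
	if (error)
		return error;

	return bar_quiesce(priv);
}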
1954
1955/**
1956 * dpm_for_each_dev - device iterator.
1957 * @data: data for the callback.
1958 * @fn: function to be called for each device.
1959 *
1960 * Iterate over devices in dpm_list, and call @fn for each device,
1961 * passing it @data.
1962 */
1963void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1964{
1965	struct device *dev;
1966
1967	if (!fn)
1968		return;
1969
1970	device_pm_lock();
1971	list_for_each_entry(dev, &dpm_list, power.entry)
1972		fn(dev, data);
1973	device_pm_unlock();
1974}
1975EXPORT_SYMBOL_GPL(dpm_for_each_dev);
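
/*
 * Editor's illustrative sketch (not part of main.c): counting the
 * devices on dpm_list with dpm_for_each_dev().  The callback runs with
 * the PM device list locked, so it must not register or unregister
 * devices.
 */
static void count_one_dev(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, count_one_dev);
	return count;
}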
1976
1977static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1978{
1979	if (!ops)
1980		return true;
1981
1982	return !ops->prepare &&
1983	       !ops->suspend &&
1984	       !ops->suspend_late &&
1985	       !ops->suspend_noirq &&
1986	       !ops->resume_noirq &&
1987	       !ops->resume_early &&
1988	       !ops->resume &&
1989	       !ops->complete;
1990}
1991
1992void device_pm_check_callbacks(struct device *dev)
1993{
1994	spin_lock_irq(&dev->power.lock);
1995	dev->power.no_pm_callbacks =
1996		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1997		 !dev->bus->suspend && !dev->bus->resume)) &&
1998		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1999		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2000		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2001		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2002		 !dev->driver->suspend && !dev->driver->resume));
2003	spin_unlock_irq(&dev->power.lock);
2004}
2005
2006bool dev_pm_skip_suspend(struct device *dev)
2007{
2008	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2009		pm_runtime_status_suspended(dev);
2010}
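
/*
 * Editor's illustrative sketch (not part of main.c): a hypothetical
 * probe routine setting DPM_FLAG_SMART_SUSPEND, which lets the core and
 * subsystems use dev_pm_skip_suspend() to leave an already
 * runtime-suspended device alone during system suspend.
 */
static int example_probe_smart_suspend(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}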
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/mutex.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm-trace.h>
  26#include <linux/pm_wakeirq.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/sched/debug.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43/*
  44 * The entries in the dpm_list list are in a depth first order, simply
  45 * because children are guaranteed to be discovered after parents, and
  46 * are inserted at the back of the list on discovery.
  47 *
  48 * Since device_pm_add() may be called with a device lock held,
  49 * we must never try to acquire a device lock while holding
  50 * dpm_list_mutex.
  51 */
  52
  53LIST_HEAD(dpm_list);
  54static LIST_HEAD(dpm_prepared_list);
  55static LIST_HEAD(dpm_suspended_list);
  56static LIST_HEAD(dpm_late_early_list);
  57static LIST_HEAD(dpm_noirq_list);
  58
  59struct suspend_stats suspend_stats;
  60static DEFINE_MUTEX(dpm_list_mtx);
  61static pm_message_t pm_transition;
  62
  63static int async_error;
  64
  65static const char *pm_verb(int event)
  66{
  67	switch (event) {
  68	case PM_EVENT_SUSPEND:
  69		return "suspend";
  70	case PM_EVENT_RESUME:
  71		return "resume";
  72	case PM_EVENT_FREEZE:
  73		return "freeze";
  74	case PM_EVENT_QUIESCE:
  75		return "quiesce";
  76	case PM_EVENT_HIBERNATE:
  77		return "hibernate";
  78	case PM_EVENT_THAW:
  79		return "thaw";
  80	case PM_EVENT_RESTORE:
  81		return "restore";
  82	case PM_EVENT_RECOVER:
  83		return "recover";
  84	default:
  85		return "(unknown PM event)";
  86	}
  87}
  88
  89/**
  90 * device_pm_sleep_init - Initialize system suspend-related device fields.
  91 * @dev: Device object being initialized.
  92 */
  93void device_pm_sleep_init(struct device *dev)
  94{
  95	dev->power.is_prepared = false;
  96	dev->power.is_suspended = false;
  97	dev->power.is_noirq_suspended = false;
  98	dev->power.is_late_suspended = false;
  99	init_completion(&dev->power.completion);
 100	complete_all(&dev->power.completion);
 101	dev->power.wakeup = NULL;
 102	INIT_LIST_HEAD(&dev->power.entry);
 103}
 104
 105/**
 106 * device_pm_lock - Lock the list of active devices used by the PM core.
 107 */
 108void device_pm_lock(void)
 109{
 110	mutex_lock(&dpm_list_mtx);
 111}
 112
 113/**
 114 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 115 */
 116void device_pm_unlock(void)
 117{
 118	mutex_unlock(&dpm_list_mtx);
 119}
 120
 121/**
 122 * device_pm_add - Add a device to the PM core's list of active devices.
 123 * @dev: Device to add to the list.
 124 */
 125void device_pm_add(struct device *dev)
 126{
 127	/* Skip PM setup/initialization. */
 128	if (device_pm_not_required(dev))
 129		return;
 130
 131	pr_debug("Adding info for %s:%s\n",
 132		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 133	device_pm_check_callbacks(dev);
 134	mutex_lock(&dpm_list_mtx);
 135	if (dev->parent && dev->parent->power.is_prepared)
 136		dev_warn(dev, "parent %s should not be sleeping\n",
 137			dev_name(dev->parent));
 138	list_add_tail(&dev->power.entry, &dpm_list);
 139	dev->power.in_dpm_list = true;
 140	mutex_unlock(&dpm_list_mtx);
 141}
 142
 143/**
 144 * device_pm_remove - Remove a device from the PM core's list of active devices.
 145 * @dev: Device to be removed from the list.
 146 */
 147void device_pm_remove(struct device *dev)
 148{
 149	if (device_pm_not_required(dev))
 150		return;
 151
 152	pr_debug("Removing info for %s:%s\n",
 153		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 154	complete_all(&dev->power.completion);
 155	mutex_lock(&dpm_list_mtx);
 156	list_del_init(&dev->power.entry);
 157	dev->power.in_dpm_list = false;
 158	mutex_unlock(&dpm_list_mtx);
 159	device_wakeup_disable(dev);
 160	pm_runtime_remove(dev);
 161	device_pm_check_callbacks(dev);
 162}
 163
 164/**
 165 * device_pm_move_before - Move device in the PM core's list of active devices.
 166 * @deva: Device to move in dpm_list.
 167 * @devb: Device @deva should come before.
 168 */
 169void device_pm_move_before(struct device *deva, struct device *devb)
 170{
 171	pr_debug("Moving %s:%s before %s:%s\n",
 172		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 173		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 174	/* Delete deva from dpm_list and reinsert before devb. */
 175	list_move_tail(&deva->power.entry, &devb->power.entry);
 176}
 177
 178/**
 179 * device_pm_move_after - Move device in the PM core's list of active devices.
 180 * @deva: Device to move in dpm_list.
 181 * @devb: Device @deva should come after.
 182 */
 183void device_pm_move_after(struct device *deva, struct device *devb)
 184{
 185	pr_debug("Moving %s:%s after %s:%s\n",
 186		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 187		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 188	/* Delete deva from dpm_list and reinsert after devb. */
 189	list_move(&deva->power.entry, &devb->power.entry);
 190}
 191
 192/**
 193 * device_pm_move_last - Move device to end of the PM core's list of devices.
 194 * @dev: Device to move in dpm_list.
 195 */
 196void device_pm_move_last(struct device *dev)
 197{
 198	pr_debug("Moving %s:%s to end of list\n",
 199		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 200	list_move_tail(&dev->power.entry, &dpm_list);
 201}
 202
 203static ktime_t initcall_debug_start(struct device *dev, void *cb)
 204{
 205	if (!pm_print_times_enabled)
 206		return 0;
 207
 208	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 209		 task_pid_nr(current),
 210		 dev->parent ? dev_name(dev->parent) : "none");
 211	return ktime_get();
 212}
 213
 214static void initcall_debug_report(struct device *dev, ktime_t calltime,
 215				  void *cb, int error)
 216{
 217	ktime_t rettime;
 218	s64 nsecs;
 219
 220	if (!pm_print_times_enabled)
 221		return;
 222
 223	rettime = ktime_get();
 224	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 225
 226	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 227		 (unsigned long long)nsecs >> 10);
 228}
 229
 230/**
 231 * dpm_wait - Wait for a PM operation to complete.
 232 * @dev: Device to wait for.
 233 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 234 */
 235static void dpm_wait(struct device *dev, bool async)
 236{
 237	if (!dev)
 238		return;
 239
 240	if (async || (pm_async_enabled && dev->power.async_suspend))
 241		wait_for_completion(&dev->power.completion);
 242}
 243
 244static int dpm_wait_fn(struct device *dev, void *async_ptr)
 245{
 246	dpm_wait(dev, *((bool *)async_ptr));
 247	return 0;
 248}
 249
 250static void dpm_wait_for_children(struct device *dev, bool async)
 251{
 252	device_for_each_child(dev, &async, dpm_wait_fn);
 253}
 254
 255static void dpm_wait_for_suppliers(struct device *dev, bool async)
 256{
 257	struct device_link *link;
 258	int idx;
 259
 260	idx = device_links_read_lock();
 261
 262	/*
 263	 * If the supplier goes away right after we've checked the link to it,
 264	 * we'll wait for its completion to change the state, but that's fine,
 265	 * because the only things that will block as a result are the SRCU
 266	 * callbacks freeing the link objects for the links in the list we're
 267	 * walking.
 268	 */
 269	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
 270		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 271			dpm_wait(link->supplier, async);
 272
 273	device_links_read_unlock(idx);
 274}
 275
 276static void dpm_wait_for_superior(struct device *dev, bool async)
 277{
 278	dpm_wait(dev->parent, async);
 279	dpm_wait_for_suppliers(dev, async);
 280}
 281
 282static void dpm_wait_for_consumers(struct device *dev, bool async)
 283{
 284	struct device_link *link;
 285	int idx;
 286
 287	idx = device_links_read_lock();
 288
 289	/*
 290	 * The status of a device link can only be changed from "dormant" by a
 291	 * probe, but that cannot happen during system suspend/resume.  In
 292	 * theory it can change to "dormant" at that time, but then it is
 293	 * reasonable to wait for the target device anyway (e.g. if it goes
 294	 * away, it's better to wait for it to go away completely and then
 295	 * continue instead of trying to continue in parallel with its
 296	 * unregistration).
 297	 */
 298	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
 299		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 300			dpm_wait(link->consumer, async);
 301
 302	device_links_read_unlock(idx);
 303}
 304
 305static void dpm_wait_for_subordinate(struct device *dev, bool async)
 306{
 307	dpm_wait_for_children(dev, async);
 308	dpm_wait_for_consumers(dev, async);
 309}
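
/*
 * Editor's note: the supplier/consumer ordering honored above is
 * established elsewhere with device_link_add().  For example, a
 * hypothetical consumer driver might do:
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_STATELESS);
 *	if (!link)
 *		return -EINVAL;
 *
 * after which suspend waits for consumers before their supplier, and
 * resume waits for the supplier before its consumers.
 */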
 310
 311/**
 312 * pm_op - Return the PM operation appropriate for given PM event.
 313 * @ops: PM operations to choose from.
 314 * @state: PM transition of the system being carried out.
 315 */
 316static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 317{
 318	switch (state.event) {
 319#ifdef CONFIG_SUSPEND
 320	case PM_EVENT_SUSPEND:
 321		return ops->suspend;
 322	case PM_EVENT_RESUME:
 323		return ops->resume;
 324#endif /* CONFIG_SUSPEND */
 325#ifdef CONFIG_HIBERNATE_CALLBACKS
 326	case PM_EVENT_FREEZE:
 327	case PM_EVENT_QUIESCE:
 328		return ops->freeze;
 329	case PM_EVENT_HIBERNATE:
 330		return ops->poweroff;
 331	case PM_EVENT_THAW:
 332	case PM_EVENT_RECOVER:
 333		return ops->thaw;
 334
 335	case PM_EVENT_RESTORE:
 336		return ops->restore;
 337#endif /* CONFIG_HIBERNATE_CALLBACKS */
 338	}
 339
 340	return NULL;
 341}
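
/*
 * Editor's illustrative sketch (not part of main.c): a dev_pm_ops table
 * for a hypothetical driver.  For PM_EVENT_SUSPEND, pm_op() above picks
 * ->suspend; SET_SYSTEM_SLEEP_PM_OPS() points the hibernation entries
 * (freeze/thaw/poweroff/restore) at the same pair of callbacks.
 */
static int baz_suspend(struct device *dev) { return 0; }
static int baz_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops baz_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(baz_suspend, baz_resume)
};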
 342
 343/**
 344 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 345 * @ops: PM operations to choose from.
 346 * @state: PM transition of the system being carried out.
 347 *
 348 * Runtime PM is disabled for @dev while this function is being executed.
 349 */
 350static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 351				      pm_message_t state)
 352{
 353	switch (state.event) {
 354#ifdef CONFIG_SUSPEND
 355	case PM_EVENT_SUSPEND:
 356		return ops->suspend_late;
 357	case PM_EVENT_RESUME:
 358		return ops->resume_early;
 359#endif /* CONFIG_SUSPEND */
 360#ifdef CONFIG_HIBERNATE_CALLBACKS
 361	case PM_EVENT_FREEZE:
 362	case PM_EVENT_QUIESCE:
 363		return ops->freeze_late;
 364	case PM_EVENT_HIBERNATE:
 365		return ops->poweroff_late;
 366	case PM_EVENT_THAW:
 367	case PM_EVENT_RECOVER:
 368		return ops->thaw_early;
 369	case PM_EVENT_RESTORE:
 370		return ops->restore_early;
 371#endif /* CONFIG_HIBERNATE_CALLBACKS */
 372	}
 373
 374	return NULL;
 375}
 376
 377/**
 378 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 379 * @ops: PM operations to choose from.
 380 * @state: PM transition of the system being carried out.
 381 *
 382 * The driver of @dev will not receive interrupts while this function is being
 383 * executed.
 384 */
 385static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 386{
 387	switch (state.event) {
 388#ifdef CONFIG_SUSPEND
 389	case PM_EVENT_SUSPEND:
 390		return ops->suspend_noirq;
 391	case PM_EVENT_RESUME:
 392		return ops->resume_noirq;
 393#endif /* CONFIG_SUSPEND */
 394#ifdef CONFIG_HIBERNATE_CALLBACKS
 395	case PM_EVENT_FREEZE:
 396	case PM_EVENT_QUIESCE:
 397		return ops->freeze_noirq;
 398	case PM_EVENT_HIBERNATE:
 399		return ops->poweroff_noirq;
 400	case PM_EVENT_THAW:
 401	case PM_EVENT_RECOVER:
 402		return ops->thaw_noirq;
 403	case PM_EVENT_RESTORE:
 404		return ops->restore_noirq;
 405#endif /* CONFIG_HIBERNATE_CALLBACKS */
 406	}
 407
 408	return NULL;
 409}
 410
 411static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 412{
 413	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 414		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 415		", may wakeup" : "");
 416}
 417
 418static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 419			int error)
 420{
 421	pr_err("Device %s failed to %s%s: error %d\n",
 422	       dev_name(dev), pm_verb(state.event), info, error);
 423}
 424
 425static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 426			  const char *info)
 427{
 428	ktime_t calltime;
 429	u64 usecs64;
 430	int usecs;
 431
 432	calltime = ktime_get();
 433	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 434	do_div(usecs64, NSEC_PER_USEC);
 435	usecs = usecs64;
 436	if (usecs == 0)
 437		usecs = 1;
 438
 439	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 440		  info ?: "", info ? " " : "", pm_verb(state.event),
 441		  error ? "aborted" : "complete",
 442		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 443}
 444
 445static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 446			    pm_message_t state, const char *info)
 447{
 448	ktime_t calltime;
 449	int error;
 450
 451	if (!cb)
 452		return 0;
 453
 454	calltime = initcall_debug_start(dev, cb);
 455
 456	pm_dev_dbg(dev, state, info);
 457	trace_device_pm_callback_start(dev, info, state.event);
 458	error = cb(dev);
 459	trace_device_pm_callback_end(dev, error);
 460	suspend_report_result(cb, error);
 461
 462	initcall_debug_report(dev, calltime, cb, error);
 463
 464	return error;
 465}
 466
 467#ifdef CONFIG_DPM_WATCHDOG
 468struct dpm_watchdog {
 469	struct device		*dev;
 470	struct task_struct	*tsk;
 471	struct timer_list	timer;
 472};
 473
 474#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 475	struct dpm_watchdog wd
 476
 477/**
 478 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 479 * @t: The timer that PM watchdog depends on.
 480 *
 481 * Called when a driver has timed out suspending or resuming.
 482 * There's not much we can do here to recover so panic() to
 483 * capture a crash-dump in pstore.
 484 */
 485static void dpm_watchdog_handler(struct timer_list *t)
 486{
 487	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 488
 489	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 490	show_stack(wd->tsk, NULL);
 491	panic("%s %s: unrecoverable failure\n",
 492		dev_driver_string(wd->dev), dev_name(wd->dev));
 493}
 494
 495/**
 496 * dpm_watchdog_set - Enable pm watchdog for given device.
 497 * @wd: Watchdog. Must be allocated on the stack.
 498 * @dev: Device to handle.
 499 */
 500static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 501{
 502	struct timer_list *timer = &wd->timer;
 503
 504	wd->dev = dev;
 505	wd->tsk = current;
 506
 507	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 508	/* use same timeout value for both suspend and resume */
 509	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 510	add_timer(timer);
 511}
 512
 513/**
 514 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 515 * @wd: Watchdog to disable.
 516 */
 517static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 518{
 519	struct timer_list *timer = &wd->timer;
 520
 521	del_timer_sync(timer);
 522	destroy_timer_on_stack(timer);
 523}
 524#else
 525#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 526#define dpm_watchdog_set(x, y)
 527#define dpm_watchdog_clear(x)
 528#endif
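
/*
 * Editor's illustrative sketch (not part of main.c): the usage pattern
 * for the watchdog helpers above, mirroring how device_resume() and
 * __device_suspend() below employ them.  With CONFIG_DPM_WATCHDOG unset
 * the declaration and both calls compile away to nothing.
 */
static int example_guarded_callback(struct device *dev,
				    int (*cb)(struct device *))
{
	int error;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_watchdog_set(&wd, dev);
	error = cb(dev);
	dpm_watchdog_clear(&wd);

	return error;
}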
 529
 530/*------------------------- Resume routines -------------------------*/
 531
 532/**
 533 * suspend_event - Return a "suspend" message for given "resume" one.
 534 * @resume_msg: PM message representing a system-wide resume transition.
 535 */
 536static pm_message_t suspend_event(pm_message_t resume_msg)
 537{
 538	switch (resume_msg.event) {
 539	case PM_EVENT_RESUME:
 540		return PMSG_SUSPEND;
 541	case PM_EVENT_THAW:
 542	case PM_EVENT_RESTORE:
 543		return PMSG_FREEZE;
 544	case PM_EVENT_RECOVER:
 545		return PMSG_HIBERNATE;
 546	}
 547	return PMSG_ON;
 548}
 549
 550/**
 551 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 552 * @dev: Target device.
 553 *
 554 * Checks whether or not the device may be left in suspend after a system-wide
 555 * transition to the working state.
 556 */
 557bool dev_pm_may_skip_resume(struct device *dev)
 558{
 559	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
 560}
 561
 562static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
 563						pm_message_t state,
 564						const char **info_p)
 565{
 566	pm_callback_t callback;
 567	const char *info;
 568
 569	if (dev->pm_domain) {
 570		info = "noirq power domain ";
 571		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 572	} else if (dev->type && dev->type->pm) {
 573		info = "noirq type ";
 574		callback = pm_noirq_op(dev->type->pm, state);
 575	} else if (dev->class && dev->class->pm) {
 576		info = "noirq class ";
 577		callback = pm_noirq_op(dev->class->pm, state);
 578	} else if (dev->bus && dev->bus->pm) {
 579		info = "noirq bus ";
 580		callback = pm_noirq_op(dev->bus->pm, state);
 581	} else {
 582		return NULL;
 583	}
 584
 585	if (info_p)
 586		*info_p = info;
 587
 588	return callback;
 589}
 590
 591static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
 592						 pm_message_t state,
 593						 const char **info_p);
 594
 595static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
 596						pm_message_t state,
 597						const char **info_p);
 598
 599/**
 600 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 601 * @dev: Device to handle.
 602 * @state: PM transition of the system being carried out.
 603 * @async: If true, the device is being resumed asynchronously.
 604 *
 605 * The driver of @dev will not receive interrupts while this function is being
 606 * executed.
 607 */
 608static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 609{
 610	pm_callback_t callback;
 611	const char *info;
 612	bool skip_resume;
 613	int error = 0;
 614
 615	TRACE_DEVICE(dev);
 616	TRACE_RESUME(0);
 617
 618	if (dev->power.syscore || dev->power.direct_complete)
 619		goto Out;
 620
 621	if (!dev->power.is_noirq_suspended)
 622		goto Out;
 623
 624	dpm_wait_for_superior(dev, async);
 625
 626	skip_resume = dev_pm_may_skip_resume(dev);
 627
 628	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
 629	if (callback)
 630		goto Run;
 631
 632	if (skip_resume)
 633		goto Skip;
 634
 635	if (dev_pm_smart_suspend_and_suspended(dev)) {
 636		pm_message_t suspend_msg = suspend_event(state);
 637
 638		/*
 639		 * If "freeze" callbacks have been skipped during a transition
 640		 * related to hibernation, the subsequent "thaw" callbacks must
 641		 * be skipped too or bad things may happen.  Otherwise, resume
 642		 * callbacks are going to be run for the device, so its runtime
 643		 * PM status must be changed to reflect the new state after the
 644		 * transition under way.
 645		 */
 646		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
 647		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
 648			if (state.event == PM_EVENT_THAW) {
 649				skip_resume = true;
 650				goto Skip;
 651			} else {
 652				pm_runtime_set_active(dev);
 653			}
 654		}
 655	}
 656
 657	if (dev->driver && dev->driver->pm) {
 658		info = "noirq driver ";
 659		callback = pm_noirq_op(dev->driver->pm, state);
 660	}
 661
 662Run:
 663	error = dpm_run_callback(callback, dev, state, info);
 664
 665Skip:
 666	dev->power.is_noirq_suspended = false;
 667
 668	if (skip_resume) {
 669		/* Make the next phases of resume skip the device. */
 670		dev->power.is_late_suspended = false;
 671		dev->power.is_suspended = false;
 672		/*
 673		 * The device is going to be left in suspend, but it might not
 674		 * have been in runtime suspend before the system suspended, so
 675		 * its runtime PM status needs to be updated to avoid confusing
 676		 * the runtime PM framework when runtime PM is enabled for the
 677		 * device again.
 678		 */
 679		pm_runtime_set_suspended(dev);
 680	}
 681
 682Out:
 683	complete_all(&dev->power.completion);
 684	TRACE_RESUME(error);
 685	return error;
 686}
 687
 688static bool is_async(struct device *dev)
 689{
 690	return dev->power.async_suspend && pm_async_enabled
 691		&& !pm_trace_is_enabled();
 692}
 693
 694static bool dpm_async_fn(struct device *dev, async_func_t func)
 695{
 696	reinit_completion(&dev->power.completion);
 697
 698	if (is_async(dev)) {
 699		get_device(dev);
 700		async_schedule(func, dev);
 701		return true;
 702	}
 703
 704	return false;
 705}
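
/*
 * Editor's illustrative sketch (not part of main.c): a hypothetical
 * probe routine opting its device in to asynchronous suspend/resume, so
 * dpm_async_fn() above schedules it on the async pool.  Userspace can
 * toggle the same setting via the device's power/async sysfs attribute.
 */
static int example_probe_async(struct device *dev)
{
	device_enable_async_suspend(dev);
	return 0;
}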
 706
 707static void async_resume_noirq(void *data, async_cookie_t cookie)
 708{
 709	struct device *dev = (struct device *)data;
 710	int error;
 711
 712	error = device_resume_noirq(dev, pm_transition, true);
 713	if (error)
 714		pm_dev_err(dev, pm_transition, " async", error);
 715
 716	put_device(dev);
 717}
 718
 719static void dpm_noirq_resume_devices(pm_message_t state)
 720{
 721	struct device *dev;
 722	ktime_t starttime = ktime_get();
 723
 724	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 725	mutex_lock(&dpm_list_mtx);
 726	pm_transition = state;
 727
 728	/*
 729	 * Start the async threads upfront, in case their
 730	 * startup would otherwise be delayed by non-async
 731	 * resuming devices.
 732	 */
 733	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 734		dpm_async_fn(dev, async_resume_noirq);
 735
 736	while (!list_empty(&dpm_noirq_list)) {
 737		dev = to_device(dpm_noirq_list.next);
 738		get_device(dev);
 739		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 740		mutex_unlock(&dpm_list_mtx);
 741
 742		if (!is_async(dev)) {
 743			int error;
 744
 745			error = device_resume_noirq(dev, state, false);
 746			if (error) {
 747				suspend_stats.failed_resume_noirq++;
 748				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 749				dpm_save_failed_dev(dev_name(dev));
 750				pm_dev_err(dev, state, " noirq", error);
 751			}
 752		}
 753
 754		mutex_lock(&dpm_list_mtx);
 755		put_device(dev);
 756	}
 757	mutex_unlock(&dpm_list_mtx);
 758	async_synchronize_full();
 759	dpm_show_time(starttime, state, 0, "noirq");
 760	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 761}
 762
 763/**
 764 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 765 * @state: PM transition of the system being carried out.
 766 *
 767 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 768 * allow device drivers' interrupt handlers to be called.
 769 */
 770void dpm_resume_noirq(pm_message_t state)
 771{
 772	dpm_noirq_resume_devices(state);
 773
 774	resume_device_irqs();
 775	device_wakeup_disarm_wake_irqs();
 776
 777	cpuidle_resume();
 778}
 779
 780static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
 781						pm_message_t state,
 782						const char **info_p)
 783{
 784	pm_callback_t callback;
 785	const char *info;
 786
 787	if (dev->pm_domain) {
 788		info = "early power domain ";
 789		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 790	} else if (dev->type && dev->type->pm) {
 791		info = "early type ";
 792		callback = pm_late_early_op(dev->type->pm, state);
 793	} else if (dev->class && dev->class->pm) {
 794		info = "early class ";
 795		callback = pm_late_early_op(dev->class->pm, state);
 796	} else if (dev->bus && dev->bus->pm) {
 797		info = "early bus ";
 798		callback = pm_late_early_op(dev->bus->pm, state);
 799	} else {
 800		return NULL;
 801	}
 802
 803	if (info_p)
 804		*info_p = info;
 805
 806	return callback;
 807}
 808
 809/**
 810 * device_resume_early - Execute an "early resume" callback for given device.
 811 * @dev: Device to handle.
 812 * @state: PM transition of the system being carried out.
 813 * @async: If true, the device is being resumed asynchronously.
 814 *
 815 * Runtime PM is disabled for @dev while this function is being executed.
 816 */
 817static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 818{
 819	pm_callback_t callback;
 820	const char *info;
 821	int error = 0;
 822
 823	TRACE_DEVICE(dev);
 824	TRACE_RESUME(0);
 825
 826	if (dev->power.syscore || dev->power.direct_complete)
 827		goto Out;
 828
 829	if (!dev->power.is_late_suspended)
 830		goto Out;
 831
 832	dpm_wait_for_superior(dev, async);
 833
 834	callback = dpm_subsys_resume_early_cb(dev, state, &info);
 835
 836	if (!callback && dev->driver && dev->driver->pm) {
 837		info = "early driver ";
 838		callback = pm_late_early_op(dev->driver->pm, state);
 839	}
 840
 841	error = dpm_run_callback(callback, dev, state, info);
 842	dev->power.is_late_suspended = false;
 843
 844 Out:
 845	TRACE_RESUME(error);
 846
 847	pm_runtime_enable(dev);
 848	complete_all(&dev->power.completion);
 849	return error;
 850}
 851
 852static void async_resume_early(void *data, async_cookie_t cookie)
 853{
 854	struct device *dev = (struct device *)data;
 855	int error;
 856
 857	error = device_resume_early(dev, pm_transition, true);
 858	if (error)
 859		pm_dev_err(dev, pm_transition, " async", error);
 860
 861	put_device(dev);
 862}
 863
 864/**
 865 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 866 * @state: PM transition of the system being carried out.
 867 */
 868void dpm_resume_early(pm_message_t state)
 869{
 870	struct device *dev;
 871	ktime_t starttime = ktime_get();
 872
 873	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 874	mutex_lock(&dpm_list_mtx);
 875	pm_transition = state;
 876
 877	/*
 878	 * Start the async threads upfront, in case their
 879	 * startup would otherwise be delayed by non-async
 880	 * resuming devices.
 881	 */
 882	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 883		dpm_async_fn(dev, async_resume_early);
 884
 885	while (!list_empty(&dpm_late_early_list)) {
 886		dev = to_device(dpm_late_early_list.next);
 887		get_device(dev);
 888		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 889		mutex_unlock(&dpm_list_mtx);
 890
 891		if (!is_async(dev)) {
 892			int error;
 893
 894			error = device_resume_early(dev, state, false);
 895			if (error) {
 896				suspend_stats.failed_resume_early++;
 897				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 898				dpm_save_failed_dev(dev_name(dev));
 899				pm_dev_err(dev, state, " early", error);
 900			}
 901		}
 902		mutex_lock(&dpm_list_mtx);
 903		put_device(dev);
 904	}
 905	mutex_unlock(&dpm_list_mtx);
 906	async_synchronize_full();
 907	dpm_show_time(starttime, state, 0, "early");
 908	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 909}
 910
 911/**
 912 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 913 * @state: PM transition of the system being carried out.
 914 */
 915void dpm_resume_start(pm_message_t state)
 916{
 917	dpm_resume_noirq(state);
 918	dpm_resume_early(state);
 919}
 920EXPORT_SYMBOL_GPL(dpm_resume_start);
 921
 922/**
 923 * device_resume - Execute "resume" callbacks for given device.
 924 * @dev: Device to handle.
 925 * @state: PM transition of the system being carried out.
 926 * @async: If true, the device is being resumed asynchronously.
 927 */
 928static int device_resume(struct device *dev, pm_message_t state, bool async)
 929{
 930	pm_callback_t callback = NULL;
 931	const char *info = NULL;
 932	int error = 0;
 933	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 934
 935	TRACE_DEVICE(dev);
 936	TRACE_RESUME(0);
 937
 938	if (dev->power.syscore)
 939		goto Complete;
 940
 941	if (dev->power.direct_complete) {
 942		/* Match the pm_runtime_disable() in __device_suspend(). */
 943		pm_runtime_enable(dev);
 944		goto Complete;
 945	}
 946
 947	dpm_wait_for_superior(dev, async);
 948	dpm_watchdog_set(&wd, dev);
 949	device_lock(dev);
 950
 951	/*
 952	 * This is a fib.  But we'll allow new children to be added below
 953	 * a resumed device, even if the device hasn't been completed yet.
 954	 */
 955	dev->power.is_prepared = false;
 956
 957	if (!dev->power.is_suspended)
 958		goto Unlock;
 959
 960	if (dev->pm_domain) {
 961		info = "power domain ";
 962		callback = pm_op(&dev->pm_domain->ops, state);
 963		goto Driver;
 964	}
 965
 966	if (dev->type && dev->type->pm) {
 967		info = "type ";
 968		callback = pm_op(dev->type->pm, state);
 969		goto Driver;
 970	}
 971
 972	if (dev->class && dev->class->pm) {
 973		info = "class ";
 974		callback = pm_op(dev->class->pm, state);
 975		goto Driver;
 976	}
 977
 978	if (dev->bus) {
 979		if (dev->bus->pm) {
 980			info = "bus ";
 981			callback = pm_op(dev->bus->pm, state);
 982		} else if (dev->bus->resume) {
 983			info = "legacy bus ";
 984			callback = dev->bus->resume;
 985			goto End;
 986		}
 987	}
 988
 989 Driver:
 990	if (!callback && dev->driver && dev->driver->pm) {
 991		info = "driver ";
 992		callback = pm_op(dev->driver->pm, state);
 993	}
 994
 995 End:
 996	error = dpm_run_callback(callback, dev, state, info);
 997	dev->power.is_suspended = false;
 998
 999 Unlock:
1000	device_unlock(dev);
1001	dpm_watchdog_clear(&wd);
1002
1003 Complete:
1004	complete_all(&dev->power.completion);
1005
1006	TRACE_RESUME(error);
1007
1008	return error;
1009}
1010
1011static void async_resume(void *data, async_cookie_t cookie)
1012{
1013	struct device *dev = (struct device *)data;
1014	int error;
1015
1016	error = device_resume(dev, pm_transition, true);
1017	if (error)
1018		pm_dev_err(dev, pm_transition, " async", error);
1019	put_device(dev);
1020}
1021
1022/**
1023 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1024 * @state: PM transition of the system being carried out.
1025 *
1026 * Execute the appropriate "resume" callback for all devices whose status
1027 * indicates that they are suspended.
1028 */
1029void dpm_resume(pm_message_t state)
1030{
1031	struct device *dev;
1032	ktime_t starttime = ktime_get();
1033
1034	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1035	might_sleep();
1036
1037	mutex_lock(&dpm_list_mtx);
1038	pm_transition = state;
1039	async_error = 0;
1040
1041	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1042		dpm_async_fn(dev, async_resume);
1043
1044	while (!list_empty(&dpm_suspended_list)) {
1045		dev = to_device(dpm_suspended_list.next);
1046		get_device(dev);
1047		if (!is_async(dev)) {
1048			int error;
1049
1050			mutex_unlock(&dpm_list_mtx);
1051
1052			error = device_resume(dev, state, false);
1053			if (error) {
1054				suspend_stats.failed_resume++;
1055				dpm_save_failed_step(SUSPEND_RESUME);
1056				dpm_save_failed_dev(dev_name(dev));
1057				pm_dev_err(dev, state, "", error);
1058			}
1059
1060			mutex_lock(&dpm_list_mtx);
1061		}
1062		if (!list_empty(&dev->power.entry))
1063			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1064		put_device(dev);
1065	}
1066	mutex_unlock(&dpm_list_mtx);
1067	async_synchronize_full();
1068	dpm_show_time(starttime, state, 0, NULL);
1069
1070	cpufreq_resume();
1071	devfreq_resume();
1072	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1073}
1074
1075/**
1076 * device_complete - Complete a PM transition for given device.
1077 * @dev: Device to handle.
1078 * @state: PM transition of the system being carried out.
1079 */
1080static void device_complete(struct device *dev, pm_message_t state)
1081{
1082	void (*callback)(struct device *) = NULL;
1083	const char *info = NULL;
1084
1085	if (dev->power.syscore)
1086		return;
1087
1088	device_lock(dev);
1089
1090	if (dev->pm_domain) {
1091		info = "completing power domain ";
1092		callback = dev->pm_domain->ops.complete;
1093	} else if (dev->type && dev->type->pm) {
1094		info = "completing type ";
1095		callback = dev->type->pm->complete;
1096	} else if (dev->class && dev->class->pm) {
1097		info = "completing class ";
1098		callback = dev->class->pm->complete;
1099	} else if (dev->bus && dev->bus->pm) {
1100		info = "completing bus ";
1101		callback = dev->bus->pm->complete;
1102	}
1103
1104	if (!callback && dev->driver && dev->driver->pm) {
1105		info = "completing driver ";
1106		callback = dev->driver->pm->complete;
1107	}
1108
1109	if (callback) {
1110		pm_dev_dbg(dev, state, info);
1111		callback(dev);
1112	}
1113
1114	device_unlock(dev);
1115
1116	pm_runtime_put(dev);
1117}
1118
1119/**
1120 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1121 * @state: PM transition of the system being carried out.
1122 *
1123 * Execute the ->complete() callbacks for all devices whose PM status is not
1124 * DPM_ON (this allows new devices to be registered).
1125 */
1126void dpm_complete(pm_message_t state)
1127{
1128	struct list_head list;
1129
1130	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1131	might_sleep();
1132
1133	INIT_LIST_HEAD(&list);
1134	mutex_lock(&dpm_list_mtx);
1135	while (!list_empty(&dpm_prepared_list)) {
1136		struct device *dev = to_device(dpm_prepared_list.prev);
1137
1138		get_device(dev);
1139		dev->power.is_prepared = false;
1140		list_move(&dev->power.entry, &list);
1141		mutex_unlock(&dpm_list_mtx);
1142
1143		trace_device_pm_callback_start(dev, "", state.event);
1144		device_complete(dev, state);
1145		trace_device_pm_callback_end(dev, 0);
1146
1147		mutex_lock(&dpm_list_mtx);
1148		put_device(dev);
1149	}
1150	list_splice(&list, &dpm_list);
1151	mutex_unlock(&dpm_list_mtx);
1152
1153	/* Allow device probing and trigger re-probing of deferred devices */
1154	device_unblock_probing();
1155	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1156}
1157
1158/**
1159 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1160 * @state: PM transition of the system being carried out.
1161 *
1162 * Execute "resume" callbacks for all devices and complete the PM transition of
1163 * the system.
1164 */
1165void dpm_resume_end(pm_message_t state)
1166{
1167	dpm_resume(state);
1168	dpm_complete(state);
1169}
1170EXPORT_SYMBOL_GPL(dpm_resume_end);
1171
1172
1173/*------------------------- Suspend routines -------------------------*/
1174
1175/**
1176 * resume_event - Return a "resume" message for given "suspend" sleep state.
1177 * @sleep_state: PM message representing a sleep state.
1178 *
1179 * Return a PM message representing the resume event corresponding to given
1180 * sleep state.
1181 */
1182static pm_message_t resume_event(pm_message_t sleep_state)
1183{
1184	switch (sleep_state.event) {
1185	case PM_EVENT_SUSPEND:
1186		return PMSG_RESUME;
1187	case PM_EVENT_FREEZE:
1188	case PM_EVENT_QUIESCE:
1189		return PMSG_RECOVER;
1190	case PM_EVENT_HIBERNATE:
1191		return PMSG_RESTORE;
1192	}
1193	return PMSG_ON;
1194}
1195
1196static void dpm_superior_set_must_resume(struct device *dev)
1197{
1198	struct device_link *link;
1199	int idx;
1200
1201	if (dev->parent)
1202		dev->parent->power.must_resume = true;
1203
1204	idx = device_links_read_lock();
1205
1206	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1207		link->supplier->power.must_resume = true;
1208
1209	device_links_read_unlock(idx);
1210}
1211
1212static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1213						 pm_message_t state,
1214						 const char **info_p)
1215{
1216	pm_callback_t callback;
1217	const char *info;
1218
1219	if (dev->pm_domain) {
1220		info = "noirq power domain ";
1221		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1222	} else if (dev->type && dev->type->pm) {
1223		info = "noirq type ";
1224		callback = pm_noirq_op(dev->type->pm, state);
1225	} else if (dev->class && dev->class->pm) {
1226		info = "noirq class ";
1227		callback = pm_noirq_op(dev->class->pm, state);
1228	} else if (dev->bus && dev->bus->pm) {
1229		info = "noirq bus ";
1230		callback = pm_noirq_op(dev->bus->pm, state);
1231	} else {
1232		return NULL;
1233	}
1234
1235	if (info_p)
1236		*info_p = info;
1237
1238	return callback;
1239}
1240
1241static bool device_must_resume(struct device *dev, pm_message_t state,
1242			       bool no_subsys_suspend_noirq)
1243{
1244	pm_message_t resume_msg = resume_event(state);
1245
1246	/*
1247	 * If all of the device driver's "noirq", "late" and "early" callbacks
1248	 * are invoked directly by the core, the decision to allow the device to
1249	 * stay in suspend can be based on its current runtime PM status and its
1250	 * wakeup settings.
1251	 */
1252	if (no_subsys_suspend_noirq &&
1253	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1254	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1255	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1256		return !pm_runtime_status_suspended(dev) &&
1257			(resume_msg.event != PM_EVENT_RESUME ||
1258			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1259
1260	/*
1261	 * The only safe strategy here is to require that if the device may not
1262	 * be left in suspend, resume callbacks must be invoked for it.
1263	 */
1264	return !dev->power.may_skip_resume;
1265}
1266
1267/**
1268 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1269 * @dev: Device to handle.
1270 * @state: PM transition of the system being carried out.
1271 * @async: If true, the device is being suspended asynchronously.
1272 *
1273 * The driver of @dev will not receive interrupts while this function is being
1274 * executed.
1275 */
1276static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1277{
1278	pm_callback_t callback;
1279	const char *info;
1280	bool no_subsys_cb = false;
1281	int error = 0;
1282
1283	TRACE_DEVICE(dev);
1284	TRACE_SUSPEND(0);
1285
1286	dpm_wait_for_subordinate(dev, async);
1287
1288	if (async_error)
1289		goto Complete;
1290
1291	if (dev->power.syscore || dev->power.direct_complete)
1292		goto Complete;
1293
1294	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1295	if (callback)
1296		goto Run;
1297
1298	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1299
1300	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1301		goto Skip;
1302
1303	if (dev->driver && dev->driver->pm) {
1304		info = "noirq driver ";
1305		callback = pm_noirq_op(dev->driver->pm, state);
1306	}
1307
1308Run:
1309	error = dpm_run_callback(callback, dev, state, info);
1310	if (error) {
1311		async_error = error;
1312		goto Complete;
1313	}
1314
1315Skip:
1316	dev->power.is_noirq_suspended = true;
1317
1318	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1319		dev->power.must_resume = dev->power.must_resume ||
1320				atomic_read(&dev->power.usage_count) > 1 ||
1321				device_must_resume(dev, state, no_subsys_cb);
1322	} else {
1323		dev->power.must_resume = true;
1324	}
1325
1326	if (dev->power.must_resume)
1327		dpm_superior_set_must_resume(dev);
1328
1329Complete:
1330	complete_all(&dev->power.completion);
1331	TRACE_SUSPEND(error);
1332	return error;
1333}
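
/*
 * Editor's illustrative sketch (not part of main.c): a hypothetical
 * probe routine combining DPM_FLAG_SMART_SUSPEND with
 * DPM_FLAG_LEAVE_SUSPENDED, asking the core to leave the device
 * suspended across the transition when the checks above permit it.
 */
static int example_probe_leave_suspended(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}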
1334
1335static void async_suspend_noirq(void *data, async_cookie_t cookie)
1336{
1337	struct device *dev = (struct device *)data;
1338	int error;
1339
1340	error = __device_suspend_noirq(dev, pm_transition, true);
1341	if (error) {
1342		dpm_save_failed_dev(dev_name(dev));
1343		pm_dev_err(dev, pm_transition, " async", error);
1344	}
1345
1346	put_device(dev);
1347}
1348
1349static int device_suspend_noirq(struct device *dev)
1350{
1351	if (dpm_async_fn(dev, async_suspend_noirq))
1352		return 0;
1353
1354	return __device_suspend_noirq(dev, pm_transition, false);
1355}
1356
1357static int dpm_noirq_suspend_devices(pm_message_t state)
1358{
1359	ktime_t starttime = ktime_get();
1360	int error = 0;
1361
1362	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1363	mutex_lock(&dpm_list_mtx);
1364	pm_transition = state;
1365	async_error = 0;
1366
1367	while (!list_empty(&dpm_late_early_list)) {
1368		struct device *dev = to_device(dpm_late_early_list.prev);
1369
1370		get_device(dev);
1371		mutex_unlock(&dpm_list_mtx);
1372
1373		error = device_suspend_noirq(dev);
1374
1375		mutex_lock(&dpm_list_mtx);
1376		if (error) {
1377			pm_dev_err(dev, state, " noirq", error);
1378			dpm_save_failed_dev(dev_name(dev));
1379			put_device(dev);
1380			break;
1381		}
1382		if (!list_empty(&dev->power.entry))
1383			list_move(&dev->power.entry, &dpm_noirq_list);
1384		put_device(dev);
1385
1386		if (async_error)
1387			break;
1388	}
1389	mutex_unlock(&dpm_list_mtx);
1390	async_synchronize_full();
1391	if (!error)
1392		error = async_error;
1393
1394	if (error) {
1395		suspend_stats.failed_suspend_noirq++;
1396		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1397	}
1398	dpm_show_time(starttime, state, error, "noirq");
1399	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1400	return error;
1401}
1402
1403/**
1404 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1405 * @state: PM transition of the system being carried out.
1406 *
1407 * Prevent device drivers' interrupt handlers from being called and invoke
1408 * "noirq" suspend callbacks for all non-sysdev devices.
1409 */
1410int dpm_suspend_noirq(pm_message_t state)
1411{
1412	int ret;
1413
1414	cpuidle_pause();
1415
1416	device_wakeup_arm_wake_irqs();
1417	suspend_device_irqs();
1418
1419	ret = dpm_noirq_suspend_devices(state);
1420	if (ret)
1421		dpm_resume_noirq(resume_event(state));
1422
1423	return ret;
1424}
1425
1426static void dpm_propagate_wakeup_to_parent(struct device *dev)
1427{
1428	struct device *parent = dev->parent;
1429
1430	if (!parent)
1431		return;
1432
1433	spin_lock_irq(&parent->power.lock);
1434
1435	if (dev->power.wakeup_path && !parent->power.ignore_children)
1436		parent->power.wakeup_path = true;
1437
1438	spin_unlock_irq(&parent->power.lock);
1439}
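
/*
 * Editor's illustrative sketch (not part of main.c): the wakeup_path
 * propagation above only matters for wakeup-capable devices, which a
 * driver typically declares at probe time:
 */
static int example_probe_wakeup(struct device *dev)
{
	/* Mark the device wakeup-capable and enable wakeup by default. */
	device_init_wakeup(dev, true);
	return 0;
}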
1440
1441static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1442						pm_message_t state,
1443						const char **info_p)
1444{
1445	pm_callback_t callback;
1446	const char *info;
1447
1448	if (dev->pm_domain) {
1449		info = "late power domain ";
1450		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1451	} else if (dev->type && dev->type->pm) {
1452		info = "late type ";
1453		callback = pm_late_early_op(dev->type->pm, state);
1454	} else if (dev->class && dev->class->pm) {
1455		info = "late class ";
1456		callback = pm_late_early_op(dev->class->pm, state);
1457	} else if (dev->bus && dev->bus->pm) {
1458		info = "late bus ";
1459		callback = pm_late_early_op(dev->bus->pm, state);
1460	} else {
1461		return NULL;
1462	}
1463
1464	if (info_p)
1465		*info_p = info;
1466
1467	return callback;
1468}
1469
1470/**
1471 * __device_suspend_late - Execute a "late suspend" callback for given device.
1472 * @dev: Device to handle.
1473 * @state: PM transition of the system being carried out.
1474 * @async: If true, the device is being suspended asynchronously.
1475 *
1476 * Runtime PM is disabled for @dev while this function is being executed.
1477 */
1478static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1479{
1480	pm_callback_t callback;
1481	const char *info;
1482	int error = 0;
1483
1484	TRACE_DEVICE(dev);
1485	TRACE_SUSPEND(0);
1486
1487	__pm_runtime_disable(dev, false);
1488
1489	dpm_wait_for_subordinate(dev, async);
1490
1491	if (async_error)
1492		goto Complete;
1493
1494	if (pm_wakeup_pending()) {
1495		async_error = -EBUSY;
1496		goto Complete;
1497	}
1498
1499	if (dev->power.syscore || dev->power.direct_complete)
1500		goto Complete;
1501
1502	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1503	if (callback)
1504		goto Run;
1505
1506	if (dev_pm_smart_suspend_and_suspended(dev) &&
1507	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1508		goto Skip;
1509
1510	if (dev->driver && dev->driver->pm) {
1511		info = "late driver ";
1512		callback = pm_late_early_op(dev->driver->pm, state);
1513	}
1514
1515Run:
1516	error = dpm_run_callback(callback, dev, state, info);
1517	if (error) {
1518		async_error = error;
1519		goto Complete;
1520	}
1521	dpm_propagate_wakeup_to_parent(dev);
1522
1523Skip:
1524	dev->power.is_late_suspended = true;
1525
1526Complete:
1527	TRACE_SUSPEND(error);
1528	complete_all(&dev->power.completion);
1529	return error;
1530}
1531
1532static void async_suspend_late(void *data, async_cookie_t cookie)
1533{
1534	struct device *dev = (struct device *)data;
1535	int error;
1536
1537	error = __device_suspend_late(dev, pm_transition, true);
1538	if (error) {
1539		dpm_save_failed_dev(dev_name(dev));
1540		pm_dev_err(dev, pm_transition, " async", error);
1541	}
1542	put_device(dev);
1543}
1544
1545static int device_suspend_late(struct device *dev)
1546{
1547	if (dpm_async_fn(dev, async_suspend_late))
1548		return 0;
1549
1550	return __device_suspend_late(dev, pm_transition, false);
1551}
1552
1553/**
1554 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1555 * @state: PM transition of the system being carried out.
1556 */
1557int dpm_suspend_late(pm_message_t state)
1558{
1559	ktime_t starttime = ktime_get();
1560	int error = 0;
1561
1562	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1563	mutex_lock(&dpm_list_mtx);
1564	pm_transition = state;
1565	async_error = 0;
1566
1567	while (!list_empty(&dpm_suspended_list)) {
1568		struct device *dev = to_device(dpm_suspended_list.prev);
1569
1570		get_device(dev);
1571		mutex_unlock(&dpm_list_mtx);
1572
1573		error = device_suspend_late(dev);
1574
1575		mutex_lock(&dpm_list_mtx);
1576		if (!list_empty(&dev->power.entry))
1577			list_move(&dev->power.entry, &dpm_late_early_list);
1578
1579		if (error) {
1580			pm_dev_err(dev, state, " late", error);
1581			dpm_save_failed_dev(dev_name(dev));
1582			put_device(dev);
1583			break;
1584		}
1585		put_device(dev);
1586
1587		if (async_error)
1588			break;
1589	}
1590	mutex_unlock(&dpm_list_mtx);
1591	async_synchronize_full();
1592	if (!error)
1593		error = async_error;
1594	if (error) {
1595		suspend_stats.failed_suspend_late++;
1596		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1597		dpm_resume_early(resume_event(state));
1598	}
1599	dpm_show_time(starttime, state, error, "late");
1600	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1601	return error;
1602}
1603
1604/**
1605 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1606 * @state: PM transition of the system being carried out.
1607 */
1608int dpm_suspend_end(pm_message_t state)
1609{
1610	ktime_t starttime = ktime_get();
1611	int error;
1612
1613	error = dpm_suspend_late(state);
1614	if (error)
1615		goto out;
1616
1617	error = dpm_suspend_noirq(state);
1618	if (error)
1619		dpm_resume_early(resume_event(state));
1620
1621out:
1622	dpm_show_time(starttime, state, error, "end");
1623	return error;
1624}
1625EXPORT_SYMBOL_GPL(dpm_suspend_end);
1626
1627/**
1628 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1629 * @dev: Device to suspend.
1630 * @state: PM transition of the system being carried out.
1631 * @cb: Suspend callback to execute.
1632 * @info: string description of caller.
1633 */
1634static int legacy_suspend(struct device *dev, pm_message_t state,
1635			  int (*cb)(struct device *dev, pm_message_t state),
1636			  const char *info)
1637{
1638	int error;
1639	ktime_t calltime;
1640
1641	calltime = initcall_debug_start(dev, cb);
1642
1643	trace_device_pm_callback_start(dev, info, state.event);
1644	error = cb(dev, state);
1645	trace_device_pm_callback_end(dev, error);
1646	suspend_report_result(cb, error);
1647
1648	initcall_debug_report(dev, calltime, cb, error);
1649
1650	return error;
1651}
1652
1653static void dpm_clear_superiors_direct_complete(struct device *dev)
1654{
1655	struct device_link *link;
1656	int idx;
1657
1658	if (dev->parent) {
1659		spin_lock_irq(&dev->parent->power.lock);
1660		dev->parent->power.direct_complete = false;
1661		spin_unlock_irq(&dev->parent->power.lock);
1662	}
1663
1664	idx = device_links_read_lock();
1665
1666	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1667		spin_lock_irq(&link->supplier->power.lock);
1668		link->supplier->power.direct_complete = false;
1669		spin_unlock_irq(&link->supplier->power.lock);
1670	}
1671
1672	device_links_read_unlock(idx);
1673}
1674
1675/**
1676 * __device_suspend - Execute "suspend" callbacks for given device.
1677 * @dev: Device to handle.
1678 * @state: PM transition of the system being carried out.
1679 * @async: If true, the device is being suspended asynchronously.
1680 */
1681static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1682{
1683	pm_callback_t callback = NULL;
1684	const char *info = NULL;
1685	int error = 0;
1686	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1687
1688	TRACE_DEVICE(dev);
1689	TRACE_SUSPEND(0);
1690
1691	dpm_wait_for_subordinate(dev, async);
1692
1693	if (async_error) {
1694		dev->power.direct_complete = false;
1695		goto Complete;
1696	}
1697
1698	/*
1699	 * If a device configured to wake up the system from sleep states
1700	 * has been suspended at run time and there's a resume request pending
1701	 * for it, this is equivalent to the device signaling wakeup, so the
1702	 * system suspend operation should be aborted.
1703	 */
1704	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1705		pm_wakeup_event(dev, 0);
1706
1707	if (pm_wakeup_pending()) {
1708		dev->power.direct_complete = false;
1709		async_error = -EBUSY;
1710		goto Complete;
1711	}
1712
1713	if (dev->power.syscore)
1714		goto Complete;
1715
1716	/* Avoid direct_complete to let wakeup_path propagate. */
1717	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1718		dev->power.direct_complete = false;
1719
1720	if (dev->power.direct_complete) {
1721		if (pm_runtime_status_suspended(dev)) {
1722			pm_runtime_disable(dev);
1723			if (pm_runtime_status_suspended(dev)) {
1724				pm_dev_dbg(dev, state, "direct-complete ");
1725				goto Complete;
1726			}
1727
1728			pm_runtime_enable(dev);
1729		}
1730		dev->power.direct_complete = false;
1731	}
1732
1733	dev->power.may_skip_resume = false;
1734	dev->power.must_resume = false;
1735
1736	dpm_watchdog_set(&wd, dev);
1737	device_lock(dev);
1738
1739	if (dev->pm_domain) {
1740		info = "power domain ";
1741		callback = pm_op(&dev->pm_domain->ops, state);
1742		goto Run;
1743	}
1744
1745	if (dev->type && dev->type->pm) {
1746		info = "type ";
1747		callback = pm_op(dev->type->pm, state);
1748		goto Run;
1749	}
1750
1751	if (dev->class && dev->class->pm) {
1752		info = "class ";
1753		callback = pm_op(dev->class->pm, state);
1754		goto Run;
1755	}
1756
1757	if (dev->bus) {
1758		if (dev->bus->pm) {
1759			info = "bus ";
1760			callback = pm_op(dev->bus->pm, state);
1761		} else if (dev->bus->suspend) {
1762			pm_dev_dbg(dev, state, "legacy bus ");
1763			error = legacy_suspend(dev, state, dev->bus->suspend,
1764						"legacy bus ");
1765			goto End;
1766		}
1767	}
1768
1769 Run:
1770	if (!callback && dev->driver && dev->driver->pm) {
1771		info = "driver ";
1772		callback = pm_op(dev->driver->pm, state);
1773	}
1774
1775	error = dpm_run_callback(callback, dev, state, info);
1776
1777 End:
1778	if (!error) {
1779		dev->power.is_suspended = true;
1780		if (device_may_wakeup(dev))
1781			dev->power.wakeup_path = true;
1782
1783		dpm_propagate_wakeup_to_parent(dev);
1784		dpm_clear_superiors_direct_complete(dev);
1785	}
1786
1787	device_unlock(dev);
1788	dpm_watchdog_clear(&wd);
1789
1790 Complete:
1791	if (error)
1792		async_error = error;
1793
1794	complete_all(&dev->power.completion);
1795	TRACE_SUSPEND(error);
1796	return error;
1797}
1798
1799static void async_suspend(void *data, async_cookie_t cookie)
1800{
1801	struct device *dev = (struct device *)data;
1802	int error;
1803
1804	error = __device_suspend(dev, pm_transition, true);
1805	if (error) {
1806		dpm_save_failed_dev(dev_name(dev));
1807		pm_dev_err(dev, pm_transition, " async", error);
1808	}
1809
1810	put_device(dev);
1811}
1812
static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
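	/* Wait for any asynchronous suspends still in flight to finish. */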
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for the given device.  No new children
 * of the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(!pm_runtime_enabled(dev) &&
		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
					      DPM_FLAG_LEAVE_SUSPENDED));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		((pm_runtime_suspended(dev) && ret > 0) ||
		 dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
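
/*
 * Illustration only, not part of the PM core: a minimal sketch of a driver
 * ->prepare() callback that opts into the direct-complete optimization
 * described above.  The foo_prepare() name is hypothetical.  Returning a
 * positive value tells the core that the device appears runtime-suspended
 * and, if it really is, may be left in that state across the transition.
 */
static int __maybe_unused foo_prepare(struct device *dev)
{
	/* Positive return: the runtime-suspended state suits system sleep. */
	return pm_runtime_suspended(dev) ? 1 : 0;
}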

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before we
	 * disable probing.  This synchronization point matters at least at
	 * boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices while a suspend or hibernation transition is in
	 * progress is unsafe and would make system behavior unpredictable,
	 * so prohibit device probing here and defer the probes instead.
	 * Normal behavior is restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else {
		error = dpm_suspend(state);
	}
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
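
/*
 * Illustration only, not part of the PM core: a minimal sketch of how a
 * system sleep path typically uses dpm_suspend_start().  If it fails, any
 * devices already prepared or suspended must still be unwound with
 * dpm_resume_end().  The foo_enter_sleep() name is hypothetical.
 */
static int __maybe_unused foo_enter_sleep(void)
{
	int error = dpm_suspend_start(PMSG_SUSPEND);

	if (error) {
		pr_err("Some devices failed to suspend\n");
		dpm_resume_end(PMSG_RESUME);
		return error;
	}

	/* Platform-specific sleep entry would go here. */

	dpm_resume_end(PMSG_RESUME);
	return 0;
}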

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
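
/*
 * Illustration only, not part of the PM core: a minimal sketch of a driver
 * suspend callback that must not run before a functionally related device
 * has completed its own transition.  The foo_* names are hypothetical, and
 * stashing the partner device in drvdata is an assumption of this sketch.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	struct device *partner = dev_get_drvdata(dev);
	int error;

	/* Block until the partner's suspend callback has finished. */
	error = device_pm_wait_for_dev(dev, partner);
	if (error)
		return error;

	/* Put this device into a low-power state here. */
	return 0;
}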

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
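
/*
 * Illustration only, not part of the PM core: counting the devices on
 * dpm_list with dpm_for_each_dev().  The callback runs with the PM core's
 * device list mutex held, so it must not register or unregister devices.
 * The foo_* names are hypothetical.
 */
static void __maybe_unused foo_count_dev(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int __maybe_unused foo_count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, foo_count_dev);
	return count;
}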

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

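/*
 * Cache whether the device has any PM callbacks at all, at any level (bus,
 * class, type, PM domain, or driver, including the legacy ones), in
 * power.no_pm_callbacks, so the suspend and resume paths can avoid looking
 * the callbacks up again.
 */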
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}
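
/*
 * Return true if the driver has set DPM_FLAG_SMART_SUSPEND for the device
 * and the device is currently runtime-suspended, in which case middle-layer
 * code (bus type, PM domain, etc.) may choose to skip some or all of its
 * system suspend callbacks.
 */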
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}