v4.6
   1/*
   2 * drivers/base/power/main.c - Where the driver meets power management.
   3 *
   4 * Copyright (c) 2003 Patrick Mochel
   5 * Copyright (c) 2003 Open Source Development Lab
   6 *
   7 * This file is released under the GPLv2
   8 *
   9 *
  10 * The driver model core calls device_pm_add() when a device is registered.
  11 * This will initialize the embedded device_pm_info object in the device
  12 * and add it to the list of power-controlled devices. sysfs entries for
  13 * controlling device power management will also be added.
  14 *
  15 * A separate list is used for keeping track of power info, because the power
  16 * domain dependencies may differ from the ancestral dependencies that the
  17 * subsystem list maintains.
  18 */
  19
  20#include <linux/device.h>
  21#include <linux/kallsyms.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/timer.h>
  36
  37#include "../base.h"
  38#include "power.h"
  39
  40typedef int (*pm_callback_t)(struct device *);
  41
  42/*
  43 * The entries in the dpm_list list are in a depth first order, simply
  44 * because children are guaranteed to be discovered after parents, and
  45 * are inserted at the back of the list on discovery.
  46 *
  47 * Since device_pm_add() may be called with a device lock held,
  48 * we must never try to acquire a device lock while holding
  49 * dpm_list_mutex.
  50 */
  51
  52LIST_HEAD(dpm_list);
  53static LIST_HEAD(dpm_prepared_list);
  54static LIST_HEAD(dpm_suspended_list);
  55static LIST_HEAD(dpm_late_early_list);
  56static LIST_HEAD(dpm_noirq_list);
  57
  58struct suspend_stats suspend_stats;
  59static DEFINE_MUTEX(dpm_list_mtx);
  60static pm_message_t pm_transition;
  61
  62static int async_error;
  63
  64static char *pm_verb(int event)
  65{
  66	switch (event) {
  67	case PM_EVENT_SUSPEND:
  68		return "suspend";
  69	case PM_EVENT_RESUME:
  70		return "resume";
  71	case PM_EVENT_FREEZE:
  72		return "freeze";
  73	case PM_EVENT_QUIESCE:
  74		return "quiesce";
  75	case PM_EVENT_HIBERNATE:
  76		return "hibernate";
  77	case PM_EVENT_THAW:
  78		return "thaw";
  79	case PM_EVENT_RESTORE:
  80		return "restore";
  81	case PM_EVENT_RECOVER:
  82		return "recover";
  83	default:
  84		return "(unknown PM event)";
  85	}
  86}
  87
  88/**
  89 * device_pm_sleep_init - Initialize system suspend-related device fields.
  90 * @dev: Device object being initialized.
  91 */
  92void device_pm_sleep_init(struct device *dev)
  93{
  94	dev->power.is_prepared = false;
  95	dev->power.is_suspended = false;
  96	dev->power.is_noirq_suspended = false;
  97	dev->power.is_late_suspended = false;
  98	init_completion(&dev->power.completion);
  99	complete_all(&dev->power.completion);
 100	dev->power.wakeup = NULL;
 101	INIT_LIST_HEAD(&dev->power.entry);
 102}
 103
 104/**
 105 * device_pm_lock - Lock the list of active devices used by the PM core.
 106 */
 107void device_pm_lock(void)
 108{
 109	mutex_lock(&dpm_list_mtx);
 110}
 111
 112/**
 113 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 114 */
 115void device_pm_unlock(void)
 116{
 117	mutex_unlock(&dpm_list_mtx);
 118}
 119
 120/**
 121 * device_pm_add - Add a device to the PM core's list of active devices.
 122 * @dev: Device to add to the list.
 123 */
 124void device_pm_add(struct device *dev)
 125{
 126	pr_debug("PM: Adding info for %s:%s\n",
 127		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 128	device_pm_check_callbacks(dev);
 129	mutex_lock(&dpm_list_mtx);
 130	if (dev->parent && dev->parent->power.is_prepared)
 131		dev_warn(dev, "parent %s should not be sleeping\n",
 132			dev_name(dev->parent));
 133	list_add_tail(&dev->power.entry, &dpm_list);
 134	mutex_unlock(&dpm_list_mtx);
 135}
 136
 137/**
 138 * device_pm_remove - Remove a device from the PM core's list of active devices.
 139 * @dev: Device to be removed from the list.
 140 */
 141void device_pm_remove(struct device *dev)
 142{
 143	pr_debug("PM: Removing info for %s:%s\n",
 144		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 145	complete_all(&dev->power.completion);
 146	mutex_lock(&dpm_list_mtx);
 147	list_del_init(&dev->power.entry);
 148	mutex_unlock(&dpm_list_mtx);
 149	device_wakeup_disable(dev);
 150	pm_runtime_remove(dev);
 151	device_pm_check_callbacks(dev);
 152}
 153
 154/**
 155 * device_pm_move_before - Move device in the PM core's list of active devices.
 156 * @deva: Device to move in dpm_list.
 157 * @devb: Device @deva should come before.
 158 */
 159void device_pm_move_before(struct device *deva, struct device *devb)
 160{
 161	pr_debug("PM: Moving %s:%s before %s:%s\n",
 162		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 163		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 164	/* Delete deva from dpm_list and reinsert before devb. */
 165	list_move_tail(&deva->power.entry, &devb->power.entry);
 166}
 167
 168/**
 169 * device_pm_move_after - Move device in the PM core's list of active devices.
 170 * @deva: Device to move in dpm_list.
 171 * @devb: Device @deva should come after.
 172 */
 173void device_pm_move_after(struct device *deva, struct device *devb)
 174{
 175	pr_debug("PM: Moving %s:%s after %s:%s\n",
 176		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 177		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 178	/* Delete deva from dpm_list and reinsert after devb. */
 179	list_move(&deva->power.entry, &devb->power.entry);
 180}
 181
 182/**
 183 * device_pm_move_last - Move device to end of the PM core's list of devices.
 184 * @dev: Device to move in dpm_list.
 185 */
 186void device_pm_move_last(struct device *dev)
 187{
 188	pr_debug("PM: Moving %s:%s to end of list\n",
 189		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 190	list_move_tail(&dev->power.entry, &dpm_list);
 191}
 192
 193static ktime_t initcall_debug_start(struct device *dev)
 194{
 195	ktime_t calltime = ktime_set(0, 0);
 196
 197	if (pm_print_times_enabled) {
 198		pr_info("calling  %s+ @ %i, parent: %s\n",
 199			dev_name(dev), task_pid_nr(current),
 200			dev->parent ? dev_name(dev->parent) : "none");
 201		calltime = ktime_get();
 202	}
 203
 204	return calltime;
 205}
 206
 207static void initcall_debug_report(struct device *dev, ktime_t calltime,
 208				  int error, pm_message_t state, char *info)
 209{
 210	ktime_t rettime;
 211	s64 nsecs;
 212
 213	rettime = ktime_get();
 214	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 215
 216	if (pm_print_times_enabled) {
 217		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 218			error, (unsigned long long)nsecs >> 10);
 219	}
 220}
 221
 222/**
 223 * dpm_wait - Wait for a PM operation to complete.
 224 * @dev: Device to wait for.
 225 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 226 */
 227static void dpm_wait(struct device *dev, bool async)
 228{
 229	if (!dev)
 230		return;
 231
 232	if (async || (pm_async_enabled && dev->power.async_suspend))
 233		wait_for_completion(&dev->power.completion);
 234}
 235
 236static int dpm_wait_fn(struct device *dev, void *async_ptr)
 237{
 238	dpm_wait(dev, *((bool *)async_ptr));
 239	return 0;
 240}
 241
 242static void dpm_wait_for_children(struct device *dev, bool async)
 243{
 244       device_for_each_child(dev, &async, dpm_wait_fn);
 245}
 246
 247/**
 248 * pm_op - Return the PM operation appropriate for given PM event.
 249 * @ops: PM operations to choose from.
 250 * @state: PM transition of the system being carried out.
 251 */
 252static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 253{
 254	switch (state.event) {
 255#ifdef CONFIG_SUSPEND
 256	case PM_EVENT_SUSPEND:
 257		return ops->suspend;
 258	case PM_EVENT_RESUME:
 259		return ops->resume;
 260#endif /* CONFIG_SUSPEND */
 261#ifdef CONFIG_HIBERNATE_CALLBACKS
 262	case PM_EVENT_FREEZE:
 263	case PM_EVENT_QUIESCE:
 264		return ops->freeze;
 265	case PM_EVENT_HIBERNATE:
 266		return ops->poweroff;
 267	case PM_EVENT_THAW:
 268	case PM_EVENT_RECOVER:
 269		return ops->thaw;
 271	case PM_EVENT_RESTORE:
 272		return ops->restore;
 273#endif /* CONFIG_HIBERNATE_CALLBACKS */
 274	}
 275
 276	return NULL;
 277}
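
Drivers never call pm_op() directly; they fill in a struct dev_pm_ops and let
the PM core pick the member matching the transition.  A minimal sketch of the
driver side (the foo_* names are hypothetical):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		/* Quiesce the hardware; pm_op() picks this for PM_EVENT_SUSPEND. */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* Reinitialize the hardware; picked for PM_EVENT_RESUME. */
		return 0;
	}

	/* Also routes freeze/thaw/poweroff/restore to the same pair. */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = &foo_pm_ops,
		},
	};
	module_platform_driver(foo_driver);
	MODULE_LICENSE("GPL");

Note that device_resume() and __device_suspend() below consult dev->pm_domain,
then type, class and bus ops, and fall back to the driver's table only when
none of those provide callbacks.
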
 278
 279/**
 280 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 281 * @ops: PM operations to choose from.
 282 * @state: PM transition of the system being carried out.
 283 *
 284 * Runtime PM is disabled for the device while the returned callback runs.
 285 */
 286static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 287				      pm_message_t state)
 288{
 289	switch (state.event) {
 290#ifdef CONFIG_SUSPEND
 291	case PM_EVENT_SUSPEND:
 292		return ops->suspend_late;
 293	case PM_EVENT_RESUME:
 294		return ops->resume_early;
 295#endif /* CONFIG_SUSPEND */
 296#ifdef CONFIG_HIBERNATE_CALLBACKS
 297	case PM_EVENT_FREEZE:
 298	case PM_EVENT_QUIESCE:
 299		return ops->freeze_late;
 300	case PM_EVENT_HIBERNATE:
 301		return ops->poweroff_late;
 302	case PM_EVENT_THAW:
 303	case PM_EVENT_RECOVER:
 304		return ops->thaw_early;
 305	case PM_EVENT_RESTORE:
 306		return ops->restore_early;
 307#endif /* CONFIG_HIBERNATE_CALLBACKS */
 308	}
 309
 310	return NULL;
 311}
 312
 313/**
 314 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 315 * @ops: PM operations to choose from.
 316 * @state: PM transition of the system being carried out.
 317 *
 318 * The driver of the device will not receive interrupts while the returned
 319 * callback is being executed.
 320 */
 321static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 322{
 323	switch (state.event) {
 324#ifdef CONFIG_SUSPEND
 325	case PM_EVENT_SUSPEND:
 326		return ops->suspend_noirq;
 327	case PM_EVENT_RESUME:
 328		return ops->resume_noirq;
 329#endif /* CONFIG_SUSPEND */
 330#ifdef CONFIG_HIBERNATE_CALLBACKS
 331	case PM_EVENT_FREEZE:
 332	case PM_EVENT_QUIESCE:
 333		return ops->freeze_noirq;
 334	case PM_EVENT_HIBERNATE:
 335		return ops->poweroff_noirq;
 336	case PM_EVENT_THAW:
 337	case PM_EVENT_RECOVER:
 338		return ops->thaw_noirq;
 339	case PM_EVENT_RESTORE:
 340		return ops->restore_noirq;
 341#endif /* CONFIG_HIBERNATE_CALLBACKS */
 342	}
 343
 344	return NULL;
 345}
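
The _late/_early and _noirq selectors only matter to drivers that need work
done after (or before) the ordinary callbacks of other devices, or with their
interrupts disabled.  A sketch of a dev_pm_ops table built with the stock
helper macros (foo_* names hypothetical):

	#include <linux/pm.h>

	static int foo_suspend_late(struct device *dev)  { return 0; }
	static int foo_resume_early(struct device *dev)  { return 0; }
	static int foo_suspend_noirq(struct device *dev) { return 0; }
	static int foo_resume_noirq(struct device *dev)  { return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		/* selected by pm_late_early_op() */
		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
		/* selected by pm_noirq_op(); runs with device IRQs disabled */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
	};
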
 346
 347static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
 348{
 349	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 350		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 351		", may wakeup" : "");
 352}
 353
 354static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
 355			int error)
 356{
 357	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 358		dev_name(dev), pm_verb(state.event), info, error);
 359}
 360
 361static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 362{
 363	ktime_t calltime;
 364	u64 usecs64;
 365	int usecs;
 366
 367	calltime = ktime_get();
 368	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 369	do_div(usecs64, NSEC_PER_USEC);
 370	usecs = usecs64;
 371	if (usecs == 0)
 372		usecs = 1;
 373	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
 374		info ?: "", info ? " " : "", pm_verb(state.event),
 375		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 376}
 377
 378static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 379			    pm_message_t state, char *info)
 380{
 381	ktime_t calltime;
 382	int error;
 383
 384	if (!cb)
 385		return 0;
 386
 387	calltime = initcall_debug_start(dev);
 388
 389	pm_dev_dbg(dev, state, info);
 390	trace_device_pm_callback_start(dev, info, state.event);
 391	error = cb(dev);
 392	trace_device_pm_callback_end(dev, error);
 393	suspend_report_result(cb, error);
 394
 395	initcall_debug_report(dev, calltime, error, state, info);
 396
 397	return error;
 398}
 399
 400#ifdef CONFIG_DPM_WATCHDOG
 401struct dpm_watchdog {
 402	struct device		*dev;
 403	struct task_struct	*tsk;
 404	struct timer_list	timer;
 405};
 406
 407#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 408	struct dpm_watchdog wd
 409
 410/**
 411 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 412 * @data: Watchdog object address.
 413 *
 414 * Called when a driver has timed out suspending or resuming.
 415 * There's not much we can do here to recover so panic() to
 416 * capture a crash-dump in pstore.
 417 */
 418static void dpm_watchdog_handler(unsigned long data)
 419{
 420	struct dpm_watchdog *wd = (void *)data;
 421
 422	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 423	show_stack(wd->tsk, NULL);
 424	panic("%s %s: unrecoverable failure\n",
 425		dev_driver_string(wd->dev), dev_name(wd->dev));
 426}
 427
 428/**
 429 * dpm_watchdog_set - Enable pm watchdog for given device.
 430 * @wd: Watchdog. Must be allocated on the stack.
 431 * @dev: Device to handle.
 432 */
 433static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 434{
 435	struct timer_list *timer = &wd->timer;
 436
 437	wd->dev = dev;
 438	wd->tsk = current;
 439
 440	init_timer_on_stack(timer);
 441	/* use same timeout value for both suspend and resume */
 442	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 443	timer->function = dpm_watchdog_handler;
 444	timer->data = (unsigned long)wd;
 445	add_timer(timer);
 446}
 447
 448/**
 449 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 450 * @wd: Watchdog to disable.
 451 */
 452static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 453{
 454	struct timer_list *timer = &wd->timer;
 455
 456	del_timer_sync(timer);
 457	destroy_timer_on_stack(timer);
 458}
 459#else
 460#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 461#define dpm_watchdog_set(x, y)
 462#define dpm_watchdog_clear(x)
 463#endif
 464
 465/*------------------------- Resume routines -------------------------*/
 466
 467/**
 468 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 469 * @dev: Device to handle.
 470 * @state: PM transition of the system being carried out.
 471 * @async: If true, the device is being resumed asynchronously.
 472 *
 473 * The driver of @dev will not receive interrupts while this function is being
 474 * executed.
 475 */
 476static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 477{
 478	pm_callback_t callback = NULL;
 479	char *info = NULL;
 480	int error = 0;
 481
 482	TRACE_DEVICE(dev);
 483	TRACE_RESUME(0);
 484
 485	if (dev->power.syscore || dev->power.direct_complete)
 486		goto Out;
 487
 488	if (!dev->power.is_noirq_suspended)
 489		goto Out;
 490
 491	dpm_wait(dev->parent, async);
 492
 493	if (dev->pm_domain) {
 494		info = "noirq power domain ";
 495		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 496	} else if (dev->type && dev->type->pm) {
 497		info = "noirq type ";
 498		callback = pm_noirq_op(dev->type->pm, state);
 499	} else if (dev->class && dev->class->pm) {
 500		info = "noirq class ";
 501		callback = pm_noirq_op(dev->class->pm, state);
 502	} else if (dev->bus && dev->bus->pm) {
 503		info = "noirq bus ";
 504		callback = pm_noirq_op(dev->bus->pm, state);
 505	}
 506
 507	if (!callback && dev->driver && dev->driver->pm) {
 508		info = "noirq driver ";
 509		callback = pm_noirq_op(dev->driver->pm, state);
 510	}
 511
 512	error = dpm_run_callback(callback, dev, state, info);
 513	dev->power.is_noirq_suspended = false;
 514
 515 Out:
 516	complete_all(&dev->power.completion);
 517	TRACE_RESUME(error);
 518	return error;
 519}
 520
 521static bool is_async(struct device *dev)
 522{
 523	return dev->power.async_suspend && pm_async_enabled
 524		&& !pm_trace_is_enabled();
 525}
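
is_async() only honors devices that opted in.  A subsystem or driver does that
with device_enable_async_suspend(), typically at probe time, when the device
has no ordering requirements beyond its parent and children (those are still
enforced via dpm_wait() and dpm_wait_for_children()); the global gate,
pm_async_enabled, is the /sys/power/pm_async knob.  Sketch (foo_probe is
hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		/* Let this device suspend/resume in an async thread. */
		device_enable_async_suspend(&pdev->dev);
		return 0;
	}
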
 526
 527static void async_resume_noirq(void *data, async_cookie_t cookie)
 528{
 529	struct device *dev = (struct device *)data;
 530	int error;
 531
 532	error = device_resume_noirq(dev, pm_transition, true);
 533	if (error)
 534		pm_dev_err(dev, pm_transition, " async", error);
 535
 536	put_device(dev);
 537}
 538
 539/**
 540 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 541 * @state: PM transition of the system being carried out.
 542 *
 543 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 544 * enable device drivers to receive interrupts.
 545 */
 546void dpm_resume_noirq(pm_message_t state)
 547{
 548	struct device *dev;
 549	ktime_t starttime = ktime_get();
 550
 551	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 552	mutex_lock(&dpm_list_mtx);
 553	pm_transition = state;
 554
 555	/*
 556	 * Advance the async threads upfront,
 557	 * in case the starting of async threads is
 558	 * delayed by non-async resuming devices.
 559	 */
 560	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 561		reinit_completion(&dev->power.completion);
 562		if (is_async(dev)) {
 563			get_device(dev);
 564			async_schedule(async_resume_noirq, dev);
 565		}
 566	}
 567
 568	while (!list_empty(&dpm_noirq_list)) {
 569		dev = to_device(dpm_noirq_list.next);
 570		get_device(dev);
 571		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 572		mutex_unlock(&dpm_list_mtx);
 573
 574		if (!is_async(dev)) {
 575			int error;
 576
 577			error = device_resume_noirq(dev, state, false);
 578			if (error) {
 579				suspend_stats.failed_resume_noirq++;
 580				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 581				dpm_save_failed_dev(dev_name(dev));
 582				pm_dev_err(dev, state, " noirq", error);
 583			}
 584		}
 585
 586		mutex_lock(&dpm_list_mtx);
 587		put_device(dev);
 588	}
 589	mutex_unlock(&dpm_list_mtx);
 590	async_synchronize_full();
 591	dpm_show_time(starttime, state, "noirq");
 592	resume_device_irqs();
 593	device_wakeup_disarm_wake_irqs();
 594	cpuidle_resume();
 595	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 596}
 597
 598/**
 599 * device_resume_early - Execute an "early resume" callback for given device.
 600 * @dev: Device to handle.
 601 * @state: PM transition of the system being carried out.
 602 * @async: If true, the device is being resumed asynchronously.
 603 *
 604 * Runtime PM is disabled for @dev while this function is being executed.
 605 */
 606static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 607{
 608	pm_callback_t callback = NULL;
 609	char *info = NULL;
 610	int error = 0;
 611
 612	TRACE_DEVICE(dev);
 613	TRACE_RESUME(0);
 614
 615	if (dev->power.syscore || dev->power.direct_complete)
 616		goto Out;
 617
 618	if (!dev->power.is_late_suspended)
 619		goto Out;
 620
 621	dpm_wait(dev->parent, async);
 622
 623	if (dev->pm_domain) {
 624		info = "early power domain ";
 625		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 626	} else if (dev->type && dev->type->pm) {
 627		info = "early type ";
 628		callback = pm_late_early_op(dev->type->pm, state);
 629	} else if (dev->class && dev->class->pm) {
 630		info = "early class ";
 631		callback = pm_late_early_op(dev->class->pm, state);
 632	} else if (dev->bus && dev->bus->pm) {
 633		info = "early bus ";
 634		callback = pm_late_early_op(dev->bus->pm, state);
 635	}
 636
 637	if (!callback && dev->driver && dev->driver->pm) {
 638		info = "early driver ";
 639		callback = pm_late_early_op(dev->driver->pm, state);
 640	}
 641
 642	error = dpm_run_callback(callback, dev, state, info);
 643	dev->power.is_late_suspended = false;
 644
 645 Out:
 646	TRACE_RESUME(error);
 647
 648	pm_runtime_enable(dev);
 649	complete_all(&dev->power.completion);
 650	return error;
 651}
 652
 653static void async_resume_early(void *data, async_cookie_t cookie)
 654{
 655	struct device *dev = (struct device *)data;
 656	int error;
 657
 658	error = device_resume_early(dev, pm_transition, true);
 659	if (error)
 660		pm_dev_err(dev, pm_transition, " async", error);
 661
 662	put_device(dev);
 663}
 664
 665/**
 666 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 667 * @state: PM transition of the system being carried out.
 668 */
 669void dpm_resume_early(pm_message_t state)
 670{
 671	struct device *dev;
 672	ktime_t starttime = ktime_get();
 673
 674	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 675	mutex_lock(&dpm_list_mtx);
 676	pm_transition = state;
 677
 678	/*
 679	 * Advance the async threads upfront,
 680	 * in case the starting of async threads is
 681	 * delayed by non-async resuming devices.
 682	 */
 683	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 684		reinit_completion(&dev->power.completion);
 685		if (is_async(dev)) {
 686			get_device(dev);
 687			async_schedule(async_resume_early, dev);
 688		}
 689	}
 690
 691	while (!list_empty(&dpm_late_early_list)) {
 692		dev = to_device(dpm_late_early_list.next);
 693		get_device(dev);
 694		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 695		mutex_unlock(&dpm_list_mtx);
 696
 697		if (!is_async(dev)) {
 698			int error;
 699
 700			error = device_resume_early(dev, state, false);
 701			if (error) {
 702				suspend_stats.failed_resume_early++;
 703				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 704				dpm_save_failed_dev(dev_name(dev));
 705				pm_dev_err(dev, state, " early", error);
 706			}
 707		}
 708		mutex_lock(&dpm_list_mtx);
 709		put_device(dev);
 710	}
 711	mutex_unlock(&dpm_list_mtx);
 712	async_synchronize_full();
 713	dpm_show_time(starttime, state, "early");
 714	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 715}
 716
 717/**
 718 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 719 * @state: PM transition of the system being carried out.
 720 */
 721void dpm_resume_start(pm_message_t state)
 722{
 723	dpm_resume_noirq(state);
 724	dpm_resume_early(state);
 725}
 726EXPORT_SYMBOL_GPL(dpm_resume_start);
 727
 728/**
 729 * device_resume - Execute "resume" callbacks for given device.
 730 * @dev: Device to handle.
 731 * @state: PM transition of the system being carried out.
 732 * @async: If true, the device is being resumed asynchronously.
 733 */
 734static int device_resume(struct device *dev, pm_message_t state, bool async)
 735{
 736	pm_callback_t callback = NULL;
 737	char *info = NULL;
 738	int error = 0;
 739	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 740
 741	TRACE_DEVICE(dev);
 742	TRACE_RESUME(0);
 743
 744	if (dev->power.syscore)
 745		goto Complete;
 746
 747	if (dev->power.direct_complete) {
 748		/* Match the pm_runtime_disable() in __device_suspend(). */
 749		pm_runtime_enable(dev);
 750		goto Complete;
 751	}
 752
 753	dpm_wait(dev->parent, async);
 754	dpm_watchdog_set(&wd, dev);
 755	device_lock(dev);
 756
 757	/*
 758	 * This is a fib.  But we'll allow new children to be added below
 759	 * a resumed device, even if the device hasn't been completed yet.
 760	 */
 761	dev->power.is_prepared = false;
 762
 763	if (!dev->power.is_suspended)
 764		goto Unlock;
 765
 766	if (dev->pm_domain) {
 767		info = "power domain ";
 768		callback = pm_op(&dev->pm_domain->ops, state);
 769		goto Driver;
 770	}
 771
 772	if (dev->type && dev->type->pm) {
 773		info = "type ";
 774		callback = pm_op(dev->type->pm, state);
 775		goto Driver;
 776	}
 777
 778	if (dev->class) {
 779		if (dev->class->pm) {
 780			info = "class ";
 781			callback = pm_op(dev->class->pm, state);
 782			goto Driver;
 783		} else if (dev->class->resume) {
 784			info = "legacy class ";
 785			callback = dev->class->resume;
 786			goto End;
 787		}
 788	}
 789
 790	if (dev->bus) {
 791		if (dev->bus->pm) {
 792			info = "bus ";
 793			callback = pm_op(dev->bus->pm, state);
 794		} else if (dev->bus->resume) {
 795			info = "legacy bus ";
 796			callback = dev->bus->resume;
 797			goto End;
 798		}
 799	}
 800
 801 Driver:
 802	if (!callback && dev->driver && dev->driver->pm) {
 803		info = "driver ";
 804		callback = pm_op(dev->driver->pm, state);
 805	}
 806
 807 End:
 808	error = dpm_run_callback(callback, dev, state, info);
 809	dev->power.is_suspended = false;
 810
 811 Unlock:
 812	device_unlock(dev);
 813	dpm_watchdog_clear(&wd);
 814
 815 Complete:
 816	complete_all(&dev->power.completion);
 817
 818	TRACE_RESUME(error);
 819
 820	return error;
 821}
 822
 823static void async_resume(void *data, async_cookie_t cookie)
 824{
 825	struct device *dev = (struct device *)data;
 826	int error;
 827
 828	error = device_resume(dev, pm_transition, true);
 829	if (error)
 830		pm_dev_err(dev, pm_transition, " async", error);
 831	put_device(dev);
 832}
 833
 834/**
 835 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 836 * @state: PM transition of the system being carried out.
 837 *
 838 * Execute the appropriate "resume" callback for all devices whose status
 839 * indicates that they are suspended.
 840 */
 841void dpm_resume(pm_message_t state)
 842{
 843	struct device *dev;
 844	ktime_t starttime = ktime_get();
 845
 846	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
 847	might_sleep();
 848
 849	mutex_lock(&dpm_list_mtx);
 850	pm_transition = state;
 851	async_error = 0;
 852
 853	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 854		reinit_completion(&dev->power.completion);
 855		if (is_async(dev)) {
 856			get_device(dev);
 857			async_schedule(async_resume, dev);
 858		}
 859	}
 860
 861	while (!list_empty(&dpm_suspended_list)) {
 862		dev = to_device(dpm_suspended_list.next);
 863		get_device(dev);
 864		if (!is_async(dev)) {
 865			int error;
 866
 867			mutex_unlock(&dpm_list_mtx);
 868
 869			error = device_resume(dev, state, false);
 870			if (error) {
 871				suspend_stats.failed_resume++;
 872				dpm_save_failed_step(SUSPEND_RESUME);
 873				dpm_save_failed_dev(dev_name(dev));
 874				pm_dev_err(dev, state, "", error);
 875			}
 876
 877			mutex_lock(&dpm_list_mtx);
 878		}
 879		if (!list_empty(&dev->power.entry))
 880			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 881		put_device(dev);
 882	}
 883	mutex_unlock(&dpm_list_mtx);
 884	async_synchronize_full();
 885	dpm_show_time(starttime, state, NULL);
 886
 887	cpufreq_resume();
 888	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 889}
 890
 891/**
 892 * device_complete - Complete a PM transition for given device.
 893 * @dev: Device to handle.
 894 * @state: PM transition of the system being carried out.
 895 */
 896static void device_complete(struct device *dev, pm_message_t state)
 897{
 898	void (*callback)(struct device *) = NULL;
 899	char *info = NULL;
 900
 901	if (dev->power.syscore)
 902		return;
 903
 904	device_lock(dev);
 905
 906	if (dev->pm_domain) {
 907		info = "completing power domain ";
 908		callback = dev->pm_domain->ops.complete;
 909	} else if (dev->type && dev->type->pm) {
 910		info = "completing type ";
 911		callback = dev->type->pm->complete;
 912	} else if (dev->class && dev->class->pm) {
 913		info = "completing class ";
 914		callback = dev->class->pm->complete;
 915	} else if (dev->bus && dev->bus->pm) {
 916		info = "completing bus ";
 917		callback = dev->bus->pm->complete;
 918	}
 919
 920	if (!callback && dev->driver && dev->driver->pm) {
 921		info = "completing driver ";
 922		callback = dev->driver->pm->complete;
 923	}
 924
 925	if (callback) {
 926		pm_dev_dbg(dev, state, info);
 927		callback(dev);
 928	}
 929
 930	device_unlock(dev);
 931
 932	pm_runtime_put(dev);
 933}
 934
 935/**
 936 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 937 * @state: PM transition of the system being carried out.
 938 *
 939 * Execute the ->complete() callbacks for all devices whose PM status is not
 940 * DPM_ON (this allows new devices to be registered).
 941 */
 942void dpm_complete(pm_message_t state)
 943{
 944	struct list_head list;
 945
 946	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
 947	might_sleep();
 948
 949	INIT_LIST_HEAD(&list);
 950	mutex_lock(&dpm_list_mtx);
 951	while (!list_empty(&dpm_prepared_list)) {
 952		struct device *dev = to_device(dpm_prepared_list.prev);
 953
 954		get_device(dev);
 955		dev->power.is_prepared = false;
 956		list_move(&dev->power.entry, &list);
 957		mutex_unlock(&dpm_list_mtx);
 958
 959		trace_device_pm_callback_start(dev, "", state.event);
 960		device_complete(dev, state);
 961		trace_device_pm_callback_end(dev, 0);
 962
 963		mutex_lock(&dpm_list_mtx);
 964		put_device(dev);
 965	}
 966	list_splice(&list, &dpm_list);
 967	mutex_unlock(&dpm_list_mtx);
 968
 969	/* Allow device probing and trigger re-probing of deferred devices */
 970	device_unblock_probing();
 971	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
 972}
 973
 974/**
 975 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 976 * @state: PM transition of the system being carried out.
 977 *
 978 * Execute "resume" callbacks for all devices and complete the PM transition of
 979 * the system.
 980 */
 981void dpm_resume_end(pm_message_t state)
 982{
 983	dpm_resume(state);
 984	dpm_complete(state);
 985}
 986EXPORT_SYMBOL_GPL(dpm_resume_end);
 987
 988
 989/*------------------------- Suspend routines -------------------------*/
 990
 991/**
 992 * resume_event - Return a "resume" message for given "suspend" sleep state.
 993 * @sleep_state: PM message representing a sleep state.
 994 *
 995 * Return a PM message representing the resume event corresponding to given
 996 * sleep state.
 997 */
 998static pm_message_t resume_event(pm_message_t sleep_state)
 999{
1000	switch (sleep_state.event) {
1001	case PM_EVENT_SUSPEND:
1002		return PMSG_RESUME;
1003	case PM_EVENT_FREEZE:
1004	case PM_EVENT_QUIESCE:
1005		return PMSG_RECOVER;
1006	case PM_EVENT_HIBERNATE:
1007		return PMSG_RESTORE;
1008	}
1009	return PMSG_ON;
1010}
1011
1012/**
1013 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1014 * @dev: Device to handle.
1015 * @state: PM transition of the system being carried out.
1016 * @async: If true, the device is being suspended asynchronously.
1017 *
1018 * The driver of @dev will not receive interrupts while this function is being
1019 * executed.
1020 */
1021static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1022{
1023	pm_callback_t callback = NULL;
1024	char *info = NULL;
1025	int error = 0;
1026
1027	TRACE_DEVICE(dev);
1028	TRACE_SUSPEND(0);
1029
1030	if (async_error)
1031		goto Complete;
1032
1033	if (pm_wakeup_pending()) {
1034		async_error = -EBUSY;
1035		goto Complete;
1036	}
1037
1038	if (dev->power.syscore || dev->power.direct_complete)
1039		goto Complete;
1040
1041	dpm_wait_for_children(dev, async);
1042
1043	if (dev->pm_domain) {
1044		info = "noirq power domain ";
1045		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1046	} else if (dev->type && dev->type->pm) {
1047		info = "noirq type ";
1048		callback = pm_noirq_op(dev->type->pm, state);
1049	} else if (dev->class && dev->class->pm) {
1050		info = "noirq class ";
1051		callback = pm_noirq_op(dev->class->pm, state);
1052	} else if (dev->bus && dev->bus->pm) {
1053		info = "noirq bus ";
1054		callback = pm_noirq_op(dev->bus->pm, state);
1055	}
1056
1057	if (!callback && dev->driver && dev->driver->pm) {
1058		info = "noirq driver ";
1059		callback = pm_noirq_op(dev->driver->pm, state);
1060	}
1061
1062	error = dpm_run_callback(callback, dev, state, info);
1063	if (!error)
1064		dev->power.is_noirq_suspended = true;
1065	else
1066		async_error = error;
1067
1068Complete:
1069	complete_all(&dev->power.completion);
1070	TRACE_SUSPEND(error);
1071	return error;
1072}
1073
1074static void async_suspend_noirq(void *data, async_cookie_t cookie)
1075{
1076	struct device *dev = (struct device *)data;
1077	int error;
1078
1079	error = __device_suspend_noirq(dev, pm_transition, true);
1080	if (error) {
1081		dpm_save_failed_dev(dev_name(dev));
1082		pm_dev_err(dev, pm_transition, " async", error);
1083	}
1084
1085	put_device(dev);
1086}
1087
1088static int device_suspend_noirq(struct device *dev)
1089{
1090	reinit_completion(&dev->power.completion);
1091
1092	if (is_async(dev)) {
1093		get_device(dev);
1094		async_schedule(async_suspend_noirq, dev);
1095		return 0;
1096	}
1097	return __device_suspend_noirq(dev, pm_transition, false);
1098}
1099
1100/**
1101 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1102 * @state: PM transition of the system being carried out.
1103 *
1104 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1105 * handlers for all non-sysdev devices.
1106 */
1107int dpm_suspend_noirq(pm_message_t state)
1108{
1109	ktime_t starttime = ktime_get();
1110	int error = 0;
1111
1112	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1113	cpuidle_pause();
1114	device_wakeup_arm_wake_irqs();
1115	suspend_device_irqs();
1116	mutex_lock(&dpm_list_mtx);
1117	pm_transition = state;
1118	async_error = 0;
1119
1120	while (!list_empty(&dpm_late_early_list)) {
1121		struct device *dev = to_device(dpm_late_early_list.prev);
1122
1123		get_device(dev);
1124		mutex_unlock(&dpm_list_mtx);
1125
1126		error = device_suspend_noirq(dev);
1127
1128		mutex_lock(&dpm_list_mtx);
1129		if (error) {
1130			pm_dev_err(dev, state, " noirq", error);
1131			dpm_save_failed_dev(dev_name(dev));
1132			put_device(dev);
1133			break;
1134		}
1135		if (!list_empty(&dev->power.entry))
1136			list_move(&dev->power.entry, &dpm_noirq_list);
1137		put_device(dev);
1138
1139		if (async_error)
1140			break;
1141	}
1142	mutex_unlock(&dpm_list_mtx);
1143	async_synchronize_full();
1144	if (!error)
1145		error = async_error;
1146
1147	if (error) {
1148		suspend_stats.failed_suspend_noirq++;
1149		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1150		dpm_resume_noirq(resume_event(state));
1151	} else {
1152		dpm_show_time(starttime, state, "noirq");
1153	}
1154	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1155	return error;
1156}
1157
1158/**
1159 * __device_suspend_late - Execute a "late suspend" callback for given device.
1160 * @dev: Device to handle.
1161 * @state: PM transition of the system being carried out.
1162 * @async: If true, the device is being suspended asynchronously.
1163 *
1164 * Runtime PM is disabled for @dev while this function is being executed.
1165 */
1166static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1167{
1168	pm_callback_t callback = NULL;
1169	char *info = NULL;
1170	int error = 0;
1171
1172	TRACE_DEVICE(dev);
1173	TRACE_SUSPEND(0);
1174
1175	__pm_runtime_disable(dev, false);
1176
1177	if (async_error)
1178		goto Complete;
1179
1180	if (pm_wakeup_pending()) {
1181		async_error = -EBUSY;
1182		goto Complete;
1183	}
1184
1185	if (dev->power.syscore || dev->power.direct_complete)
1186		goto Complete;
1187
1188	dpm_wait_for_children(dev, async);
1189
1190	if (dev->pm_domain) {
1191		info = "late power domain ";
1192		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1193	} else if (dev->type && dev->type->pm) {
1194		info = "late type ";
1195		callback = pm_late_early_op(dev->type->pm, state);
1196	} else if (dev->class && dev->class->pm) {
1197		info = "late class ";
1198		callback = pm_late_early_op(dev->class->pm, state);
1199	} else if (dev->bus && dev->bus->pm) {
1200		info = "late bus ";
1201		callback = pm_late_early_op(dev->bus->pm, state);
1202	}
1203
1204	if (!callback && dev->driver && dev->driver->pm) {
1205		info = "late driver ";
1206		callback = pm_late_early_op(dev->driver->pm, state);
1207	}
1208
1209	error = dpm_run_callback(callback, dev, state, info);
1210	if (!error)
1211		dev->power.is_late_suspended = true;
1212	else
1213		async_error = error;
1214
1215Complete:
1216	TRACE_SUSPEND(error);
1217	complete_all(&dev->power.completion);
1218	return error;
1219}
1220
1221static void async_suspend_late(void *data, async_cookie_t cookie)
1222{
1223	struct device *dev = (struct device *)data;
1224	int error;
1225
1226	error = __device_suspend_late(dev, pm_transition, true);
1227	if (error) {
1228		dpm_save_failed_dev(dev_name(dev));
1229		pm_dev_err(dev, pm_transition, " async", error);
1230	}
1231	put_device(dev);
1232}
1233
1234static int device_suspend_late(struct device *dev)
1235{
1236	reinit_completion(&dev->power.completion);
1237
1238	if (is_async(dev)) {
1239		get_device(dev);
1240		async_schedule(async_suspend_late, dev);
1241		return 0;
1242	}
1243
1244	return __device_suspend_late(dev, pm_transition, false);
1245}
1246
1247/**
1248 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1249 * @state: PM transition of the system being carried out.
1250 */
1251int dpm_suspend_late(pm_message_t state)
1252{
1253	ktime_t starttime = ktime_get();
1254	int error = 0;
1255
1256	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1257	mutex_lock(&dpm_list_mtx);
1258	pm_transition = state;
1259	async_error = 0;
1260
1261	while (!list_empty(&dpm_suspended_list)) {
1262		struct device *dev = to_device(dpm_suspended_list.prev);
1263
1264		get_device(dev);
1265		mutex_unlock(&dpm_list_mtx);
1266
1267		error = device_suspend_late(dev);
1268
1269		mutex_lock(&dpm_list_mtx);
1270		if (error) {
1271			pm_dev_err(dev, state, " late", error);
1272			dpm_save_failed_dev(dev_name(dev));
1273			put_device(dev);
1274			break;
1275		}
1276		if (!list_empty(&dev->power.entry))
1277			list_move(&dev->power.entry, &dpm_late_early_list);
1278		put_device(dev);
1279
1280		if (async_error)
1281			break;
1282	}
1283	mutex_unlock(&dpm_list_mtx);
1284	async_synchronize_full();
1285	if (!error)
1286		error = async_error;
1287	if (error) {
1288		suspend_stats.failed_suspend_late++;
1289		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1290		dpm_resume_early(resume_event(state));
1291	} else {
1292		dpm_show_time(starttime, state, "late");
1293	}
1294	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1295	return error;
1296}
1297
1298/**
1299 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1300 * @state: PM transition of the system being carried out.
1301 */
1302int dpm_suspend_end(pm_message_t state)
1303{
1304	int error = dpm_suspend_late(state);
1305	if (error)
1306		return error;
1307
1308	error = dpm_suspend_noirq(state);
1309	if (error) {
1310		dpm_resume_early(resume_event(state));
1311		return error;
1312	}
1313
1314	return 0;
1315}
1316EXPORT_SYMBOL_GPL(dpm_suspend_end);
1317
1318/**
1319 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1320 * @dev: Device to suspend.
1321 * @state: PM transition of the system being carried out.
1322 * @cb: Suspend callback to execute.
1323 * @info: string description of caller.
1324 */
1325static int legacy_suspend(struct device *dev, pm_message_t state,
1326			  int (*cb)(struct device *dev, pm_message_t state),
1327			  char *info)
1328{
1329	int error;
1330	ktime_t calltime;
1331
1332	calltime = initcall_debug_start(dev);
1333
1334	trace_device_pm_callback_start(dev, info, state.event);
1335	error = cb(dev, state);
1336	trace_device_pm_callback_end(dev, error);
1337	suspend_report_result(cb, error);
1338
1339	initcall_debug_report(dev, calltime, error, state, info);
1340
1341	return error;
1342}
1343
1344/**
1345 * __device_suspend - Execute "suspend" callbacks for given device.
1346 * @dev: Device to handle.
1347 * @state: PM transition of the system being carried out.
1348 * @async: If true, the device is being suspended asynchronously.
1349 */
1350static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1351{
1352	pm_callback_t callback = NULL;
1353	char *info = NULL;
1354	int error = 0;
1355	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1356
1357	TRACE_DEVICE(dev);
1358	TRACE_SUSPEND(0);
1359
1360	dpm_wait_for_children(dev, async);
1361
1362	if (async_error)
1363		goto Complete;
1364
1365	/*
1366	 * If a device configured to wake up the system from sleep states
1367	 * has been suspended at run time and there's a resume request pending
1368	 * for it, this is equivalent to the device signaling wakeup, so the
1369	 * system suspend operation should be aborted.
1370	 */
1371	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1372		pm_wakeup_event(dev, 0);
1373
1374	if (pm_wakeup_pending()) {
1375		async_error = -EBUSY;
1376		goto Complete;
1377	}
1378
1379	if (dev->power.syscore)
1380		goto Complete;
1381
1382	if (dev->power.direct_complete) {
1383		if (pm_runtime_status_suspended(dev)) {
1384			pm_runtime_disable(dev);
1385			if (pm_runtime_status_suspended(dev))
1386				goto Complete;
1387
1388			pm_runtime_enable(dev);
1389		}
1390		dev->power.direct_complete = false;
1391	}
1392
1393	dpm_watchdog_set(&wd, dev);
1394	device_lock(dev);
1395
1396	if (dev->pm_domain) {
1397		info = "power domain ";
1398		callback = pm_op(&dev->pm_domain->ops, state);
1399		goto Run;
1400	}
1401
1402	if (dev->type && dev->type->pm) {
1403		info = "type ";
1404		callback = pm_op(dev->type->pm, state);
1405		goto Run;
1406	}
1407
1408	if (dev->class) {
1409		if (dev->class->pm) {
1410			info = "class ";
1411			callback = pm_op(dev->class->pm, state);
1412			goto Run;
1413		} else if (dev->class->suspend) {
1414			pm_dev_dbg(dev, state, "legacy class ");
1415			error = legacy_suspend(dev, state, dev->class->suspend,
1416						"legacy class ");
1417			goto End;
1418		}
1419	}
1420
1421	if (dev->bus) {
1422		if (dev->bus->pm) {
1423			info = "bus ";
1424			callback = pm_op(dev->bus->pm, state);
1425		} else if (dev->bus->suspend) {
1426			pm_dev_dbg(dev, state, "legacy bus ");
1427			error = legacy_suspend(dev, state, dev->bus->suspend,
1428						"legacy bus ");
1429			goto End;
1430		}
1431	}
1432
1433 Run:
1434	if (!callback && dev->driver && dev->driver->pm) {
1435		info = "driver ";
1436		callback = pm_op(dev->driver->pm, state);
1437	}
1438
1439	error = dpm_run_callback(callback, dev, state, info);
1440
1441 End:
1442	if (!error) {
1443		struct device *parent = dev->parent;
1444
1445		dev->power.is_suspended = true;
1446		if (parent) {
1447			spin_lock_irq(&parent->power.lock);
1448
1449			dev->parent->power.direct_complete = false;
1450			if (dev->power.wakeup_path
1451			    && !dev->parent->power.ignore_children)
1452				dev->parent->power.wakeup_path = true;
1453
1454			spin_unlock_irq(&parent->power.lock);
1455		}
1456	}
1457
1458	device_unlock(dev);
1459	dpm_watchdog_clear(&wd);
1460
1461 Complete:
1462	complete_all(&dev->power.completion);
1463	if (error)
1464		async_error = error;
1465
1466	TRACE_SUSPEND(error);
1467	return error;
1468}
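
The pm_runtime_barrier()/pm_wakeup_event() logic above turns a pending runtime
resume of a wakeup-capable device into an abort of the whole transition.  The
reporting side lives in drivers; a sketch of an interrupt handler flagging a
wakeup event (the foo_* names are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/pm_wakeup.h>

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		struct device *dev = data;

		/* Makes pm_wakeup_pending() true, so the suspend paths
		 * above bail out with -EBUSY. */
		pm_wakeup_event(dev, 0);
		return IRQ_HANDLED;
	}

The device must have been declared wakeup-capable beforehand, typically with
device_init_wakeup(dev, true) at probe time.
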
1469
1470static void async_suspend(void *data, async_cookie_t cookie)
1471{
1472	struct device *dev = (struct device *)data;
1473	int error;
1474
1475	error = __device_suspend(dev, pm_transition, true);
1476	if (error) {
1477		dpm_save_failed_dev(dev_name(dev));
1478		pm_dev_err(dev, pm_transition, " async", error);
1479	}
1480
1481	put_device(dev);
1482}
1483
1484static int device_suspend(struct device *dev)
1485{
1486	reinit_completion(&dev->power.completion);
1487
1488	if (is_async(dev)) {
1489		get_device(dev);
1490		async_schedule(async_suspend, dev);
1491		return 0;
1492	}
1493
1494	return __device_suspend(dev, pm_transition, false);
1495}
1496
1497/**
1498 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1499 * @state: PM transition of the system being carried out.
1500 */
1501int dpm_suspend(pm_message_t state)
1502{
1503	ktime_t starttime = ktime_get();
1504	int error = 0;
1505
1506	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1507	might_sleep();
1508
1509	cpufreq_suspend();
1510
1511	mutex_lock(&dpm_list_mtx);
1512	pm_transition = state;
1513	async_error = 0;
1514	while (!list_empty(&dpm_prepared_list)) {
1515		struct device *dev = to_device(dpm_prepared_list.prev);
1516
1517		get_device(dev);
1518		mutex_unlock(&dpm_list_mtx);
1519
1520		error = device_suspend(dev);
1521
1522		mutex_lock(&dpm_list_mtx);
1523		if (error) {
1524			pm_dev_err(dev, state, "", error);
1525			dpm_save_failed_dev(dev_name(dev));
1526			put_device(dev);
1527			break;
1528		}
1529		if (!list_empty(&dev->power.entry))
1530			list_move(&dev->power.entry, &dpm_suspended_list);
1531		put_device(dev);
1532		if (async_error)
1533			break;
1534	}
1535	mutex_unlock(&dpm_list_mtx);
1536	async_synchronize_full();
1537	if (!error)
1538		error = async_error;
1539	if (error) {
1540		suspend_stats.failed_suspend++;
1541		dpm_save_failed_step(SUSPEND_SUSPEND);
1542	} else
1543		dpm_show_time(starttime, state, NULL);
1544	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1545	return error;
1546}
1547
1548/**
1549 * device_prepare - Prepare a device for system power transition.
1550 * @dev: Device to handle.
1551 * @state: PM transition of the system being carried out.
1552 *
1553 * Execute the ->prepare() callback(s) for given device.  No new children of the
1554 * device may be registered after this function has returned.
1555 */
1556static int device_prepare(struct device *dev, pm_message_t state)
1557{
1558	int (*callback)(struct device *) = NULL;
1559	char *info = NULL;
1560	int ret = 0;
1561
1562	if (dev->power.syscore)
1563		return 0;
1564
1565	/*
1566	 * If a device's parent goes into runtime suspend at the wrong time,
1567	 * it won't be possible to resume the device.  To prevent this we
1568	 * block runtime suspend here, during the prepare phase, and allow
1569	 * it again during the complete phase.
1570	 */
1571	pm_runtime_get_noresume(dev);
1572
1573	device_lock(dev);
1574
1575	dev->power.wakeup_path = device_may_wakeup(dev);
1576
1577	if (dev->power.no_pm_callbacks) {
1578		ret = 1;	/* Let device go direct_complete */
1579		goto unlock;
1580	}
1581
1582	if (dev->pm_domain) {
1583		info = "preparing power domain ";
1584		callback = dev->pm_domain->ops.prepare;
1585	} else if (dev->type && dev->type->pm) {
1586		info = "preparing type ";
1587		callback = dev->type->pm->prepare;
1588	} else if (dev->class && dev->class->pm) {
1589		info = "preparing class ";
1590		callback = dev->class->pm->prepare;
1591	} else if (dev->bus && dev->bus->pm) {
1592		info = "preparing bus ";
1593		callback = dev->bus->pm->prepare;
1594	}
1595
1596	if (!callback && dev->driver && dev->driver->pm) {
1597		info = "preparing driver ";
1598		callback = dev->driver->pm->prepare;
1599	}
1600
1601	if (callback)
1602		ret = callback(dev);
1603
1604unlock:
1605	device_unlock(dev);
1606
1607	if (ret < 0) {
1608		suspend_report_result(callback, ret);
1609		pm_runtime_put(dev);
1610		return ret;
1611	}
1612	/*
1613	 * A positive return value from ->prepare() means "this device appears
1614	 * to be runtime-suspended and its state is fine, so if it really is
1615	 * runtime-suspended, you can leave it in that state provided that you
1616	 * will do the same thing with all of its descendants".  This only
1617	 * applies to suspend transitions, however.
1618	 */
1619	spin_lock_irq(&dev->power.lock);
1620	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1621	spin_unlock_irq(&dev->power.lock);
1622	return 0;
1623}
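
The positive-return contract described above is what feeds the direct_complete
optimization: a device reported by ->prepare() as runtime-suspended can skip
all of its suspend and resume callbacks.  On the driver side that can be as
small as this sketch (foo_* names hypothetical):

	#include <linux/pm_runtime.h>

	static int foo_prepare(struct device *dev)
	{
		/* Positive return: if the device really is runtime-suspended,
		 * it may stay that way for the whole transition. */
		return pm_runtime_status_suspended(dev);
	}

	static void foo_complete(struct device *dev)
	{
		/* Runs even after an aborted suspend; undo prepare work here. */
	}
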
1624
1625/**
1626 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1627 * @state: PM transition of the system being carried out.
1628 *
1629 * Execute the ->prepare() callback(s) for all devices.
1630 */
1631int dpm_prepare(pm_message_t state)
1632{
1633	int error = 0;
1634
1635	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1636	might_sleep();
1637
1638	/*
1639	 * Give a chance for the known devices to complete their probes, before
1640	 * disabling probing of devices. This sync point is important at least
1641	 * at boot time + hibernation restore.
1642	 */
1643	wait_for_device_probe();
1644	/*
1645	 * It is unsafe if probing of devices happens during suspend or
1646	 * hibernation, as system behavior would be unpredictable in that case.
1647	 * So let's prohibit device probing here and defer those probes
1648	 * instead. The normal behavior will be restored in dpm_complete().
1649	 */
1650	device_block_probing();
1651
1652	mutex_lock(&dpm_list_mtx);
1653	while (!list_empty(&dpm_list)) {
1654		struct device *dev = to_device(dpm_list.next);
1655
1656		get_device(dev);
1657		mutex_unlock(&dpm_list_mtx);
1658
1659		trace_device_pm_callback_start(dev, "", state.event);
1660		error = device_prepare(dev, state);
1661		trace_device_pm_callback_end(dev, error);
1662
1663		mutex_lock(&dpm_list_mtx);
1664		if (error) {
1665			if (error == -EAGAIN) {
1666				put_device(dev);
1667				error = 0;
1668				continue;
1669			}
1670			printk(KERN_INFO "PM: Device %s not prepared "
1671				"for power transition: code %d\n",
1672				dev_name(dev), error);
1673			put_device(dev);
1674			break;
1675		}
1676		dev->power.is_prepared = true;
1677		if (!list_empty(&dev->power.entry))
1678			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1679		put_device(dev);
1680	}
1681	mutex_unlock(&dpm_list_mtx);
1682	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1683	return error;
1684}
1685
1686/**
1687 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1688 * @state: PM transition of the system being carried out.
1689 *
1690 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1691 * callbacks for them.
1692 */
1693int dpm_suspend_start(pm_message_t state)
1694{
1695	int error;
1696
1697	error = dpm_prepare(state);
1698	if (error) {
1699		suspend_stats.failed_prepare++;
1700		dpm_save_failed_step(SUSPEND_PREPARE);
1701	} else
1702		error = dpm_suspend(state);
1703	return error;
1704}
1705EXPORT_SYMBOL_GPL(dpm_suspend_start);
1706
1707void __suspend_report_result(const char *function, void *fn, int ret)
1708{
1709	if (ret)
1710		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1711}
1712EXPORT_SYMBOL_GPL(__suspend_report_result);
1713
1714/**
1715 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1716 * @dev: Device to wait for.
1717 * @subordinate: Device that needs to wait for @dev.
1718 */
1719int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1720{
1721	dpm_wait(dev, subordinate->power.async_suspend);
1722	return async_error;
1723}
1724EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
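
This helper serializes functional dependencies that the parent/child ordering
of dpm_list cannot express.  A sketch of a resume callback waiting for an
unrelated companion device (struct foo and its ->companion are hypothetical):

	static int foo_resume(struct device *dev)
	{
		struct foo *priv = dev_get_drvdata(dev);
		int error;

		/* Block until the companion's PM transition has completed. */
		error = device_pm_wait_for_dev(dev, priv->companion);
		if (error)
			return error;

		/* The companion is up; shared state may be touched now. */
		return 0;
	}
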
1725
1726/**
1727 * dpm_for_each_dev - device iterator.
1728 * @data: data for the callback.
1729 * @fn: function to be called for each device.
1730 *
1731 * Iterate over devices in dpm_list, and call @fn for each device,
1732 * passing it @data.
1733 */
1734void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1735{
1736	struct device *dev;
1737
1738	if (!fn)
1739		return;
1740
1741	device_pm_lock();
1742	list_for_each_entry(dev, &dpm_list, power.entry)
1743		fn(dev, data);
1744	device_pm_unlock();
1745}
1746EXPORT_SYMBOL_GPL(dpm_for_each_dev);
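
A minimal consumer of this iterator, e.g. counting the devices currently on
dpm_list (foo_* names hypothetical):

	static void foo_count_cb(struct device *dev, void *data)
	{
		(*(int *)data)++;
	}

	static int foo_count_pm_devices(void)
	{
		int count = 0;

		dpm_for_each_dev(&count, foo_count_cb);
		return count;
	}
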
1747
1748static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1749{
1750	if (!ops)
1751		return true;
1752
1753	return !ops->prepare &&
1754	       !ops->suspend &&
1755	       !ops->suspend_late &&
1756	       !ops->suspend_noirq &&
1757	       !ops->resume_noirq &&
1758	       !ops->resume_early &&
1759	       !ops->resume &&
1760	       !ops->complete;
1761}
1762
1763void device_pm_check_callbacks(struct device *dev)
1764{
1765	spin_lock_irq(&dev->power.lock);
1766	dev->power.no_pm_callbacks =
1767		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
1768		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1769		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1770		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1771		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
1772	spin_unlock_irq(&dev->power.lock);
1773}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19#define dev_fmt pr_fmt
  20
  21#include <linux/device.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/sched/debug.h>
  31#include <linux/async.h>
  32#include <linux/suspend.h>
  33#include <trace/events/power.h>
  34#include <linux/cpufreq.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43#define list_for_each_entry_rcu_locked(pos, head, member) \
  44	list_for_each_entry_rcu(pos, head, member, \
  45			device_links_read_lock_held())
  46
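
The v6.9 code walks each device's supplier/consumer links under RCU with this
helper; the links themselves are created with device_link_add(), which lets
the PM core order two otherwise unrelated devices.  Sketch (foo_link_devices
is hypothetical):

	static int foo_link_devices(struct device *consumer,
				    struct device *supplier)
	{
		struct device_link *link;

		/* The consumer now suspends before and resumes after the
		 * supplier; DL_FLAG_PM_RUNTIME propagates runtime-PM needs. */
		link = device_link_add(consumer, supplier,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		return link ? 0 : -EINVAL;
	}
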
  47/*
  48 * The entries in the dpm_list list are in a depth first order, simply
  49 * because children are guaranteed to be discovered after parents, and
  50 * are inserted at the back of the list on discovery.
  51 *
  52 * Since device_pm_add() may be called with a device lock held,
  53 * we must never try to acquire a device lock while holding
  54 * dpm_list_mutex.
  55 */
  56
  57LIST_HEAD(dpm_list);
  58static LIST_HEAD(dpm_prepared_list);
  59static LIST_HEAD(dpm_suspended_list);
  60static LIST_HEAD(dpm_late_early_list);
  61static LIST_HEAD(dpm_noirq_list);
  62
  63static DEFINE_MUTEX(dpm_list_mtx);
  64static pm_message_t pm_transition;
  65
  66static int async_error;
  67
  68static const char *pm_verb(int event)
  69{
  70	switch (event) {
  71	case PM_EVENT_SUSPEND:
  72		return "suspend";
  73	case PM_EVENT_RESUME:
  74		return "resume";
  75	case PM_EVENT_FREEZE:
  76		return "freeze";
  77	case PM_EVENT_QUIESCE:
  78		return "quiesce";
  79	case PM_EVENT_HIBERNATE:
  80		return "hibernate";
  81	case PM_EVENT_THAW:
  82		return "thaw";
  83	case PM_EVENT_RESTORE:
  84		return "restore";
  85	case PM_EVENT_RECOVER:
  86		return "recover";
  87	default:
  88		return "(unknown PM event)";
  89	}
  90}
  91
  92/**
  93 * device_pm_sleep_init - Initialize system suspend-related device fields.
  94 * @dev: Device object being initialized.
  95 */
  96void device_pm_sleep_init(struct device *dev)
  97{
  98	dev->power.is_prepared = false;
  99	dev->power.is_suspended = false;
 100	dev->power.is_noirq_suspended = false;
 101	dev->power.is_late_suspended = false;
 102	init_completion(&dev->power.completion);
 103	complete_all(&dev->power.completion);
 104	dev->power.wakeup = NULL;
 105	INIT_LIST_HEAD(&dev->power.entry);
 106}
 107
 108/**
 109 * device_pm_lock - Lock the list of active devices used by the PM core.
 110 */
 111void device_pm_lock(void)
 112{
 113	mutex_lock(&dpm_list_mtx);
 114}
 115
 116/**
 117 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 118 */
 119void device_pm_unlock(void)
 120{
 121	mutex_unlock(&dpm_list_mtx);
 122}
 123
 124/**
 125 * device_pm_add - Add a device to the PM core's list of active devices.
 126 * @dev: Device to add to the list.
 127 */
 128void device_pm_add(struct device *dev)
 129{
 130	/* Skip PM setup/initialization. */
 131	if (device_pm_not_required(dev))
 132		return;
 133
 134	pr_debug("Adding info for %s:%s\n",
 135		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 136	device_pm_check_callbacks(dev);
 137	mutex_lock(&dpm_list_mtx);
 138	if (dev->parent && dev->parent->power.is_prepared)
 139		dev_warn(dev, "parent %s should not be sleeping\n",
 140			dev_name(dev->parent));
 141	list_add_tail(&dev->power.entry, &dpm_list);
 142	dev->power.in_dpm_list = true;
 143	mutex_unlock(&dpm_list_mtx);
 144}
 145
 146/**
 147 * device_pm_remove - Remove a device from the PM core's list of active devices.
 148 * @dev: Device to be removed from the list.
 149 */
 150void device_pm_remove(struct device *dev)
 151{
 152	if (device_pm_not_required(dev))
 153		return;
 154
 155	pr_debug("Removing info for %s:%s\n",
 156		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 157	complete_all(&dev->power.completion);
 158	mutex_lock(&dpm_list_mtx);
 159	list_del_init(&dev->power.entry);
 160	dev->power.in_dpm_list = false;
 161	mutex_unlock(&dpm_list_mtx);
 162	device_wakeup_disable(dev);
 163	pm_runtime_remove(dev);
 164	device_pm_check_callbacks(dev);
 165}
 166
 167/**
 168 * device_pm_move_before - Move device in the PM core's list of active devices.
 169 * @deva: Device to move in dpm_list.
 170 * @devb: Device @deva should come before.
 171 */
 172void device_pm_move_before(struct device *deva, struct device *devb)
 173{
 174	pr_debug("Moving %s:%s before %s:%s\n",
 175		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 176		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 177	/* Delete deva from dpm_list and reinsert before devb. */
 178	list_move_tail(&deva->power.entry, &devb->power.entry);
 179}
 180
 181/**
 182 * device_pm_move_after - Move device in the PM core's list of active devices.
 183 * @deva: Device to move in dpm_list.
 184 * @devb: Device @deva should come after.
 185 */
 186void device_pm_move_after(struct device *deva, struct device *devb)
 187{
 188	pr_debug("Moving %s:%s after %s:%s\n",
 189		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 190		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 191	/* Delete deva from dpm_list and reinsert after devb. */
 192	list_move(&deva->power.entry, &devb->power.entry);
 193}
 194
 195/**
 196 * device_pm_move_last - Move device to end of the PM core's list of devices.
 197 * @dev: Device to move in dpm_list.
 198 */
 199void device_pm_move_last(struct device *dev)
 200{
 201	pr_debug("Moving %s:%s to end of list\n",
 202		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 203	list_move_tail(&dev->power.entry, &dpm_list);
 204}
 205
 206static ktime_t initcall_debug_start(struct device *dev, void *cb)
 207{
 208	if (!pm_print_times_enabled)
 209		return 0;
 210
 211	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 212		 task_pid_nr(current),
 213		 dev->parent ? dev_name(dev->parent) : "none");
 214	return ktime_get();
 215}
 216
 217static void initcall_debug_report(struct device *dev, ktime_t calltime,
 218				  void *cb, int error)
 219{
 220	ktime_t rettime;
 221
 222	if (!pm_print_times_enabled)
 223		return;
 224
 225	rettime = ktime_get();
 226	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 227		 (unsigned long long)ktime_us_delta(rettime, calltime));
 228}
 229
 230/**
 231 * dpm_wait - Wait for a PM operation to complete.
 232 * @dev: Device to wait for.
 233 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 234 */
 235static void dpm_wait(struct device *dev, bool async)
 236{
 237	if (!dev)
 238		return;
 239
 240	if (async || (pm_async_enabled && dev->power.async_suspend))
 241		wait_for_completion(&dev->power.completion);
 242}
 243
 244static int dpm_wait_fn(struct device *dev, void *async_ptr)
 245{
 246	dpm_wait(dev, *((bool *)async_ptr));
 247	return 0;
 248}
 249
 250static void dpm_wait_for_children(struct device *dev, bool async)
 251{
  252	device_for_each_child(dev, &async, dpm_wait_fn);
 253}
 254
 255static void dpm_wait_for_suppliers(struct device *dev, bool async)
 256{
 257	struct device_link *link;
 258	int idx;
 259
 260	idx = device_links_read_lock();
 261
 262	/*
 263	 * If the supplier goes away right after we've checked the link to it,
 264	 * we'll wait for its completion to change the state, but that's fine,
 265	 * because the only things that will block as a result are the SRCU
 266	 * callbacks freeing the link objects for the links in the list we're
 267	 * walking.
 268	 */
 269	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 270		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 271			dpm_wait(link->supplier, async);
 272
 273	device_links_read_unlock(idx);
 274}
 275
 276static bool dpm_wait_for_superior(struct device *dev, bool async)
 277{
 278	struct device *parent;
 279
 280	/*
 281	 * If the device is resumed asynchronously and the parent's callback
 282	 * deletes both the device and the parent itself, the parent object may
 283	 * be freed while this function is running, so avoid that by reference
 284	 * counting the parent once more unless the device has been deleted
 285	 * already (in which case return right away).
 286	 */
 287	mutex_lock(&dpm_list_mtx);
 288
 289	if (!device_pm_initialized(dev)) {
 290		mutex_unlock(&dpm_list_mtx);
 291		return false;
 292	}
 293
 294	parent = get_device(dev->parent);
 295
 296	mutex_unlock(&dpm_list_mtx);
 297
 298	dpm_wait(parent, async);
 299	put_device(parent);
 300
 301	dpm_wait_for_suppliers(dev, async);
 302
 303	/*
 304	 * If the parent's callback has deleted the device, attempting to resume
 305	 * it would be invalid, so avoid doing that then.
 306	 */
 307	return device_pm_initialized(dev);
 308}
 309
 310static void dpm_wait_for_consumers(struct device *dev, bool async)
 311{
 312	struct device_link *link;
 313	int idx;
 314
 315	idx = device_links_read_lock();
 316
 317	/*
 318	 * The status of a device link can only be changed from "dormant" by a
 319	 * probe, but that cannot happen during system suspend/resume.  In
 320	 * theory it can change to "dormant" at that time, but then it is
  321	 * reasonable to wait for the target device anyway (e.g. if it goes
 322	 * away, it's better to wait for it to go away completely and then
 323	 * continue instead of trying to continue in parallel with its
 324	 * unregistration).
 325	 */
 326	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 327		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 328			dpm_wait(link->consumer, async);
 329
 330	device_links_read_unlock(idx);
 331}
 332
 333static void dpm_wait_for_subordinate(struct device *dev, bool async)
 334{
 335	dpm_wait_for_children(dev, async);
 336	dpm_wait_for_consumers(dev, async);
 337}
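/*
 * Illustrative sketch (not part of this file's logic): the supplier/consumer
 * ordering honored by dpm_wait_for_suppliers()/dpm_wait_for_consumers() above
 * comes from device links.  A consumer driver would typically create such a
 * link at probe time, e.g.:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_PM_RUNTIME);
 *
 * where consumer_dev and supplier_dev are hypothetical placeholders for the
 * devices involved.
 */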
 338
 339/**
 340 * pm_op - Return the PM operation appropriate for given PM event.
 341 * @ops: PM operations to choose from.
 342 * @state: PM transition of the system being carried out.
 343 */
 344static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 345{
 346	switch (state.event) {
 347#ifdef CONFIG_SUSPEND
 348	case PM_EVENT_SUSPEND:
 349		return ops->suspend;
 350	case PM_EVENT_RESUME:
 351		return ops->resume;
 352#endif /* CONFIG_SUSPEND */
 353#ifdef CONFIG_HIBERNATE_CALLBACKS
 354	case PM_EVENT_FREEZE:
 355	case PM_EVENT_QUIESCE:
 356		return ops->freeze;
 357	case PM_EVENT_HIBERNATE:
 358		return ops->poweroff;
 359	case PM_EVENT_THAW:
 360	case PM_EVENT_RECOVER:
 361		return ops->thaw;
 362	case PM_EVENT_RESTORE:
 363		return ops->restore;
 364#endif /* CONFIG_HIBERNATE_CALLBACKS */
 365	}
 366
 367	return NULL;
 368}
 369
 370/**
 371 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 372 * @ops: PM operations to choose from.
 373 * @state: PM transition of the system being carried out.
 374 *
  375 * Runtime PM is disabled for the device while the returned callback runs.
 376 */
 377static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 378				      pm_message_t state)
 379{
 380	switch (state.event) {
 381#ifdef CONFIG_SUSPEND
 382	case PM_EVENT_SUSPEND:
 383		return ops->suspend_late;
 384	case PM_EVENT_RESUME:
 385		return ops->resume_early;
 386#endif /* CONFIG_SUSPEND */
 387#ifdef CONFIG_HIBERNATE_CALLBACKS
 388	case PM_EVENT_FREEZE:
 389	case PM_EVENT_QUIESCE:
 390		return ops->freeze_late;
 391	case PM_EVENT_HIBERNATE:
 392		return ops->poweroff_late;
 393	case PM_EVENT_THAW:
 394	case PM_EVENT_RECOVER:
 395		return ops->thaw_early;
 396	case PM_EVENT_RESTORE:
 397		return ops->restore_early;
 398#endif /* CONFIG_HIBERNATE_CALLBACKS */
 399	}
 400
 401	return NULL;
 402}
 403
 404/**
 405 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 406 * @ops: PM operations to choose from.
 407 * @state: PM transition of the system being carried out.
 408 *
  409 * The device's driver will not receive interrupts while the callback returned
  410 * by this function is being executed.
 411 */
 412static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 413{
 414	switch (state.event) {
 415#ifdef CONFIG_SUSPEND
 416	case PM_EVENT_SUSPEND:
 417		return ops->suspend_noirq;
 418	case PM_EVENT_RESUME:
 419		return ops->resume_noirq;
 420#endif /* CONFIG_SUSPEND */
 421#ifdef CONFIG_HIBERNATE_CALLBACKS
 422	case PM_EVENT_FREEZE:
 423	case PM_EVENT_QUIESCE:
 424		return ops->freeze_noirq;
 425	case PM_EVENT_HIBERNATE:
 426		return ops->poweroff_noirq;
 427	case PM_EVENT_THAW:
 428	case PM_EVENT_RECOVER:
 429		return ops->thaw_noirq;
 430	case PM_EVENT_RESTORE:
 431		return ops->restore_noirq;
 432#endif /* CONFIG_HIBERNATE_CALLBACKS */
 433	}
 434
 435	return NULL;
 436}
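/*
 * Illustrative sketch: pm_op(), pm_late_early_op() and pm_noirq_op() each
 * select one member of a dev_pm_ops structure.  A driver wanting callbacks in
 * all three suspend/resume phases could declare (foo_* being hypothetical
 * handlers):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 */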
 437
 438static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 439{
 440	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
 441		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 442		", may wakeup" : "", dev->power.driver_flags);
 443}
 444
 445static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 446			int error)
 447{
 448	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
 449		error);
 450}
 451
 452static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 453			  const char *info)
 454{
 455	ktime_t calltime;
 456	u64 usecs64;
 457	int usecs;
 458
 459	calltime = ktime_get();
 460	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 461	do_div(usecs64, NSEC_PER_USEC);
 462	usecs = usecs64;
 463	if (usecs == 0)
 464		usecs = 1;
 465
 466	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 467		  info ?: "", info ? " " : "", pm_verb(state.event),
 468		  error ? "aborted" : "complete",
 469		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 470}
 471
 472static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 473			    pm_message_t state, const char *info)
 474{
 475	ktime_t calltime;
 476	int error;
 477
 478	if (!cb)
 479		return 0;
 480
 481	calltime = initcall_debug_start(dev, cb);
 482
 483	pm_dev_dbg(dev, state, info);
 484	trace_device_pm_callback_start(dev, info, state.event);
 485	error = cb(dev);
 486	trace_device_pm_callback_end(dev, error);
 487	suspend_report_result(dev, cb, error);
 488
 489	initcall_debug_report(dev, calltime, cb, error);
 490
 491	return error;
 492}
 493
 494#ifdef CONFIG_DPM_WATCHDOG
 495struct dpm_watchdog {
 496	struct device		*dev;
 497	struct task_struct	*tsk;
 498	struct timer_list	timer;
 499};
 500
 501#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 502	struct dpm_watchdog wd
 503
 504/**
 505 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 506 * @t: The timer that PM watchdog depends on.
 507 *
 508 * Called when a driver has timed out suspending or resuming.
  509 * There's not much we can do here to recover, so panic() to
  510 * capture a crash dump in pstore.
 511 */
 512static void dpm_watchdog_handler(struct timer_list *t)
 513{
 514	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 515
 516	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 517	show_stack(wd->tsk, NULL, KERN_EMERG);
 518	panic("%s %s: unrecoverable failure\n",
 519		dev_driver_string(wd->dev), dev_name(wd->dev));
 520}
 521
 522/**
 523 * dpm_watchdog_set - Enable pm watchdog for given device.
 524 * @wd: Watchdog. Must be allocated on the stack.
 525 * @dev: Device to handle.
 526 */
 527static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 528{
 529	struct timer_list *timer = &wd->timer;
 530
 531	wd->dev = dev;
 532	wd->tsk = current;
 533
 534	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 535	/* use same timeout value for both suspend and resume */
 536	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 537	add_timer(timer);
 538}
 539
 540/**
 541 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 542 * @wd: Watchdog to disable.
 543 */
 544static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 545{
 546	struct timer_list *timer = &wd->timer;
 547
 548	del_timer_sync(timer);
 549	destroy_timer_on_stack(timer);
 550}
 551#else
 552#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 553#define dpm_watchdog_set(x, y)
 554#define dpm_watchdog_clear(x)
 555#endif
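/*
 * Illustrative note: the watchdog above is only built with
 * CONFIG_DPM_WATCHDOG=y, and the per-device timeout (in seconds) comes from
 * CONFIG_DPM_WATCHDOG_TIMEOUT, e.g. in a kernel .config:
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=120
 */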
 556
 557/*------------------------- Resume routines -------------------------*/
 558
 559/**
 560 * dev_pm_skip_resume - System-wide device resume optimization check.
 561 * @dev: Target device.
 562 *
 563 * Return:
 564 * - %false if the transition under way is RESTORE.
 565 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 566 * - The logical negation of %power.must_resume otherwise (that is, when the
 567 *   transition under way is RESUME).
 568 */
 569bool dev_pm_skip_resume(struct device *dev)
 570{
 571	if (pm_transition.event == PM_EVENT_RESTORE)
 572		return false;
 573
 574	if (pm_transition.event == PM_EVENT_THAW)
 575		return dev_pm_skip_suspend(dev);
 576
 577	return !dev->power.must_resume;
 578}
 579
 580static bool is_async(struct device *dev)
 581{
 582	return dev->power.async_suspend && pm_async_enabled
 583		&& !pm_trace_is_enabled();
 584}
 585
 586static bool dpm_async_fn(struct device *dev, async_func_t func)
 587{
 588	reinit_completion(&dev->power.completion);
 589
 590	if (is_async(dev)) {
 591		dev->power.async_in_progress = true;
 592
 593		get_device(dev);
 594
 595		if (async_schedule_dev_nocall(func, dev))
 596			return true;
 597
 598		put_device(dev);
 599	}
 600	/*
 601	 * Because async_schedule_dev_nocall() above has returned false or it
 602	 * has not been called at all, func() is not running and it is safe to
 603	 * update the async_in_progress flag without extra synchronization.
 604	 */
 605	dev->power.async_in_progress = false;
 606	return false;
 607}
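/*
 * Illustrative sketch: dpm_async_fn() only schedules func() asynchronously
 * for devices that opted in, which a driver or bus typically does once at
 * probe time with:
 *
 *	device_enable_async_suspend(dev);
 *
 * Writing 0 to /sys/power/pm_async (pm_async_enabled) disables all such
 * asynchronous handling globally.
 */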
 608
 609/**
 610 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 611 * @dev: Device to handle.
 612 * @state: PM transition of the system being carried out.
 613 * @async: If true, the device is being resumed asynchronously.
 614 *
 615 * The driver of @dev will not receive interrupts while this function is being
 616 * executed.
 617 */
 618static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 619{
 620	pm_callback_t callback = NULL;
 621	const char *info = NULL;
 622	bool skip_resume;
 623	int error = 0;
 624
 625	TRACE_DEVICE(dev);
 626	TRACE_RESUME(0);
 627
 628	if (dev->power.syscore || dev->power.direct_complete)
 629		goto Out;
 630
 631	if (!dev->power.is_noirq_suspended)
 632		goto Out;
 633
 634	if (!dpm_wait_for_superior(dev, async))
 635		goto Out;
 636
 637	skip_resume = dev_pm_skip_resume(dev);
 638	/*
 639	 * If the driver callback is skipped below or by the middle layer
 640	 * callback and device_resume_early() also skips the driver callback for
 641	 * this device later, it needs to appear as "suspended" to PM-runtime,
 642	 * so change its status accordingly.
 643	 *
 644	 * Otherwise, the device is going to be resumed, so set its PM-runtime
 645	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 646	 * to avoid confusing drivers that don't use it.
 647	 */
 648	if (skip_resume)
 649		pm_runtime_set_suspended(dev);
 650	else if (dev_pm_skip_suspend(dev))
 651		pm_runtime_set_active(dev);
 652
 653	if (dev->pm_domain) {
 654		info = "noirq power domain ";
 655		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 656	} else if (dev->type && dev->type->pm) {
 657		info = "noirq type ";
 658		callback = pm_noirq_op(dev->type->pm, state);
 659	} else if (dev->class && dev->class->pm) {
 660		info = "noirq class ";
 661		callback = pm_noirq_op(dev->class->pm, state);
 662	} else if (dev->bus && dev->bus->pm) {
 663		info = "noirq bus ";
 664		callback = pm_noirq_op(dev->bus->pm, state);
 665	}
 666	if (callback)
 667		goto Run;
 668
 669	if (skip_resume)
 670		goto Skip;
 671
 672	if (dev->driver && dev->driver->pm) {
 673		info = "noirq driver ";
 674		callback = pm_noirq_op(dev->driver->pm, state);
 675	}
 676
 677Run:
 678	error = dpm_run_callback(callback, dev, state, info);
 679
 680Skip:
 681	dev->power.is_noirq_suspended = false;
 682
 683Out:
 684	complete_all(&dev->power.completion);
 685	TRACE_RESUME(error);
 686
 687	if (error) {
 688		async_error = error;
 689		dpm_save_failed_dev(dev_name(dev));
 690		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 691	}
 692}
 693
 694static void async_resume_noirq(void *data, async_cookie_t cookie)
 695{
 696	struct device *dev = data;
 697
 698	device_resume_noirq(dev, pm_transition, true);
 699	put_device(dev);
 700}
 701
 702static void dpm_noirq_resume_devices(pm_message_t state)
 703{
 704	struct device *dev;
 705	ktime_t starttime = ktime_get();
 706
 707	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 708
 709	async_error = 0;
 710	pm_transition = state;
 711
 712	mutex_lock(&dpm_list_mtx);
 713
 714	/*
 715	 * Trigger the resume of "async" devices upfront so they don't have to
 716	 * wait for the "non-async" ones they don't depend on.
 717	 */
 718	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 719		dpm_async_fn(dev, async_resume_noirq);
 720
 721	while (!list_empty(&dpm_noirq_list)) {
 722		dev = to_device(dpm_noirq_list.next);
 723		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 724
 725		if (!dev->power.async_in_progress) {
 726			get_device(dev);
 727
 728			mutex_unlock(&dpm_list_mtx);
 729
 730			device_resume_noirq(dev, state, false);
 731
 732			put_device(dev);
 733
 734			mutex_lock(&dpm_list_mtx);
 735		}
 736	}
 737	mutex_unlock(&dpm_list_mtx);
 738	async_synchronize_full();
 739	dpm_show_time(starttime, state, 0, "noirq");
 740	if (async_error)
 741		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 742
 743	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 744}
 745
 746/**
 747 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 748 * @state: PM transition of the system being carried out.
 749 *
 750 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 751 * allow device drivers' interrupt handlers to be called.
 752 */
 753void dpm_resume_noirq(pm_message_t state)
 754{
 755	dpm_noirq_resume_devices(state);
 756
 757	resume_device_irqs();
 758	device_wakeup_disarm_wake_irqs();
 759}
 760
 761/**
 762 * device_resume_early - Execute an "early resume" callback for given device.
 763 * @dev: Device to handle.
 764 * @state: PM transition of the system being carried out.
 765 * @async: If true, the device is being resumed asynchronously.
 766 *
 767 * Runtime PM is disabled for @dev while this function is being executed.
 768 */
 769static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 770{
 771	pm_callback_t callback = NULL;
 772	const char *info = NULL;
 773	int error = 0;
 774
 775	TRACE_DEVICE(dev);
 776	TRACE_RESUME(0);
 777
 778	if (dev->power.syscore || dev->power.direct_complete)
 779		goto Out;
 780
 781	if (!dev->power.is_late_suspended)
 782		goto Out;
 783
 784	if (!dpm_wait_for_superior(dev, async))
 785		goto Out;
 786
 787	if (dev->pm_domain) {
 788		info = "early power domain ";
 789		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 790	} else if (dev->type && dev->type->pm) {
 791		info = "early type ";
 792		callback = pm_late_early_op(dev->type->pm, state);
 793	} else if (dev->class && dev->class->pm) {
 794		info = "early class ";
 795		callback = pm_late_early_op(dev->class->pm, state);
 796	} else if (dev->bus && dev->bus->pm) {
 797		info = "early bus ";
 798		callback = pm_late_early_op(dev->bus->pm, state);
 799	}
 800	if (callback)
 801		goto Run;
 802
 803	if (dev_pm_skip_resume(dev))
 804		goto Skip;
 805
 806	if (dev->driver && dev->driver->pm) {
 807		info = "early driver ";
 808		callback = pm_late_early_op(dev->driver->pm, state);
 809	}
 810
 811Run:
 812	error = dpm_run_callback(callback, dev, state, info);
 813
 814Skip:
 815	dev->power.is_late_suspended = false;
 816
 817Out:
 818	TRACE_RESUME(error);
 819
 820	pm_runtime_enable(dev);
 821	complete_all(&dev->power.completion);
 822
 823	if (error) {
 824		async_error = error;
 825		dpm_save_failed_dev(dev_name(dev));
 826		pm_dev_err(dev, state, async ? " async early" : " early", error);
 827	}
 828}
 829
 830static void async_resume_early(void *data, async_cookie_t cookie)
 831{
 832	struct device *dev = data;
 833
 834	device_resume_early(dev, pm_transition, true);
 835	put_device(dev);
 836}
 837
 838/**
 839 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 840 * @state: PM transition of the system being carried out.
 841 */
 842void dpm_resume_early(pm_message_t state)
 843{
 844	struct device *dev;
 845	ktime_t starttime = ktime_get();
 846
 847	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 848
 849	async_error = 0;
 850	pm_transition = state;
 851
 852	mutex_lock(&dpm_list_mtx);
 853
 854	/*
 855	 * Trigger the resume of "async" devices upfront so they don't have to
 856	 * wait for the "non-async" ones they don't depend on.
 857	 */
 858	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 859		dpm_async_fn(dev, async_resume_early);
 860
 861	while (!list_empty(&dpm_late_early_list)) {
 862		dev = to_device(dpm_late_early_list.next);
 863		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 864
 865		if (!dev->power.async_in_progress) {
 866			get_device(dev);
 867
 868			mutex_unlock(&dpm_list_mtx);
 869
 870			device_resume_early(dev, state, false);
 871
 872			put_device(dev);
 873
 874			mutex_lock(&dpm_list_mtx);
 875		}
 876	}
 877	mutex_unlock(&dpm_list_mtx);
 878	async_synchronize_full();
 879	dpm_show_time(starttime, state, 0, "early");
 880	if (async_error)
 881		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 882
 883	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 884}
 885
 886/**
 887 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 888 * @state: PM transition of the system being carried out.
 889 */
 890void dpm_resume_start(pm_message_t state)
 891{
 892	dpm_resume_noirq(state);
 893	dpm_resume_early(state);
 894}
 895EXPORT_SYMBOL_GPL(dpm_resume_start);
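/*
 * Illustrative note (simplified): callers such as the hibernation core
 * bracket the low-level transition with these paired helpers, roughly:
 *
 *	dpm_suspend_start(PMSG_FREEZE);
 *	dpm_suspend_end(PMSG_FREEZE);
 *	...create the hibernation image...
 *	dpm_resume_start(PMSG_THAW);
 *	dpm_resume_end(PMSG_THAW);
 */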
 896
 897/**
 898 * device_resume - Execute "resume" callbacks for given device.
 899 * @dev: Device to handle.
 900 * @state: PM transition of the system being carried out.
 901 * @async: If true, the device is being resumed asynchronously.
 902 */
 903static void device_resume(struct device *dev, pm_message_t state, bool async)
 904{
 905	pm_callback_t callback = NULL;
 906	const char *info = NULL;
 907	int error = 0;
 908	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 909
 910	TRACE_DEVICE(dev);
 911	TRACE_RESUME(0);
 912
 913	if (dev->power.syscore)
 914		goto Complete;
 915
 916	if (dev->power.direct_complete) {
  917		/* Match the pm_runtime_disable() in device_suspend(). */
 918		pm_runtime_enable(dev);
 919		goto Complete;
 920	}
 921
 922	if (!dpm_wait_for_superior(dev, async))
 923		goto Complete;
 924
 925	dpm_watchdog_set(&wd, dev);
 926	device_lock(dev);
 927
 928	/*
 929	 * This is a fib.  But we'll allow new children to be added below
 930	 * a resumed device, even if the device hasn't been completed yet.
 931	 */
 932	dev->power.is_prepared = false;
 933
 934	if (!dev->power.is_suspended)
 935		goto Unlock;
 936
 937	if (dev->pm_domain) {
 938		info = "power domain ";
 939		callback = pm_op(&dev->pm_domain->ops, state);
 940		goto Driver;
 941	}
 942
 943	if (dev->type && dev->type->pm) {
 944		info = "type ";
 945		callback = pm_op(dev->type->pm, state);
 946		goto Driver;
 947	}
 948
 949	if (dev->class && dev->class->pm) {
 950		info = "class ";
 951		callback = pm_op(dev->class->pm, state);
 952		goto Driver;
 953	}
 954
 955	if (dev->bus) {
 956		if (dev->bus->pm) {
 957			info = "bus ";
 958			callback = pm_op(dev->bus->pm, state);
 959		} else if (dev->bus->resume) {
 960			info = "legacy bus ";
 961			callback = dev->bus->resume;
 962			goto End;
 963		}
 964	}
 965
 966 Driver:
 967	if (!callback && dev->driver && dev->driver->pm) {
 968		info = "driver ";
 969		callback = pm_op(dev->driver->pm, state);
 970	}
 971
 972 End:
 973	error = dpm_run_callback(callback, dev, state, info);
 974	dev->power.is_suspended = false;
 975
 976 Unlock:
 977	device_unlock(dev);
 978	dpm_watchdog_clear(&wd);
 979
 980 Complete:
 981	complete_all(&dev->power.completion);
 982
 983	TRACE_RESUME(error);
 984
 985	if (error) {
 986		async_error = error;
 987		dpm_save_failed_dev(dev_name(dev));
 988		pm_dev_err(dev, state, async ? " async" : "", error);
 989	}
 990}
 991
 992static void async_resume(void *data, async_cookie_t cookie)
 993{
 994	struct device *dev = data;
 995
 996	device_resume(dev, pm_transition, true);
 997	put_device(dev);
 998}
 999
1000/**
1001 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1002 * @state: PM transition of the system being carried out.
1003 *
1004 * Execute the appropriate "resume" callback for all devices whose status
1005 * indicates that they are suspended.
1006 */
1007void dpm_resume(pm_message_t state)
1008{
1009	struct device *dev;
1010	ktime_t starttime = ktime_get();
1011
1012	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1013	might_sleep();
1014
1015	pm_transition = state;
1016	async_error = 0;
1017
1018	mutex_lock(&dpm_list_mtx);
1019
1020	/*
1021	 * Trigger the resume of "async" devices upfront so they don't have to
1022	 * wait for the "non-async" ones they don't depend on.
1023	 */
1024	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1025		dpm_async_fn(dev, async_resume);
1026
1027	while (!list_empty(&dpm_suspended_list)) {
1028		dev = to_device(dpm_suspended_list.next);
1029		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1030
1031		if (!dev->power.async_in_progress) {
1032			get_device(dev);
1033
1034			mutex_unlock(&dpm_list_mtx);
1035
1036			device_resume(dev, state, false);
1037
1038			put_device(dev);
1039
1040			mutex_lock(&dpm_list_mtx);
1041		}
1042	}
1043	mutex_unlock(&dpm_list_mtx);
1044	async_synchronize_full();
1045	dpm_show_time(starttime, state, 0, NULL);
1046	if (async_error)
1047		dpm_save_failed_step(SUSPEND_RESUME);
1048
1049	cpufreq_resume();
1050	devfreq_resume();
1051	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1052}
1053
1054/**
1055 * device_complete - Complete a PM transition for given device.
1056 * @dev: Device to handle.
1057 * @state: PM transition of the system being carried out.
1058 */
1059static void device_complete(struct device *dev, pm_message_t state)
1060{
1061	void (*callback)(struct device *) = NULL;
1062	const char *info = NULL;
1063
1064	if (dev->power.syscore)
1065		goto out;
1066
1067	device_lock(dev);
1068
1069	if (dev->pm_domain) {
1070		info = "completing power domain ";
1071		callback = dev->pm_domain->ops.complete;
1072	} else if (dev->type && dev->type->pm) {
1073		info = "completing type ";
1074		callback = dev->type->pm->complete;
1075	} else if (dev->class && dev->class->pm) {
1076		info = "completing class ";
1077		callback = dev->class->pm->complete;
1078	} else if (dev->bus && dev->bus->pm) {
1079		info = "completing bus ";
1080		callback = dev->bus->pm->complete;
1081	}
1082
1083	if (!callback && dev->driver && dev->driver->pm) {
1084		info = "completing driver ";
1085		callback = dev->driver->pm->complete;
1086	}
1087
1088	if (callback) {
1089		pm_dev_dbg(dev, state, info);
1090		callback(dev);
1091	}
1092
1093	device_unlock(dev);
1094
1095out:
1096	pm_runtime_put(dev);
1097}
1098
1099/**
1100 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1101 * @state: PM transition of the system being carried out.
1102 *
1103 * Execute the ->complete() callbacks for all devices whose PM status is not
1104 * DPM_ON (this allows new devices to be registered).
1105 */
1106void dpm_complete(pm_message_t state)
1107{
1108	struct list_head list;
1109
1110	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1111	might_sleep();
1112
1113	INIT_LIST_HEAD(&list);
1114	mutex_lock(&dpm_list_mtx);
1115	while (!list_empty(&dpm_prepared_list)) {
1116		struct device *dev = to_device(dpm_prepared_list.prev);
1117
1118		get_device(dev);
1119		dev->power.is_prepared = false;
1120		list_move(&dev->power.entry, &list);
1121
1122		mutex_unlock(&dpm_list_mtx);
1123
1124		trace_device_pm_callback_start(dev, "", state.event);
1125		device_complete(dev, state);
1126		trace_device_pm_callback_end(dev, 0);
1127
1128		put_device(dev);
1129
1130		mutex_lock(&dpm_list_mtx);
1131	}
1132	list_splice(&list, &dpm_list);
1133	mutex_unlock(&dpm_list_mtx);
1134
1135	/* Allow device probing and trigger re-probing of deferred devices */
1136	device_unblock_probing();
1137	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1138}
1139
1140/**
1141 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1142 * @state: PM transition of the system being carried out.
1143 *
1144 * Execute "resume" callbacks for all devices and complete the PM transition of
1145 * the system.
1146 */
1147void dpm_resume_end(pm_message_t state)
1148{
1149	dpm_resume(state);
1150	dpm_complete(state);
1151}
1152EXPORT_SYMBOL_GPL(dpm_resume_end);
1153
1154
1155/*------------------------- Suspend routines -------------------------*/
1156
1157/**
1158 * resume_event - Return a "resume" message for given "suspend" sleep state.
1159 * @sleep_state: PM message representing a sleep state.
1160 *
1161 * Return a PM message representing the resume event corresponding to given
1162 * sleep state.
1163 */
1164static pm_message_t resume_event(pm_message_t sleep_state)
1165{
1166	switch (sleep_state.event) {
1167	case PM_EVENT_SUSPEND:
1168		return PMSG_RESUME;
1169	case PM_EVENT_FREEZE:
1170	case PM_EVENT_QUIESCE:
1171		return PMSG_RECOVER;
1172	case PM_EVENT_HIBERNATE:
1173		return PMSG_RESTORE;
1174	}
1175	return PMSG_ON;
1176}
1177
1178static void dpm_superior_set_must_resume(struct device *dev)
1179{
1180	struct device_link *link;
1181	int idx;
1182
1183	if (dev->parent)
1184		dev->parent->power.must_resume = true;
1185
1186	idx = device_links_read_lock();
1187
1188	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1189		link->supplier->power.must_resume = true;
1190
1191	device_links_read_unlock(idx);
1192}
1193
1194/**
1195 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1196 * @dev: Device to handle.
1197 * @state: PM transition of the system being carried out.
1198 * @async: If true, the device is being suspended asynchronously.
1199 *
1200 * The driver of @dev will not receive interrupts while this function is being
1201 * executed.
1202 */
1203static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1204{
1205	pm_callback_t callback = NULL;
1206	const char *info = NULL;
1207	int error = 0;
1208
1209	TRACE_DEVICE(dev);
1210	TRACE_SUSPEND(0);
1211
1212	dpm_wait_for_subordinate(dev, async);
1213
1214	if (async_error)
1215		goto Complete;
1216
1217	if (dev->power.syscore || dev->power.direct_complete)
1218		goto Complete;
1219
1220	if (dev->pm_domain) {
1221		info = "noirq power domain ";
1222		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1223	} else if (dev->type && dev->type->pm) {
1224		info = "noirq type ";
1225		callback = pm_noirq_op(dev->type->pm, state);
1226	} else if (dev->class && dev->class->pm) {
1227		info = "noirq class ";
1228		callback = pm_noirq_op(dev->class->pm, state);
1229	} else if (dev->bus && dev->bus->pm) {
1230		info = "noirq bus ";
1231		callback = pm_noirq_op(dev->bus->pm, state);
1232	}
1233	if (callback)
1234		goto Run;
1235
1236	if (dev_pm_skip_suspend(dev))
1237		goto Skip;
1238
1239	if (dev->driver && dev->driver->pm) {
1240		info = "noirq driver ";
1241		callback = pm_noirq_op(dev->driver->pm, state);
1242	}
1243
1244Run:
1245	error = dpm_run_callback(callback, dev, state, info);
1246	if (error) {
1247		async_error = error;
1248		dpm_save_failed_dev(dev_name(dev));
1249		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1250		goto Complete;
1251	}
1252
1253Skip:
1254	dev->power.is_noirq_suspended = true;
1255
1256	/*
1257	 * Skipping the resume of devices that were in use right before the
1258	 * system suspend (as indicated by their PM-runtime usage counters)
 1259	 * would be suboptimal.  Also resume them if skipping their resume is
 1260	 * not allowed.
1261	 */
1262	if (atomic_read(&dev->power.usage_count) > 1 ||
1263	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1264	      dev->power.may_skip_resume))
1265		dev->power.must_resume = true;
1266
1267	if (dev->power.must_resume)
1268		dpm_superior_set_must_resume(dev);
1269
1270Complete:
1271	complete_all(&dev->power.completion);
1272	TRACE_SUSPEND(error);
1273	return error;
1274}
1275
1276static void async_suspend_noirq(void *data, async_cookie_t cookie)
1277{
1278	struct device *dev = data;
1279
1280	device_suspend_noirq(dev, pm_transition, true);
1281	put_device(dev);
1282}
1283
1284static int dpm_noirq_suspend_devices(pm_message_t state)
1285{
1286	ktime_t starttime = ktime_get();
1287	int error = 0;
1288
1289	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1290
1291	pm_transition = state;
1292	async_error = 0;
1293
1294	mutex_lock(&dpm_list_mtx);
1295
1296	while (!list_empty(&dpm_late_early_list)) {
1297		struct device *dev = to_device(dpm_late_early_list.prev);
1298
1299		list_move(&dev->power.entry, &dpm_noirq_list);
1300
1301		if (dpm_async_fn(dev, async_suspend_noirq))
1302			continue;
1303
1304		get_device(dev);
1305
1306		mutex_unlock(&dpm_list_mtx);
1307
1308		error = device_suspend_noirq(dev, state, false);
1309
1310		put_device(dev);
1311
1312		mutex_lock(&dpm_list_mtx);
1313
1314		if (error || async_error)
1315			break;
1316	}
1317
1318	mutex_unlock(&dpm_list_mtx);
1319
1320	async_synchronize_full();
1321	if (!error)
1322		error = async_error;
1323
1324	if (error)
1325		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1326
1327	dpm_show_time(starttime, state, error, "noirq");
1328	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1329	return error;
1330}
1331
1332/**
1333 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1334 * @state: PM transition of the system being carried out.
1335 *
1336 * Prevent device drivers' interrupt handlers from being called and invoke
1337 * "noirq" suspend callbacks for all non-sysdev devices.
1338 */
1339int dpm_suspend_noirq(pm_message_t state)
1340{
1341	int ret;
1342
1343	device_wakeup_arm_wake_irqs();
1344	suspend_device_irqs();
1345
1346	ret = dpm_noirq_suspend_devices(state);
1347	if (ret)
1348		dpm_resume_noirq(resume_event(state));
1349
1350	return ret;
1351}
1352
1353static void dpm_propagate_wakeup_to_parent(struct device *dev)
1354{
1355	struct device *parent = dev->parent;
1356
1357	if (!parent)
1358		return;
1359
1360	spin_lock_irq(&parent->power.lock);
1361
1362	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1363		parent->power.wakeup_path = true;
1364
1365	spin_unlock_irq(&parent->power.lock);
1366}
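/*
 * Illustrative note: device_wakeup_path() above reflects power.wakeup_path,
 * which is only meaningful for devices that declared wakeup capability,
 * typically once at probe time via:
 *
 *	device_init_wakeup(dev, true);
 */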
1367
1368/**
1369 * device_suspend_late - Execute a "late suspend" callback for given device.
1370 * @dev: Device to handle.
1371 * @state: PM transition of the system being carried out.
1372 * @async: If true, the device is being suspended asynchronously.
1373 *
1374 * Runtime PM is disabled for @dev while this function is being executed.
1375 */
1376static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1377{
1378	pm_callback_t callback = NULL;
1379	const char *info = NULL;
1380	int error = 0;
1381
1382	TRACE_DEVICE(dev);
1383	TRACE_SUSPEND(0);
1384
1385	__pm_runtime_disable(dev, false);
1386
1387	dpm_wait_for_subordinate(dev, async);
1388
1389	if (async_error)
1390		goto Complete;
1391
1392	if (pm_wakeup_pending()) {
1393		async_error = -EBUSY;
1394		goto Complete;
1395	}
1396
1397	if (dev->power.syscore || dev->power.direct_complete)
1398		goto Complete;
1399
1400	if (dev->pm_domain) {
1401		info = "late power domain ";
1402		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1403	} else if (dev->type && dev->type->pm) {
1404		info = "late type ";
1405		callback = pm_late_early_op(dev->type->pm, state);
1406	} else if (dev->class && dev->class->pm) {
1407		info = "late class ";
1408		callback = pm_late_early_op(dev->class->pm, state);
1409	} else if (dev->bus && dev->bus->pm) {
1410		info = "late bus ";
1411		callback = pm_late_early_op(dev->bus->pm, state);
1412	}
1413	if (callback)
1414		goto Run;
1415
1416	if (dev_pm_skip_suspend(dev))
1417		goto Skip;
1418
1419	if (dev->driver && dev->driver->pm) {
1420		info = "late driver ";
1421		callback = pm_late_early_op(dev->driver->pm, state);
1422	}
1423
1424Run:
1425	error = dpm_run_callback(callback, dev, state, info);
1426	if (error) {
1427		async_error = error;
1428		dpm_save_failed_dev(dev_name(dev));
1429		pm_dev_err(dev, state, async ? " async late" : " late", error);
1430		goto Complete;
1431	}
1432	dpm_propagate_wakeup_to_parent(dev);
1433
1434Skip:
1435	dev->power.is_late_suspended = true;
1436
1437Complete:
1438	TRACE_SUSPEND(error);
1439	complete_all(&dev->power.completion);
1440	return error;
1441}
1442
1443static void async_suspend_late(void *data, async_cookie_t cookie)
1444{
1445	struct device *dev = data;
1446
1447	device_suspend_late(dev, pm_transition, true);
1448	put_device(dev);
1449}
1450
1451/**
1452 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1453 * @state: PM transition of the system being carried out.
1454 */
1455int dpm_suspend_late(pm_message_t state)
1456{
1457	ktime_t starttime = ktime_get();
1458	int error = 0;
1459
1460	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1461
1462	pm_transition = state;
1463	async_error = 0;
1464
1465	wake_up_all_idle_cpus();
1466
1467	mutex_lock(&dpm_list_mtx);
1468
1469	while (!list_empty(&dpm_suspended_list)) {
1470		struct device *dev = to_device(dpm_suspended_list.prev);
1471
1472		list_move(&dev->power.entry, &dpm_late_early_list);
1473
1474		if (dpm_async_fn(dev, async_suspend_late))
1475			continue;
1476
1477		get_device(dev);
1478
1479		mutex_unlock(&dpm_list_mtx);
1480
1481		error = device_suspend_late(dev, state, false);
1482
1483		put_device(dev);
1484
1485		mutex_lock(&dpm_list_mtx);
1486
1487		if (error || async_error)
1488			break;
1489	}
1490
1491	mutex_unlock(&dpm_list_mtx);
1492
1493	async_synchronize_full();
1494	if (!error)
1495		error = async_error;
1496
1497	if (error) {
1498		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1499		dpm_resume_early(resume_event(state));
1500	}
1501	dpm_show_time(starttime, state, error, "late");
1502	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1503	return error;
1504}
1505
1506/**
1507 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1508 * @state: PM transition of the system being carried out.
1509 */
1510int dpm_suspend_end(pm_message_t state)
1511{
1512	ktime_t starttime = ktime_get();
1513	int error;
1514
1515	error = dpm_suspend_late(state);
1516	if (error)
1517		goto out;
1518
1519	error = dpm_suspend_noirq(state);
1520	if (error)
1521		dpm_resume_early(resume_event(state));
1522
1523out:
1524	dpm_show_time(starttime, state, error, "end");
1525	return error;
1526}
1527EXPORT_SYMBOL_GPL(dpm_suspend_end);
1528
1529/**
1530 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1531 * @dev: Device to suspend.
1532 * @state: PM transition of the system being carried out.
1533 * @cb: Suspend callback to execute.
1534 * @info: string description of caller.
1535 */
1536static int legacy_suspend(struct device *dev, pm_message_t state,
1537			  int (*cb)(struct device *dev, pm_message_t state),
1538			  const char *info)
1539{
1540	int error;
1541	ktime_t calltime;
1542
1543	calltime = initcall_debug_start(dev, cb);
1544
1545	trace_device_pm_callback_start(dev, info, state.event);
1546	error = cb(dev, state);
1547	trace_device_pm_callback_end(dev, error);
1548	suspend_report_result(dev, cb, error);
1549
1550	initcall_debug_report(dev, calltime, cb, error);
1551
1552	return error;
1553}
1554
1555static void dpm_clear_superiors_direct_complete(struct device *dev)
1556{
1557	struct device_link *link;
1558	int idx;
1559
1560	if (dev->parent) {
1561		spin_lock_irq(&dev->parent->power.lock);
1562		dev->parent->power.direct_complete = false;
1563		spin_unlock_irq(&dev->parent->power.lock);
1564	}
1565
1566	idx = device_links_read_lock();
1567
1568	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1569		spin_lock_irq(&link->supplier->power.lock);
1570		link->supplier->power.direct_complete = false;
1571		spin_unlock_irq(&link->supplier->power.lock);
1572	}
1573
1574	device_links_read_unlock(idx);
1575}
1576
1577/**
1578 * device_suspend - Execute "suspend" callbacks for given device.
1579 * @dev: Device to handle.
1580 * @state: PM transition of the system being carried out.
1581 * @async: If true, the device is being suspended asynchronously.
1582 */
1583static int device_suspend(struct device *dev, pm_message_t state, bool async)
1584{
1585	pm_callback_t callback = NULL;
1586	const char *info = NULL;
1587	int error = 0;
1588	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1589
1590	TRACE_DEVICE(dev);
1591	TRACE_SUSPEND(0);
1592
1593	dpm_wait_for_subordinate(dev, async);
1594
1595	if (async_error) {
1596		dev->power.direct_complete = false;
1597		goto Complete;
1598	}
1599
1600	/*
1601	 * Wait for possible runtime PM transitions of the device in progress
1602	 * to complete and if there's a runtime resume request pending for it,
1603	 * resume it before proceeding with invoking the system-wide suspend
1604	 * callbacks for it.
1605	 *
1606	 * If the system-wide suspend callbacks below change the configuration
1607	 * of the device, they must disable runtime PM for it or otherwise
1608	 * ensure that its runtime-resume callbacks will not be confused by that
1609	 * change in case they are invoked going forward.
1610	 */
1611	pm_runtime_barrier(dev);
1612
1613	if (pm_wakeup_pending()) {
1614		dev->power.direct_complete = false;
1615		async_error = -EBUSY;
1616		goto Complete;
1617	}
1618
1619	if (dev->power.syscore)
1620		goto Complete;
1621
1622	/* Avoid direct_complete to let wakeup_path propagate. */
1623	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1624		dev->power.direct_complete = false;
1625
1626	if (dev->power.direct_complete) {
1627		if (pm_runtime_status_suspended(dev)) {
1628			pm_runtime_disable(dev);
1629			if (pm_runtime_status_suspended(dev)) {
1630				pm_dev_dbg(dev, state, "direct-complete ");
1631				goto Complete;
1632			}
1633
1634			pm_runtime_enable(dev);
1635		}
1636		dev->power.direct_complete = false;
1637	}
1638
1639	dev->power.may_skip_resume = true;
1640	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1641
1642	dpm_watchdog_set(&wd, dev);
1643	device_lock(dev);
1644
1645	if (dev->pm_domain) {
1646		info = "power domain ";
1647		callback = pm_op(&dev->pm_domain->ops, state);
1648		goto Run;
1649	}
1650
1651	if (dev->type && dev->type->pm) {
1652		info = "type ";
1653		callback = pm_op(dev->type->pm, state);
1654		goto Run;
1655	}
1656
1657	if (dev->class && dev->class->pm) {
1658		info = "class ";
1659		callback = pm_op(dev->class->pm, state);
1660		goto Run;
1661	}
1662
1663	if (dev->bus) {
1664		if (dev->bus->pm) {
1665			info = "bus ";
1666			callback = pm_op(dev->bus->pm, state);
1667		} else if (dev->bus->suspend) {
1668			pm_dev_dbg(dev, state, "legacy bus ");
1669			error = legacy_suspend(dev, state, dev->bus->suspend,
1670						"legacy bus ");
1671			goto End;
1672		}
1673	}
1674
1675 Run:
1676	if (!callback && dev->driver && dev->driver->pm) {
1677		info = "driver ";
1678		callback = pm_op(dev->driver->pm, state);
1679	}
1680
1681	error = dpm_run_callback(callback, dev, state, info);
1682
1683 End:
1684	if (!error) {
1685		dev->power.is_suspended = true;
1686		if (device_may_wakeup(dev))
1687			dev->power.wakeup_path = true;
1688
1689		dpm_propagate_wakeup_to_parent(dev);
1690		dpm_clear_superiors_direct_complete(dev);
1691	}
1692
1693	device_unlock(dev);
1694	dpm_watchdog_clear(&wd);
1695
1696 Complete:
1697	if (error) {
1698		async_error = error;
1699		dpm_save_failed_dev(dev_name(dev));
1700		pm_dev_err(dev, state, async ? " async" : "", error);
1701	}
1702
1703	complete_all(&dev->power.completion);
1704	TRACE_SUSPEND(error);
1705	return error;
1706}
1707
1708static void async_suspend(void *data, async_cookie_t cookie)
1709{
1710	struct device *dev = data;
1711
1712	device_suspend(dev, pm_transition, true);
1713	put_device(dev);
1714}
1715
1716/**
1717 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1718 * @state: PM transition of the system being carried out.
1719 */
1720int dpm_suspend(pm_message_t state)
1721{
1722	ktime_t starttime = ktime_get();
1723	int error = 0;
1724
1725	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1726	might_sleep();
1727
1728	devfreq_suspend();
1729	cpufreq_suspend();
1730
1731	pm_transition = state;
1732	async_error = 0;
1733
1734	mutex_lock(&dpm_list_mtx);
1735
1736	while (!list_empty(&dpm_prepared_list)) {
1737		struct device *dev = to_device(dpm_prepared_list.prev);
1738
1739		list_move(&dev->power.entry, &dpm_suspended_list);
1740
1741		if (dpm_async_fn(dev, async_suspend))
1742			continue;
1743
1744		get_device(dev);
1745
1746		mutex_unlock(&dpm_list_mtx);
1747
1748		error = device_suspend(dev, state, false);
1749
1750		put_device(dev);
1751
1752		mutex_lock(&dpm_list_mtx);
1753
1754		if (error || async_error)
1755			break;
1756	}
1757
1758	mutex_unlock(&dpm_list_mtx);
1759
1760	async_synchronize_full();
1761	if (!error)
1762		error = async_error;
1763
1764	if (error)
1765		dpm_save_failed_step(SUSPEND_SUSPEND);
1766
1767	dpm_show_time(starttime, state, error, NULL);
1768	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1769	return error;
1770}
1771
1772/**
1773 * device_prepare - Prepare a device for system power transition.
1774 * @dev: Device to handle.
1775 * @state: PM transition of the system being carried out.
1776 *
1777 * Execute the ->prepare() callback(s) for given device.  No new children of the
1778 * device may be registered after this function has returned.
1779 */
1780static int device_prepare(struct device *dev, pm_message_t state)
1781{
1782	int (*callback)(struct device *) = NULL;
1783	int ret = 0;
1784
1785	/*
1786	 * If a device's parent goes into runtime suspend at the wrong time,
1787	 * it won't be possible to resume the device.  To prevent this we
1788	 * block runtime suspend here, during the prepare phase, and allow
1789	 * it again during the complete phase.
1790	 */
1791	pm_runtime_get_noresume(dev);
1792
1793	if (dev->power.syscore)
1794		return 0;
1795
1796	device_lock(dev);
1797
1798	dev->power.wakeup_path = false;
1799
1800	if (dev->power.no_pm_callbacks)
1801		goto unlock;
1802
1803	if (dev->pm_domain)
1804		callback = dev->pm_domain->ops.prepare;
1805	else if (dev->type && dev->type->pm)
1806		callback = dev->type->pm->prepare;
1807	else if (dev->class && dev->class->pm)
1808		callback = dev->class->pm->prepare;
1809	else if (dev->bus && dev->bus->pm)
1810		callback = dev->bus->pm->prepare;
1811
1812	if (!callback && dev->driver && dev->driver->pm)
1813		callback = dev->driver->pm->prepare;
1814
1815	if (callback)
1816		ret = callback(dev);
1817
1818unlock:
1819	device_unlock(dev);
1820
1821	if (ret < 0) {
1822		suspend_report_result(dev, callback, ret);
1823		pm_runtime_put(dev);
1824		return ret;
1825	}
1826	/*
1827	 * A positive return value from ->prepare() means "this device appears
1828	 * to be runtime-suspended and its state is fine, so if it really is
1829	 * runtime-suspended, you can leave it in that state provided that you
1830	 * will do the same thing with all of its descendants".  This only
1831	 * applies to suspend transitions, however.
1832	 */
1833	spin_lock_irq(&dev->power.lock);
1834	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1835		(ret > 0 || dev->power.no_pm_callbacks) &&
1836		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1837	spin_unlock_irq(&dev->power.lock);
1838	return 0;
1839}
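/*
 * Illustrative sketch: per the comment above, a driver opts into the
 * direct_complete optimization by returning a positive value from its
 * ->prepare() callback, e.g. (foo_prepare being hypothetical):
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev); // 1: leave it suspended
 *	}
 */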
1840
1841/**
1842 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1843 * @state: PM transition of the system being carried out.
1844 *
1845 * Execute the ->prepare() callback(s) for all devices.
1846 */
1847int dpm_prepare(pm_message_t state)
1848{
1849	int error = 0;
1850
1851	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1852	might_sleep();
1853
1854	/*
 1855	 * Give the known devices a chance to complete their probes before we
 1856	 * disable probing of devices.  This sync point is important at least
 1857	 * at boot time and during hibernation restore.
1858	 */
1859	wait_for_device_probe();
1860	/*
 1861	 * Probing devices during suspend or hibernation is unsafe and would
 1862	 * make system behavior unpredictable, so prohibit device probing here
 1863	 * and defer probes instead.  The normal behavior will be restored in
 1864	 * dpm_complete().
1865	 */
1866	device_block_probing();
1867
1868	mutex_lock(&dpm_list_mtx);
1869	while (!list_empty(&dpm_list) && !error) {
1870		struct device *dev = to_device(dpm_list.next);
1871
1872		get_device(dev);
1873
1874		mutex_unlock(&dpm_list_mtx);
1875
1876		trace_device_pm_callback_start(dev, "", state.event);
1877		error = device_prepare(dev, state);
1878		trace_device_pm_callback_end(dev, error);
1879
1880		mutex_lock(&dpm_list_mtx);
1881
1882		if (!error) {
1883			dev->power.is_prepared = true;
1884			if (!list_empty(&dev->power.entry))
1885				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1886		} else if (error == -EAGAIN) {
1887			error = 0;
1888		} else {
1889			dev_info(dev, "not prepared for power transition: code %d\n",
1890				 error);
1891		}
1892
1893		mutex_unlock(&dpm_list_mtx);
1894
1895		put_device(dev);
1896
1897		mutex_lock(&dpm_list_mtx);
1898	}
1899	mutex_unlock(&dpm_list_mtx);
1900	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1901	return error;
1902}
1903
1904/**
1905 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1906 * @state: PM transition of the system being carried out.
1907 *
1908 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1909 * callbacks for them.
1910 */
1911int dpm_suspend_start(pm_message_t state)
1912{
1913	ktime_t starttime = ktime_get();
1914	int error;
1915
1916	error = dpm_prepare(state);
1917	if (error)
1918		dpm_save_failed_step(SUSPEND_PREPARE);
1919	else
1920		error = dpm_suspend(state);
1921
1922	dpm_show_time(starttime, state, error, "start");
1923	return error;
1924}
1925EXPORT_SYMBOL_GPL(dpm_suspend_start);
1926
1927void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1928{
1929	if (ret)
1930		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1931}
1932EXPORT_SYMBOL_GPL(__suspend_report_result);
1933
1934/**
1935 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1936 * @subordinate: Device that needs to wait for @dev.
1937 * @dev: Device to wait for.
1938 */
1939int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1940{
1941	dpm_wait(dev, subordinate->power.async_suspend);
1942	return async_error;
1943}
1944EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
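/*
 * Illustrative sketch: a driver whose resume depends on some other device
 * outside the parent/child and device-link graphs could order against it in
 * its own callback (foo_* and other_dev are hypothetical):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, other_dev);
 *
 *		if (error)
 *			return error;
 *		return foo_reinit_hw(dev);
 *	}
 */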
1945
1946/**
1947 * dpm_for_each_dev - device iterator.
1948 * @data: data for the callback.
1949 * @fn: function to be called for each device.
1950 *
1951 * Iterate over devices in dpm_list, and call @fn for each device,
1952 * passing it @data.
1953 */
1954void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1955{
1956	struct device *dev;
1957
1958	if (!fn)
1959		return;
1960
1961	device_pm_lock();
1962	list_for_each_entry(dev, &dpm_list, power.entry)
1963		fn(dev, data);
1964	device_pm_unlock();
1965}
1966EXPORT_SYMBOL_GPL(dpm_for_each_dev);
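/*
 * Illustrative sketch: a hypothetical caller counting the devices currently
 * on dpm_list:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 */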
1967
1968static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1969{
1970	if (!ops)
1971		return true;
1972
1973	return !ops->prepare &&
1974	       !ops->suspend &&
1975	       !ops->suspend_late &&
1976	       !ops->suspend_noirq &&
1977	       !ops->resume_noirq &&
1978	       !ops->resume_early &&
1979	       !ops->resume &&
1980	       !ops->complete;
1981}
1982
1983void device_pm_check_callbacks(struct device *dev)
1984{
1985	unsigned long flags;
1986
1987	spin_lock_irqsave(&dev->power.lock, flags);
1988	dev->power.no_pm_callbacks =
1989		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1990		 !dev->bus->suspend && !dev->bus->resume)) &&
1991		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1992		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1993		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1994		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1995		 !dev->driver->suspend && !dev->driver->resume));
1996	spin_unlock_irqrestore(&dev->power.lock, flags);
1997}
1998
1999bool dev_pm_skip_suspend(struct device *dev)
2000{
2001	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2002		pm_runtime_status_suspended(dev);
2003}
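/*
 * Illustrative sketch: the checks above ultimately depend on driver flags
 * that are normally set once at probe time, e.g.:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 */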