v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/mutex.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm-trace.h>
  26#include <linux/pm_wakeirq.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/sched/debug.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43#define list_for_each_entry_rcu_locked(pos, head, member) \
  44	list_for_each_entry_rcu(pos, head, member, \
  45			device_links_read_lock_held())
  46
  47/*
   48 * The entries in dpm_list are in depth-first order, simply
  49 * because children are guaranteed to be discovered after parents, and
  50 * are inserted at the back of the list on discovery.
  51 *
  52 * Since device_pm_add() may be called with a device lock held,
  53 * we must never try to acquire a device lock while holding
  54 * dpm_list_mutex.
  55 */
  56
  57LIST_HEAD(dpm_list);
  58static LIST_HEAD(dpm_prepared_list);
  59static LIST_HEAD(dpm_suspended_list);
  60static LIST_HEAD(dpm_late_early_list);
  61static LIST_HEAD(dpm_noirq_list);
  62
  63struct suspend_stats suspend_stats;
  64static DEFINE_MUTEX(dpm_list_mtx);
  65static pm_message_t pm_transition;
  66
  67static int async_error;
  68
  69static const char *pm_verb(int event)
  70{
  71	switch (event) {
  72	case PM_EVENT_SUSPEND:
  73		return "suspend";
  74	case PM_EVENT_RESUME:
  75		return "resume";
  76	case PM_EVENT_FREEZE:
  77		return "freeze";
  78	case PM_EVENT_QUIESCE:
  79		return "quiesce";
  80	case PM_EVENT_HIBERNATE:
  81		return "hibernate";
  82	case PM_EVENT_THAW:
  83		return "thaw";
  84	case PM_EVENT_RESTORE:
  85		return "restore";
  86	case PM_EVENT_RECOVER:
  87		return "recover";
  88	default:
  89		return "(unknown PM event)";
  90	}
  91}
  92
  93/**
  94 * device_pm_sleep_init - Initialize system suspend-related device fields.
  95 * @dev: Device object being initialized.
  96 */
  97void device_pm_sleep_init(struct device *dev)
  98{
  99	dev->power.is_prepared = false;
 100	dev->power.is_suspended = false;
 101	dev->power.is_noirq_suspended = false;
 102	dev->power.is_late_suspended = false;
 103	init_completion(&dev->power.completion);
 104	complete_all(&dev->power.completion);
 105	dev->power.wakeup = NULL;
 106	INIT_LIST_HEAD(&dev->power.entry);
 107}
 108
 109/**
 110 * device_pm_lock - Lock the list of active devices used by the PM core.
 111 */
 112void device_pm_lock(void)
 113{
 114	mutex_lock(&dpm_list_mtx);
 115}
 116
 117/**
 118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 119 */
 120void device_pm_unlock(void)
 121{
 122	mutex_unlock(&dpm_list_mtx);
 123}
 124
 125/**
 126 * device_pm_add - Add a device to the PM core's list of active devices.
 127 * @dev: Device to add to the list.
 128 */
 129void device_pm_add(struct device *dev)
 130{
 131	/* Skip PM setup/initialization. */
 132	if (device_pm_not_required(dev))
 133		return;
 134
 135	pr_debug("Adding info for %s:%s\n",
 136		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 137	device_pm_check_callbacks(dev);
 138	mutex_lock(&dpm_list_mtx);
 139	if (dev->parent && dev->parent->power.is_prepared)
 140		dev_warn(dev, "parent %s should not be sleeping\n",
 141			dev_name(dev->parent));
 142	list_add_tail(&dev->power.entry, &dpm_list);
 143	dev->power.in_dpm_list = true;
 144	mutex_unlock(&dpm_list_mtx);
 145}
 146
 147/**
 148 * device_pm_remove - Remove a device from the PM core's list of active devices.
 149 * @dev: Device to be removed from the list.
 150 */
 151void device_pm_remove(struct device *dev)
 152{
 153	if (device_pm_not_required(dev))
 154		return;
 155
 156	pr_debug("Removing info for %s:%s\n",
 157		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 158	complete_all(&dev->power.completion);
 159	mutex_lock(&dpm_list_mtx);
 160	list_del_init(&dev->power.entry);
 161	dev->power.in_dpm_list = false;
 162	mutex_unlock(&dpm_list_mtx);
 163	device_wakeup_disable(dev);
 164	pm_runtime_remove(dev);
 165	device_pm_check_callbacks(dev);
 166}
 167
 168/**
 169 * device_pm_move_before - Move device in the PM core's list of active devices.
 170 * @deva: Device to move in dpm_list.
 171 * @devb: Device @deva should come before.
 172 */
 173void device_pm_move_before(struct device *deva, struct device *devb)
 174{
 175	pr_debug("Moving %s:%s before %s:%s\n",
 176		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 177		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 178	/* Delete deva from dpm_list and reinsert before devb. */
 179	list_move_tail(&deva->power.entry, &devb->power.entry);
 180}
 181
 182/**
 183 * device_pm_move_after - Move device in the PM core's list of active devices.
 184 * @deva: Device to move in dpm_list.
 185 * @devb: Device @deva should come after.
 186 */
 187void device_pm_move_after(struct device *deva, struct device *devb)
 188{
 189	pr_debug("Moving %s:%s after %s:%s\n",
 190		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 191		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 192	/* Delete deva from dpm_list and reinsert after devb. */
 193	list_move(&deva->power.entry, &devb->power.entry);
 194}
 195
 196/**
 197 * device_pm_move_last - Move device to end of the PM core's list of devices.
 198 * @dev: Device to move in dpm_list.
 199 */
 200void device_pm_move_last(struct device *dev)
 201{
 202	pr_debug("Moving %s:%s to end of list\n",
 203		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 204	list_move_tail(&dev->power.entry, &dpm_list);
 205}
 206
 207static ktime_t initcall_debug_start(struct device *dev, void *cb)
 208{
 209	if (!pm_print_times_enabled)
 210		return 0;
 211
 212	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 213		 task_pid_nr(current),
 214		 dev->parent ? dev_name(dev->parent) : "none");
 215	return ktime_get();
 216}
 217
 218static void initcall_debug_report(struct device *dev, ktime_t calltime,
 219				  void *cb, int error)
 220{
 221	ktime_t rettime;
 222	s64 nsecs;
 223
 224	if (!pm_print_times_enabled)
 225		return;
 226
 227	rettime = ktime_get();
 228	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 229
 230	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 231		 (unsigned long long)nsecs >> 10);
 232}
 233
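/*
 * Note: the two helpers above bracket every PM callback invocation with a
 * log line and a duration report.  pm_print_times_enabled is normally
 * toggled from user space via /sys/power/pm_print_times (with
 * CONFIG_PM_SLEEP_DEBUG enabled).
 */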
 234/**
 235 * dpm_wait - Wait for a PM operation to complete.
 236 * @dev: Device to wait for.
 237 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 238 */
 239static void dpm_wait(struct device *dev, bool async)
 240{
 241	if (!dev)
 242		return;
 243
 244	if (async || (pm_async_enabled && dev->power.async_suspend))
 245		wait_for_completion(&dev->power.completion);
 246}
 247
 248static int dpm_wait_fn(struct device *dev, void *async_ptr)
 249{
 250	dpm_wait(dev, *((bool *)async_ptr));
 251	return 0;
 252}
 253
 254static void dpm_wait_for_children(struct device *dev, bool async)
 255{
  256	device_for_each_child(dev, &async, dpm_wait_fn);
 257}
 258
 259static void dpm_wait_for_suppliers(struct device *dev, bool async)
 260{
 261	struct device_link *link;
 262	int idx;
 263
 264	idx = device_links_read_lock();
 265
 266	/*
 267	 * If the supplier goes away right after we've checked the link to it,
 268	 * we'll wait for its completion to change the state, but that's fine,
 269	 * because the only things that will block as a result are the SRCU
 270	 * callbacks freeing the link objects for the links in the list we're
 271	 * walking.
 272	 */
 273	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 274		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 275			dpm_wait(link->supplier, async);
 276
 277	device_links_read_unlock(idx);
 278}
 279
 280static bool dpm_wait_for_superior(struct device *dev, bool async)
 281{
 282	struct device *parent;
 283
 284	/*
 285	 * If the device is resumed asynchronously and the parent's callback
 286	 * deletes both the device and the parent itself, the parent object may
 287	 * be freed while this function is running, so avoid that by reference
 288	 * counting the parent once more unless the device has been deleted
 289	 * already (in which case return right away).
 290	 */
 291	mutex_lock(&dpm_list_mtx);
 292
 293	if (!device_pm_initialized(dev)) {
 294		mutex_unlock(&dpm_list_mtx);
 295		return false;
 296	}
 297
 298	parent = get_device(dev->parent);
 299
 300	mutex_unlock(&dpm_list_mtx);
 301
 302	dpm_wait(parent, async);
 303	put_device(parent);
 304
 305	dpm_wait_for_suppliers(dev, async);
 306
 307	/*
 308	 * If the parent's callback has deleted the device, attempting to resume
 309	 * it would be invalid, so avoid doing that then.
 310	 */
 311	return device_pm_initialized(dev);
 312}
 313
 314static void dpm_wait_for_consumers(struct device *dev, bool async)
 315{
 316	struct device_link *link;
 317	int idx;
 318
 319	idx = device_links_read_lock();
 320
 321	/*
 322	 * The status of a device link can only be changed from "dormant" by a
 323	 * probe, but that cannot happen during system suspend/resume.  In
 324	 * theory it can change to "dormant" at that time, but then it is
  325	 * reasonable to wait for the target device anyway (e.g. if it goes
 326	 * away, it's better to wait for it to go away completely and then
 327	 * continue instead of trying to continue in parallel with its
 328	 * unregistration).
 329	 */
 330	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 331		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 332			dpm_wait(link->consumer, async);
 333
 334	device_links_read_unlock(idx);
 335}
 336
 337static void dpm_wait_for_subordinate(struct device *dev, bool async)
 338{
 339	dpm_wait_for_children(dev, async);
 340	dpm_wait_for_consumers(dev, async);
 341}
 342
 343/**
 344 * pm_op - Return the PM operation appropriate for given PM event.
 345 * @ops: PM operations to choose from.
 346 * @state: PM transition of the system being carried out.
 347 */
 348static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 349{
 350	switch (state.event) {
 351#ifdef CONFIG_SUSPEND
 352	case PM_EVENT_SUSPEND:
 353		return ops->suspend;
 354	case PM_EVENT_RESUME:
 355		return ops->resume;
 356#endif /* CONFIG_SUSPEND */
 357#ifdef CONFIG_HIBERNATE_CALLBACKS
 358	case PM_EVENT_FREEZE:
 359	case PM_EVENT_QUIESCE:
 360		return ops->freeze;
 361	case PM_EVENT_HIBERNATE:
 362		return ops->poweroff;
 363	case PM_EVENT_THAW:
 364	case PM_EVENT_RECOVER:
 365		return ops->thaw;
 367	case PM_EVENT_RESTORE:
 368		return ops->restore;
 369#endif /* CONFIG_HIBERNATE_CALLBACKS */
 370	}
 371
 372	return NULL;
 373}
 374
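/*
 * Illustrative sketch (not part of this file): a driver populates struct
 * dev_pm_ops and the PM core uses pm_op() above to pick the callback that
 * matches the current transition, e.g. ->suspend for PM_EVENT_SUSPEND.
 * All "foo" names below are hypothetical.
 */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the hypothetical device. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Reactivate the hypothetical device. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* Also wires up the freeze/thaw/poweroff/restore slots. */
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};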
 375/**
 376 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 377 * @ops: PM operations to choose from.
 378 * @state: PM transition of the system being carried out.
 379 *
  380 * Runtime PM is disabled for the device while the returned callback runs.
 381 */
 382static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 383				      pm_message_t state)
 384{
 385	switch (state.event) {
 386#ifdef CONFIG_SUSPEND
 387	case PM_EVENT_SUSPEND:
 388		return ops->suspend_late;
 389	case PM_EVENT_RESUME:
 390		return ops->resume_early;
 391#endif /* CONFIG_SUSPEND */
 392#ifdef CONFIG_HIBERNATE_CALLBACKS
 393	case PM_EVENT_FREEZE:
 394	case PM_EVENT_QUIESCE:
 395		return ops->freeze_late;
 396	case PM_EVENT_HIBERNATE:
 397		return ops->poweroff_late;
 398	case PM_EVENT_THAW:
 399	case PM_EVENT_RECOVER:
 400		return ops->thaw_early;
 401	case PM_EVENT_RESTORE:
 402		return ops->restore_early;
 403#endif /* CONFIG_HIBERNATE_CALLBACKS */
 404	}
 405
 406	return NULL;
 407}
 408
 409/**
 410 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 411 * @ops: PM operations to choose from.
 412 * @state: PM transition of the system being carried out.
 413 *
  414 * The device's driver will not receive interrupts while the callback
  415 * returned by this function is being executed.
 416 */
 417static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 418{
 419	switch (state.event) {
 420#ifdef CONFIG_SUSPEND
 421	case PM_EVENT_SUSPEND:
 422		return ops->suspend_noirq;
 423	case PM_EVENT_RESUME:
 424		return ops->resume_noirq;
 425#endif /* CONFIG_SUSPEND */
 426#ifdef CONFIG_HIBERNATE_CALLBACKS
 427	case PM_EVENT_FREEZE:
 428	case PM_EVENT_QUIESCE:
 429		return ops->freeze_noirq;
 430	case PM_EVENT_HIBERNATE:
 431		return ops->poweroff_noirq;
 432	case PM_EVENT_THAW:
 433	case PM_EVENT_RECOVER:
 434		return ops->thaw_noirq;
 435	case PM_EVENT_RESTORE:
 436		return ops->restore_noirq;
 437#endif /* CONFIG_HIBERNATE_CALLBACKS */
 438	}
 439
 440	return NULL;
 441}
 442
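/*
 * Illustrative sketch: the late/early and noirq phases resolved by
 * pm_late_early_op() and pm_noirq_op() above have dedicated slots in
 * struct dev_pm_ops.  The SET_LATE_SYSTEM_SLEEP_PM_OPS() and
 * SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() helpers from <linux/pm.h> fill them in;
 * the "foo" callbacks are hypothetical stubs.
 */
static int foo_suspend_late(struct device *dev) { return 0; }
static int foo_resume_early(struct device *dev) { return 0; }
static int foo_suspend_noirq(struct device *dev) { return 0; }
static int foo_resume_noirq(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_phased_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};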
 443static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 444{
 445	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 446		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 447		", may wakeup" : "");
 448}
 449
 450static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 451			int error)
 452{
 453	pr_err("Device %s failed to %s%s: error %d\n",
 454	       dev_name(dev), pm_verb(state.event), info, error);
 455}
 456
 457static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 458			  const char *info)
 459{
 460	ktime_t calltime;
 461	u64 usecs64;
 462	int usecs;
 463
 464	calltime = ktime_get();
 465	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 466	do_div(usecs64, NSEC_PER_USEC);
 467	usecs = usecs64;
 468	if (usecs == 0)
 469		usecs = 1;
 470
 471	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 472		  info ?: "", info ? " " : "", pm_verb(state.event),
 473		  error ? "aborted" : "complete",
 474		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 475}
 476
 477static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 478			    pm_message_t state, const char *info)
 479{
 480	ktime_t calltime;
 481	int error;
 482
 483	if (!cb)
 484		return 0;
 485
 486	calltime = initcall_debug_start(dev, cb);
 487
 488	pm_dev_dbg(dev, state, info);
 489	trace_device_pm_callback_start(dev, info, state.event);
 490	error = cb(dev);
 491	trace_device_pm_callback_end(dev, error);
 492	suspend_report_result(cb, error);
 493
 494	initcall_debug_report(dev, calltime, cb, error);
 495
 496	return error;
 497}
 498
 499#ifdef CONFIG_DPM_WATCHDOG
 500struct dpm_watchdog {
 501	struct device		*dev;
 502	struct task_struct	*tsk;
 503	struct timer_list	timer;
 504};
 505
 506#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 507	struct dpm_watchdog wd
 508
 509/**
 510 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 511 * @t: The timer that PM watchdog depends on.
 512 *
 513 * Called when a driver has timed out suspending or resuming.
 514 * There's not much we can do here to recover so panic() to
 515 * capture a crash-dump in pstore.
 516 */
 517static void dpm_watchdog_handler(struct timer_list *t)
 518{
 519	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 520
 521	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 522	show_stack(wd->tsk, NULL, KERN_EMERG);
 523	panic("%s %s: unrecoverable failure\n",
 524		dev_driver_string(wd->dev), dev_name(wd->dev));
 525}
 526
 527/**
 528 * dpm_watchdog_set - Enable pm watchdog for given device.
 529 * @wd: Watchdog. Must be allocated on the stack.
 530 * @dev: Device to handle.
 531 */
 532static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 533{
 534	struct timer_list *timer = &wd->timer;
 535
 536	wd->dev = dev;
 537	wd->tsk = current;
 538
 539	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 540	/* use same timeout value for both suspend and resume */
 541	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 542	add_timer(timer);
 543}
 544
 545/**
 546 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 547 * @wd: Watchdog to disable.
 548 */
 549static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 550{
 551	struct timer_list *timer = &wd->timer;
 552
 553	del_timer_sync(timer);
 554	destroy_timer_on_stack(timer);
 555}
 556#else
 557#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 558#define dpm_watchdog_set(x, y)
 559#define dpm_watchdog_clear(x)
 560#endif
 561
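/*
 * Note: the watchdog above is opt-in via CONFIG_DPM_WATCHDOG, and the same
 * timeout, CONFIG_DPM_WATCHDOG_TIMEOUT seconds, is used for both suspend
 * and resume (see the HZ * CONFIG_DPM_WATCHDOG_TIMEOUT expiry in
 * dpm_watchdog_set()).
 */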
 562/*------------------------- Resume routines -------------------------*/
 563
 564/**
 565 * dev_pm_skip_resume - System-wide device resume optimization check.
 566 * @dev: Target device.
 567 *
 568 * Return:
 569 * - %false if the transition under way is RESTORE.
 570 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 571 * - The logical negation of %power.must_resume otherwise (that is, when the
 572 *   transition under way is RESUME).
 573 */
 574bool dev_pm_skip_resume(struct device *dev)
 575{
 576	if (pm_transition.event == PM_EVENT_RESTORE)
 577		return false;
 578
 579	if (pm_transition.event == PM_EVENT_THAW)
 580		return dev_pm_skip_suspend(dev);
 581
 582	return !dev->power.must_resume;
 583}
 584
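/*
 * Illustrative sketch: a driver opts into the optimizations checked by
 * dev_pm_skip_resume() and dev_pm_skip_suspend() by setting driver flags,
 * typically at probe time.  "foo_probe" is hypothetical.
 */
static int foo_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}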
 585/**
 586 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 587 * @dev: Device to handle.
 588 * @state: PM transition of the system being carried out.
 589 * @async: If true, the device is being resumed asynchronously.
 590 *
 591 * The driver of @dev will not receive interrupts while this function is being
 592 * executed.
 593 */
 594static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 595{
 596	pm_callback_t callback = NULL;
 597	const char *info = NULL;
 598	bool skip_resume;
 599	int error = 0;
 600
 601	TRACE_DEVICE(dev);
 602	TRACE_RESUME(0);
 603
 604	if (dev->power.syscore || dev->power.direct_complete)
 605		goto Out;
 606
 607	if (!dev->power.is_noirq_suspended)
 608		goto Out;
 609
 610	if (!dpm_wait_for_superior(dev, async))
 611		goto Out;
 612
 613	skip_resume = dev_pm_skip_resume(dev);
 614	/*
 615	 * If the driver callback is skipped below or by the middle layer
 616	 * callback and device_resume_early() also skips the driver callback for
 617	 * this device later, it needs to appear as "suspended" to PM-runtime,
 618	 * so change its status accordingly.
 619	 *
 620	 * Otherwise, the device is going to be resumed, so set its PM-runtime
 621	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 622	 * to avoid confusing drivers that don't use it.
 623	 */
 624	if (skip_resume)
 625		pm_runtime_set_suspended(dev);
 626	else if (dev_pm_skip_suspend(dev))
 627		pm_runtime_set_active(dev);
 628
 629	if (dev->pm_domain) {
 630		info = "noirq power domain ";
 631		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 632	} else if (dev->type && dev->type->pm) {
 633		info = "noirq type ";
 634		callback = pm_noirq_op(dev->type->pm, state);
 635	} else if (dev->class && dev->class->pm) {
 636		info = "noirq class ";
 637		callback = pm_noirq_op(dev->class->pm, state);
 638	} else if (dev->bus && dev->bus->pm) {
 639		info = "noirq bus ";
 640		callback = pm_noirq_op(dev->bus->pm, state);
 641	}
 642	if (callback)
 643		goto Run;
 644
 645	if (skip_resume)
 646		goto Skip;
 647
 648	if (dev->driver && dev->driver->pm) {
 649		info = "noirq driver ";
 650		callback = pm_noirq_op(dev->driver->pm, state);
 651	}
 652
 653Run:
 654	error = dpm_run_callback(callback, dev, state, info);
 655
 656Skip:
 657	dev->power.is_noirq_suspended = false;
 658
 659Out:
 660	complete_all(&dev->power.completion);
 661	TRACE_RESUME(error);
 662	return error;
 663}
 664
 665static bool is_async(struct device *dev)
 666{
 667	return dev->power.async_suspend && pm_async_enabled
 668		&& !pm_trace_is_enabled();
 669}
 670
 671static bool dpm_async_fn(struct device *dev, async_func_t func)
 672{
 673	reinit_completion(&dev->power.completion);
 674
 675	if (is_async(dev)) {
 676		get_device(dev);
 677		async_schedule_dev(func, dev);
 678		return true;
 679	}
 680
 681	return false;
 682}
 683
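/*
 * Illustrative sketch: a device takes part in the async paths only if its
 * power.async_suspend flag is set, which a driver or bus would usually do
 * once at probe/registration time; the global switch checked via
 * pm_async_enabled is /sys/power/pm_async.  "foo_register" is
 * hypothetical.
 */
static void foo_register(struct device *dev)
{
	device_enable_async_suspend(dev);	/* sets power.async_suspend */
}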
 684static void async_resume_noirq(void *data, async_cookie_t cookie)
 685{
 686	struct device *dev = (struct device *)data;
 687	int error;
 688
 689	error = device_resume_noirq(dev, pm_transition, true);
 690	if (error)
 691		pm_dev_err(dev, pm_transition, " async", error);
 692
 693	put_device(dev);
 694}
 695
 696static void dpm_noirq_resume_devices(pm_message_t state)
 697{
 698	struct device *dev;
 699	ktime_t starttime = ktime_get();
 700
 701	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 702	mutex_lock(&dpm_list_mtx);
 703	pm_transition = state;
 704
 705	/*
  706	 * Start the async threads upfront, so that their startup is
  707	 * not delayed by the synchronous resume of the non-async
  708	 * devices in the list.
 709	 */
 710	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 711		dpm_async_fn(dev, async_resume_noirq);
 712
 713	while (!list_empty(&dpm_noirq_list)) {
 714		dev = to_device(dpm_noirq_list.next);
 715		get_device(dev);
 716		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 717		mutex_unlock(&dpm_list_mtx);
 718
 719		if (!is_async(dev)) {
 720			int error;
 721
 722			error = device_resume_noirq(dev, state, false);
 723			if (error) {
 724				suspend_stats.failed_resume_noirq++;
 725				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 726				dpm_save_failed_dev(dev_name(dev));
 727				pm_dev_err(dev, state, " noirq", error);
 728			}
 729		}
 730
 731		mutex_lock(&dpm_list_mtx);
 732		put_device(dev);
 733	}
 734	mutex_unlock(&dpm_list_mtx);
 735	async_synchronize_full();
 736	dpm_show_time(starttime, state, 0, "noirq");
 737	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 738}
 739
 740/**
 741 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 742 * @state: PM transition of the system being carried out.
 743 *
 744 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 745 * allow device drivers' interrupt handlers to be called.
 746 */
 747void dpm_resume_noirq(pm_message_t state)
 748{
 749	dpm_noirq_resume_devices(state);
 750
 751	resume_device_irqs();
 752	device_wakeup_disarm_wake_irqs();
 753
 754	cpuidle_resume();
 755}
 756
 757/**
 758 * device_resume_early - Execute an "early resume" callback for given device.
 759 * @dev: Device to handle.
 760 * @state: PM transition of the system being carried out.
 761 * @async: If true, the device is being resumed asynchronously.
 762 *
 763 * Runtime PM is disabled for @dev while this function is being executed.
 764 */
 765static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 766{
 767	pm_callback_t callback = NULL;
 768	const char *info = NULL;
 769	int error = 0;
 770
 771	TRACE_DEVICE(dev);
 772	TRACE_RESUME(0);
 773
 774	if (dev->power.syscore || dev->power.direct_complete)
 775		goto Out;
 776
 777	if (!dev->power.is_late_suspended)
 778		goto Out;
 779
 780	if (!dpm_wait_for_superior(dev, async))
 781		goto Out;
 782
 783	if (dev->pm_domain) {
 784		info = "early power domain ";
 785		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 786	} else if (dev->type && dev->type->pm) {
 787		info = "early type ";
 788		callback = pm_late_early_op(dev->type->pm, state);
 789	} else if (dev->class && dev->class->pm) {
 790		info = "early class ";
 791		callback = pm_late_early_op(dev->class->pm, state);
 792	} else if (dev->bus && dev->bus->pm) {
 793		info = "early bus ";
 794		callback = pm_late_early_op(dev->bus->pm, state);
 795	}
 796	if (callback)
 797		goto Run;
 798
 799	if (dev_pm_skip_resume(dev))
 800		goto Skip;
 801
 802	if (dev->driver && dev->driver->pm) {
 803		info = "early driver ";
 804		callback = pm_late_early_op(dev->driver->pm, state);
 805	}
 806
 807Run:
 808	error = dpm_run_callback(callback, dev, state, info);
 809
 810Skip:
 811	dev->power.is_late_suspended = false;
 812
 813Out:
 814	TRACE_RESUME(error);
 815
 816	pm_runtime_enable(dev);
 817	complete_all(&dev->power.completion);
 818	return error;
 819}
 820
 821static void async_resume_early(void *data, async_cookie_t cookie)
 822{
 823	struct device *dev = (struct device *)data;
 824	int error;
 825
 826	error = device_resume_early(dev, pm_transition, true);
 827	if (error)
 828		pm_dev_err(dev, pm_transition, " async", error);
 829
 830	put_device(dev);
 831}
 832
 833/**
 834 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 835 * @state: PM transition of the system being carried out.
 836 */
 837void dpm_resume_early(pm_message_t state)
 838{
 839	struct device *dev;
 840	ktime_t starttime = ktime_get();
 841
 842	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 843	mutex_lock(&dpm_list_mtx);
 844	pm_transition = state;
 845
 846	/*
  847	 * Start the async threads upfront, so that their startup is
  848	 * not delayed by the synchronous resume of the non-async
  849	 * devices in the list.
 850	 */
 851	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 852		dpm_async_fn(dev, async_resume_early);
 853
 854	while (!list_empty(&dpm_late_early_list)) {
 855		dev = to_device(dpm_late_early_list.next);
 856		get_device(dev);
 857		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 858		mutex_unlock(&dpm_list_mtx);
 859
 860		if (!is_async(dev)) {
 861			int error;
 862
 863			error = device_resume_early(dev, state, false);
 864			if (error) {
 865				suspend_stats.failed_resume_early++;
 866				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 867				dpm_save_failed_dev(dev_name(dev));
 868				pm_dev_err(dev, state, " early", error);
 869			}
 870		}
 871		mutex_lock(&dpm_list_mtx);
 872		put_device(dev);
 873	}
 874	mutex_unlock(&dpm_list_mtx);
 875	async_synchronize_full();
 876	dpm_show_time(starttime, state, 0, "early");
 877	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 878}
 879
 880/**
 881 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 882 * @state: PM transition of the system being carried out.
 883 */
 884void dpm_resume_start(pm_message_t state)
 885{
 886	dpm_resume_noirq(state);
 887	dpm_resume_early(state);
 888}
 889EXPORT_SYMBOL_GPL(dpm_resume_start);
 890
 891/**
 892 * device_resume - Execute "resume" callbacks for given device.
 893 * @dev: Device to handle.
 894 * @state: PM transition of the system being carried out.
 895 * @async: If true, the device is being resumed asynchronously.
 896 */
 897static int device_resume(struct device *dev, pm_message_t state, bool async)
 898{
 899	pm_callback_t callback = NULL;
 900	const char *info = NULL;
 901	int error = 0;
 902	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 903
 904	TRACE_DEVICE(dev);
 905	TRACE_RESUME(0);
 906
 907	if (dev->power.syscore)
 908		goto Complete;
 909
 910	if (dev->power.direct_complete) {
 911		/* Match the pm_runtime_disable() in __device_suspend(). */
 912		pm_runtime_enable(dev);
 913		goto Complete;
 914	}
 915
 916	if (!dpm_wait_for_superior(dev, async))
 917		goto Complete;
 918
 919	dpm_watchdog_set(&wd, dev);
 920	device_lock(dev);
 921
 922	/*
 923	 * This is a fib.  But we'll allow new children to be added below
 924	 * a resumed device, even if the device hasn't been completed yet.
 925	 */
 926	dev->power.is_prepared = false;
 927
 928	if (!dev->power.is_suspended)
 929		goto Unlock;
 930
 931	if (dev->pm_domain) {
 932		info = "power domain ";
 933		callback = pm_op(&dev->pm_domain->ops, state);
 934		goto Driver;
 935	}
 936
 937	if (dev->type && dev->type->pm) {
 938		info = "type ";
 939		callback = pm_op(dev->type->pm, state);
 940		goto Driver;
 941	}
 942
 943	if (dev->class && dev->class->pm) {
 944		info = "class ";
 945		callback = pm_op(dev->class->pm, state);
 946		goto Driver;
 947	}
 948
 949	if (dev->bus) {
 950		if (dev->bus->pm) {
 951			info = "bus ";
 952			callback = pm_op(dev->bus->pm, state);
 953		} else if (dev->bus->resume) {
 954			info = "legacy bus ";
 955			callback = dev->bus->resume;
 956			goto End;
 957		}
 958	}
 959
 960 Driver:
 961	if (!callback && dev->driver && dev->driver->pm) {
 962		info = "driver ";
 963		callback = pm_op(dev->driver->pm, state);
 964	}
 965
 966 End:
 967	error = dpm_run_callback(callback, dev, state, info);
 968	dev->power.is_suspended = false;
 969
 970 Unlock:
 971	device_unlock(dev);
 972	dpm_watchdog_clear(&wd);
 973
 974 Complete:
 975	complete_all(&dev->power.completion);
 976
 977	TRACE_RESUME(error);
 978
 979	return error;
 980}
 981
 982static void async_resume(void *data, async_cookie_t cookie)
 983{
 984	struct device *dev = (struct device *)data;
 985	int error;
 986
 987	error = device_resume(dev, pm_transition, true);
 988	if (error)
 989		pm_dev_err(dev, pm_transition, " async", error);
 990	put_device(dev);
 991}
 992
 993/**
 994 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 995 * @state: PM transition of the system being carried out.
 996 *
 997 * Execute the appropriate "resume" callback for all devices whose status
 998 * indicates that they are suspended.
 999 */
1000void dpm_resume(pm_message_t state)
1001{
1002	struct device *dev;
1003	ktime_t starttime = ktime_get();
1004
1005	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1006	might_sleep();
1007
1008	mutex_lock(&dpm_list_mtx);
1009	pm_transition = state;
1010	async_error = 0;
1011
1012	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1013		dpm_async_fn(dev, async_resume);
1014
1015	while (!list_empty(&dpm_suspended_list)) {
1016		dev = to_device(dpm_suspended_list.next);
1017		get_device(dev);
1018		if (!is_async(dev)) {
1019			int error;
1020
1021			mutex_unlock(&dpm_list_mtx);
1022
1023			error = device_resume(dev, state, false);
1024			if (error) {
1025				suspend_stats.failed_resume++;
1026				dpm_save_failed_step(SUSPEND_RESUME);
1027				dpm_save_failed_dev(dev_name(dev));
1028				pm_dev_err(dev, state, "", error);
1029			}
1030
1031			mutex_lock(&dpm_list_mtx);
1032		}
1033		if (!list_empty(&dev->power.entry))
1034			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1035		put_device(dev);
1036	}
1037	mutex_unlock(&dpm_list_mtx);
1038	async_synchronize_full();
1039	dpm_show_time(starttime, state, 0, NULL);
1040
1041	cpufreq_resume();
1042	devfreq_resume();
1043	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1044}
1045
1046/**
1047 * device_complete - Complete a PM transition for given device.
1048 * @dev: Device to handle.
1049 * @state: PM transition of the system being carried out.
1050 */
1051static void device_complete(struct device *dev, pm_message_t state)
1052{
1053	void (*callback)(struct device *) = NULL;
1054	const char *info = NULL;
1055
1056	if (dev->power.syscore)
1057		return;
1058
1059	device_lock(dev);
1060
1061	if (dev->pm_domain) {
1062		info = "completing power domain ";
1063		callback = dev->pm_domain->ops.complete;
1064	} else if (dev->type && dev->type->pm) {
1065		info = "completing type ";
1066		callback = dev->type->pm->complete;
1067	} else if (dev->class && dev->class->pm) {
1068		info = "completing class ";
1069		callback = dev->class->pm->complete;
1070	} else if (dev->bus && dev->bus->pm) {
1071		info = "completing bus ";
1072		callback = dev->bus->pm->complete;
1073	}
1074
1075	if (!callback && dev->driver && dev->driver->pm) {
1076		info = "completing driver ";
1077		callback = dev->driver->pm->complete;
1078	}
1079
1080	if (callback) {
1081		pm_dev_dbg(dev, state, info);
1082		callback(dev);
1083	}
1084
1085	device_unlock(dev);
1086
1087	pm_runtime_put(dev);
1088}
1089
1090/**
1091 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1092 * @state: PM transition of the system being carried out.
1093 *
1094 * Execute the ->complete() callbacks for all devices whose PM status is not
1095 * DPM_ON (this allows new devices to be registered).
1096 */
1097void dpm_complete(pm_message_t state)
1098{
1099	struct list_head list;
1100
1101	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1102	might_sleep();
1103
1104	INIT_LIST_HEAD(&list);
1105	mutex_lock(&dpm_list_mtx);
1106	while (!list_empty(&dpm_prepared_list)) {
1107		struct device *dev = to_device(dpm_prepared_list.prev);
1108
1109		get_device(dev);
1110		dev->power.is_prepared = false;
1111		list_move(&dev->power.entry, &list);
1112		mutex_unlock(&dpm_list_mtx);
1113
1114		trace_device_pm_callback_start(dev, "", state.event);
1115		device_complete(dev, state);
1116		trace_device_pm_callback_end(dev, 0);
1117
1118		mutex_lock(&dpm_list_mtx);
1119		put_device(dev);
1120	}
1121	list_splice(&list, &dpm_list);
1122	mutex_unlock(&dpm_list_mtx);
1123
1124	/* Allow device probing and trigger re-probing of deferred devices */
1125	device_unblock_probing();
1126	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1127}
1128
1129/**
1130 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1131 * @state: PM transition of the system being carried out.
1132 *
1133 * Execute "resume" callbacks for all devices and complete the PM transition of
1134 * the system.
1135 */
1136void dpm_resume_end(pm_message_t state)
1137{
1138	dpm_resume(state);
1139	dpm_complete(state);
1140}
1141EXPORT_SYMBOL_GPL(dpm_resume_end);
1142
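/*
 * Note on ordering: a full system transition runs the phases implemented
 * in this file as prepare -> suspend -> suspend_late -> suspend_noirq on
 * the way down (dpm_suspend_start() plus dpm_suspend_end()) and
 * resume_noirq -> resume_early -> resume -> complete on the way back up
 * (dpm_resume_start() plus dpm_resume_end()).
 */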
1143
1144/*------------------------- Suspend routines -------------------------*/
1145
1146/**
1147 * resume_event - Return a "resume" message for given "suspend" sleep state.
1148 * @sleep_state: PM message representing a sleep state.
1149 *
1150 * Return a PM message representing the resume event corresponding to given
1151 * sleep state.
1152 */
1153static pm_message_t resume_event(pm_message_t sleep_state)
1154{
1155	switch (sleep_state.event) {
1156	case PM_EVENT_SUSPEND:
1157		return PMSG_RESUME;
1158	case PM_EVENT_FREEZE:
1159	case PM_EVENT_QUIESCE:
1160		return PMSG_RECOVER;
1161	case PM_EVENT_HIBERNATE:
1162		return PMSG_RESTORE;
1163	}
1164	return PMSG_ON;
1165}
1166
1167static void dpm_superior_set_must_resume(struct device *dev)
1168{
1169	struct device_link *link;
1170	int idx;
1171
1172	if (dev->parent)
1173		dev->parent->power.must_resume = true;
1174
1175	idx = device_links_read_lock();
1176
1177	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1178		link->supplier->power.must_resume = true;
1179
1180	device_links_read_unlock(idx);
1181}
1182
1183/**
1184 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1185 * @dev: Device to handle.
1186 * @state: PM transition of the system being carried out.
1187 * @async: If true, the device is being suspended asynchronously.
1188 *
1189 * The driver of @dev will not receive interrupts while this function is being
1190 * executed.
1191 */
1192static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193{
1194	pm_callback_t callback = NULL;
1195	const char *info = NULL;
1196	int error = 0;
1197
1198	TRACE_DEVICE(dev);
1199	TRACE_SUSPEND(0);
1200
1201	dpm_wait_for_subordinate(dev, async);
1202
1203	if (async_error)
1204		goto Complete;
1205
1206	if (dev->power.syscore || dev->power.direct_complete)
1207		goto Complete;
1208
1209	if (dev->pm_domain) {
1210		info = "noirq power domain ";
1211		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1212	} else if (dev->type && dev->type->pm) {
1213		info = "noirq type ";
1214		callback = pm_noirq_op(dev->type->pm, state);
1215	} else if (dev->class && dev->class->pm) {
1216		info = "noirq class ";
1217		callback = pm_noirq_op(dev->class->pm, state);
1218	} else if (dev->bus && dev->bus->pm) {
1219		info = "noirq bus ";
1220		callback = pm_noirq_op(dev->bus->pm, state);
1221	}
1222	if (callback)
1223		goto Run;
1224
1225	if (dev_pm_skip_suspend(dev))
1226		goto Skip;
1227
1228	if (dev->driver && dev->driver->pm) {
1229		info = "noirq driver ";
1230		callback = pm_noirq_op(dev->driver->pm, state);
1231	}
1232
1233Run:
1234	error = dpm_run_callback(callback, dev, state, info);
1235	if (error) {
1236		async_error = error;
1237		goto Complete;
1238	}
1239
1240Skip:
1241	dev->power.is_noirq_suspended = true;
1242
1243	/*
1244	 * Skipping the resume of devices that were in use right before the
1245	 * system suspend (as indicated by their PM-runtime usage counters)
 1246	 * would be suboptimal.  Also resume them if skipping their resume is
 1247	 * not allowed.
1248	 */
1249	if (atomic_read(&dev->power.usage_count) > 1 ||
1250	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1251	      dev->power.may_skip_resume))
1252		dev->power.must_resume = true;
1253
1254	if (dev->power.must_resume)
1255		dpm_superior_set_must_resume(dev);
1256
1257Complete:
1258	complete_all(&dev->power.completion);
1259	TRACE_SUSPEND(error);
1260	return error;
1261}
1262
1263static void async_suspend_noirq(void *data, async_cookie_t cookie)
1264{
1265	struct device *dev = (struct device *)data;
1266	int error;
1267
1268	error = __device_suspend_noirq(dev, pm_transition, true);
1269	if (error) {
1270		dpm_save_failed_dev(dev_name(dev));
1271		pm_dev_err(dev, pm_transition, " async", error);
1272	}
1273
1274	put_device(dev);
1275}
1276
1277static int device_suspend_noirq(struct device *dev)
1278{
1279	if (dpm_async_fn(dev, async_suspend_noirq))
1280		return 0;
1281
1282	return __device_suspend_noirq(dev, pm_transition, false);
1283}
1284
1285static int dpm_noirq_suspend_devices(pm_message_t state)
1286{
1287	ktime_t starttime = ktime_get();
1288	int error = 0;
1289
1290	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1291	mutex_lock(&dpm_list_mtx);
1292	pm_transition = state;
1293	async_error = 0;
1294
1295	while (!list_empty(&dpm_late_early_list)) {
1296		struct device *dev = to_device(dpm_late_early_list.prev);
1297
1298		get_device(dev);
1299		mutex_unlock(&dpm_list_mtx);
1300
1301		error = device_suspend_noirq(dev);
1302
1303		mutex_lock(&dpm_list_mtx);
1304		if (error) {
1305			pm_dev_err(dev, state, " noirq", error);
1306			dpm_save_failed_dev(dev_name(dev));
1307			put_device(dev);
1308			break;
1309		}
1310		if (!list_empty(&dev->power.entry))
1311			list_move(&dev->power.entry, &dpm_noirq_list);
1312		put_device(dev);
1313
1314		if (async_error)
1315			break;
1316	}
1317	mutex_unlock(&dpm_list_mtx);
1318	async_synchronize_full();
1319	if (!error)
1320		error = async_error;
1321
1322	if (error) {
1323		suspend_stats.failed_suspend_noirq++;
1324		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1325	}
1326	dpm_show_time(starttime, state, error, "noirq");
1327	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1328	return error;
1329}
1330
1331/**
1332 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1333 * @state: PM transition of the system being carried out.
1334 *
1335 * Prevent device drivers' interrupt handlers from being called and invoke
1336 * "noirq" suspend callbacks for all non-sysdev devices.
1337 */
1338int dpm_suspend_noirq(pm_message_t state)
1339{
1340	int ret;
1341
1342	cpuidle_pause();
1343
1344	device_wakeup_arm_wake_irqs();
1345	suspend_device_irqs();
1346
1347	ret = dpm_noirq_suspend_devices(state);
1348	if (ret)
1349		dpm_resume_noirq(resume_event(state));
1350
1351	return ret;
1352}
1353
1354static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355{
1356	struct device *parent = dev->parent;
1357
1358	if (!parent)
1359		return;
1360
1361	spin_lock_irq(&parent->power.lock);
1362
1363	if (dev->power.wakeup_path && !parent->power.ignore_children)
1364		parent->power.wakeup_path = true;
1365
1366	spin_unlock_irq(&parent->power.lock);
1367}
1368
1369/**
1370 * __device_suspend_late - Execute a "late suspend" callback for given device.
1371 * @dev: Device to handle.
1372 * @state: PM transition of the system being carried out.
1373 * @async: If true, the device is being suspended asynchronously.
1374 *
1375 * Runtime PM is disabled for @dev while this function is being executed.
1376 */
1377static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1378{
1379	pm_callback_t callback = NULL;
1380	const char *info = NULL;
1381	int error = 0;
1382
1383	TRACE_DEVICE(dev);
1384	TRACE_SUSPEND(0);
1385
1386	__pm_runtime_disable(dev, false);
1387
1388	dpm_wait_for_subordinate(dev, async);
1389
1390	if (async_error)
1391		goto Complete;
1392
1393	if (pm_wakeup_pending()) {
1394		async_error = -EBUSY;
1395		goto Complete;
1396	}
1397
1398	if (dev->power.syscore || dev->power.direct_complete)
1399		goto Complete;
1400
1401	if (dev->pm_domain) {
1402		info = "late power domain ";
1403		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1404	} else if (dev->type && dev->type->pm) {
1405		info = "late type ";
1406		callback = pm_late_early_op(dev->type->pm, state);
1407	} else if (dev->class && dev->class->pm) {
1408		info = "late class ";
1409		callback = pm_late_early_op(dev->class->pm, state);
1410	} else if (dev->bus && dev->bus->pm) {
1411		info = "late bus ";
1412		callback = pm_late_early_op(dev->bus->pm, state);
1413	}
1414	if (callback)
1415		goto Run;
1416
1417	if (dev_pm_skip_suspend(dev))
1418		goto Skip;
1419
1420	if (dev->driver && dev->driver->pm) {
1421		info = "late driver ";
1422		callback = pm_late_early_op(dev->driver->pm, state);
1423	}
1424
1425Run:
1426	error = dpm_run_callback(callback, dev, state, info);
1427	if (error) {
1428		async_error = error;
1429		goto Complete;
1430	}
1431	dpm_propagate_wakeup_to_parent(dev);
1432
1433Skip:
1434	dev->power.is_late_suspended = true;
1435
1436Complete:
1437	TRACE_SUSPEND(error);
1438	complete_all(&dev->power.completion);
1439	return error;
1440}
1441
1442static void async_suspend_late(void *data, async_cookie_t cookie)
1443{
1444	struct device *dev = (struct device *)data;
1445	int error;
1446
1447	error = __device_suspend_late(dev, pm_transition, true);
1448	if (error) {
1449		dpm_save_failed_dev(dev_name(dev));
1450		pm_dev_err(dev, pm_transition, " async", error);
1451	}
1452	put_device(dev);
1453}
1454
1455static int device_suspend_late(struct device *dev)
1456{
1457	if (dpm_async_fn(dev, async_suspend_late))
1458		return 0;
1459
1460	return __device_suspend_late(dev, pm_transition, false);
1461}
1462
1463/**
1464 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1465 * @state: PM transition of the system being carried out.
1466 */
1467int dpm_suspend_late(pm_message_t state)
1468{
1469	ktime_t starttime = ktime_get();
1470	int error = 0;
1471
1472	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1473	mutex_lock(&dpm_list_mtx);
1474	pm_transition = state;
1475	async_error = 0;
1476
1477	while (!list_empty(&dpm_suspended_list)) {
1478		struct device *dev = to_device(dpm_suspended_list.prev);
1479
1480		get_device(dev);
1481		mutex_unlock(&dpm_list_mtx);
1482
1483		error = device_suspend_late(dev);
1484
1485		mutex_lock(&dpm_list_mtx);
1486		if (!list_empty(&dev->power.entry))
1487			list_move(&dev->power.entry, &dpm_late_early_list);
1488
1489		if (error) {
1490			pm_dev_err(dev, state, " late", error);
1491			dpm_save_failed_dev(dev_name(dev));
1492			put_device(dev);
1493			break;
1494		}
1495		put_device(dev);
1496
1497		if (async_error)
1498			break;
1499	}
1500	mutex_unlock(&dpm_list_mtx);
1501	async_synchronize_full();
1502	if (!error)
1503		error = async_error;
1504	if (error) {
1505		suspend_stats.failed_suspend_late++;
1506		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1507		dpm_resume_early(resume_event(state));
1508	}
1509	dpm_show_time(starttime, state, error, "late");
1510	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1511	return error;
1512}
1513
1514/**
1515 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1516 * @state: PM transition of the system being carried out.
1517 */
1518int dpm_suspend_end(pm_message_t state)
1519{
1520	ktime_t starttime = ktime_get();
1521	int error;
1522
1523	error = dpm_suspend_late(state);
1524	if (error)
1525		goto out;
1526
1527	error = dpm_suspend_noirq(state);
1528	if (error)
1529		dpm_resume_early(resume_event(state));
1530
1531out:
1532	dpm_show_time(starttime, state, error, "end");
1533	return error;
1534}
1535EXPORT_SYMBOL_GPL(dpm_suspend_end);
1536
1537/**
1538 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539 * @dev: Device to suspend.
1540 * @state: PM transition of the system being carried out.
1541 * @cb: Suspend callback to execute.
1542 * @info: string description of caller.
1543 */
1544static int legacy_suspend(struct device *dev, pm_message_t state,
1545			  int (*cb)(struct device *dev, pm_message_t state),
1546			  const char *info)
1547{
1548	int error;
1549	ktime_t calltime;
1550
1551	calltime = initcall_debug_start(dev, cb);
1552
1553	trace_device_pm_callback_start(dev, info, state.event);
1554	error = cb(dev, state);
1555	trace_device_pm_callback_end(dev, error);
1556	suspend_report_result(cb, error);
1557
1558	initcall_debug_report(dev, calltime, cb, error);
1559
1560	return error;
1561}
1562
1563static void dpm_clear_superiors_direct_complete(struct device *dev)
1564{
1565	struct device_link *link;
1566	int idx;
1567
1568	if (dev->parent) {
1569		spin_lock_irq(&dev->parent->power.lock);
1570		dev->parent->power.direct_complete = false;
1571		spin_unlock_irq(&dev->parent->power.lock);
1572	}
1573
1574	idx = device_links_read_lock();
1575
1576	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577		spin_lock_irq(&link->supplier->power.lock);
1578		link->supplier->power.direct_complete = false;
1579		spin_unlock_irq(&link->supplier->power.lock);
1580	}
1581
1582	device_links_read_unlock(idx);
1583}
1584
1585/**
1586 * __device_suspend - Execute "suspend" callbacks for given device.
1587 * @dev: Device to handle.
1588 * @state: PM transition of the system being carried out.
1589 * @async: If true, the device is being suspended asynchronously.
1590 */
1591static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592{
1593	pm_callback_t callback = NULL;
1594	const char *info = NULL;
1595	int error = 0;
1596	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598	TRACE_DEVICE(dev);
1599	TRACE_SUSPEND(0);
1600
1601	dpm_wait_for_subordinate(dev, async);
1602
1603	if (async_error) {
1604		dev->power.direct_complete = false;
1605		goto Complete;
1606	}
1607
1608	/*
1609	 * Wait for possible runtime PM transitions of the device in progress
1610	 * to complete and if there's a runtime resume request pending for it,
1611	 * resume it before proceeding with invoking the system-wide suspend
1612	 * callbacks for it.
1613	 *
1614	 * If the system-wide suspend callbacks below change the configuration
1615	 * of the device, they must disable runtime PM for it or otherwise
1616	 * ensure that its runtime-resume callbacks will not be confused by that
1617	 * change in case they are invoked going forward.
1618	 */
1619	pm_runtime_barrier(dev);
1620
1621	if (pm_wakeup_pending()) {
1622		dev->power.direct_complete = false;
1623		async_error = -EBUSY;
1624		goto Complete;
1625	}
1626
1627	if (dev->power.syscore)
1628		goto Complete;
1629
1630	/* Avoid direct_complete to let wakeup_path propagate. */
1631	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1632		dev->power.direct_complete = false;
1633
1634	if (dev->power.direct_complete) {
1635		if (pm_runtime_status_suspended(dev)) {
1636			pm_runtime_disable(dev);
1637			if (pm_runtime_status_suspended(dev)) {
1638				pm_dev_dbg(dev, state, "direct-complete ");
1639				goto Complete;
1640			}
1641
1642			pm_runtime_enable(dev);
1643		}
1644		dev->power.direct_complete = false;
1645	}
1646
1647	dev->power.may_skip_resume = true;
1648	dev->power.must_resume = false;
1649
1650	dpm_watchdog_set(&wd, dev);
1651	device_lock(dev);
1652
1653	if (dev->pm_domain) {
1654		info = "power domain ";
1655		callback = pm_op(&dev->pm_domain->ops, state);
1656		goto Run;
1657	}
1658
1659	if (dev->type && dev->type->pm) {
1660		info = "type ";
1661		callback = pm_op(dev->type->pm, state);
1662		goto Run;
1663	}
1664
1665	if (dev->class && dev->class->pm) {
1666		info = "class ";
1667		callback = pm_op(dev->class->pm, state);
1668		goto Run;
1669	}
1670
1671	if (dev->bus) {
1672		if (dev->bus->pm) {
1673			info = "bus ";
1674			callback = pm_op(dev->bus->pm, state);
1675		} else if (dev->bus->suspend) {
1676			pm_dev_dbg(dev, state, "legacy bus ");
1677			error = legacy_suspend(dev, state, dev->bus->suspend,
1678						"legacy bus ");
1679			goto End;
1680		}
1681	}
1682
1683 Run:
1684	if (!callback && dev->driver && dev->driver->pm) {
1685		info = "driver ";
1686		callback = pm_op(dev->driver->pm, state);
1687	}
1688
1689	error = dpm_run_callback(callback, dev, state, info);
1690
1691 End:
1692	if (!error) {
1693		dev->power.is_suspended = true;
1694		if (device_may_wakeup(dev))
1695			dev->power.wakeup_path = true;
1696
1697		dpm_propagate_wakeup_to_parent(dev);
1698		dpm_clear_superiors_direct_complete(dev);
1699	}
1700
1701	device_unlock(dev);
1702	dpm_watchdog_clear(&wd);
1703
1704 Complete:
1705	if (error)
1706		async_error = error;
1707
1708	complete_all(&dev->power.completion);
1709	TRACE_SUSPEND(error);
1710	return error;
1711}
1712
1713static void async_suspend(void *data, async_cookie_t cookie)
1714{
1715	struct device *dev = (struct device *)data;
1716	int error;
1717
1718	error = __device_suspend(dev, pm_transition, true);
1719	if (error) {
1720		dpm_save_failed_dev(dev_name(dev));
1721		pm_dev_err(dev, pm_transition, " async", error);
1722	}
1723
1724	put_device(dev);
1725}
1726
1727static int device_suspend(struct device *dev)
1728{
1729	if (dpm_async_fn(dev, async_suspend))
1730		return 0;
1731
1732	return __device_suspend(dev, pm_transition, false);
1733}
1734
1735/**
1736 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1737 * @state: PM transition of the system being carried out.
1738 */
1739int dpm_suspend(pm_message_t state)
1740{
1741	ktime_t starttime = ktime_get();
1742	int error = 0;
1743
1744	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1745	might_sleep();
1746
1747	devfreq_suspend();
1748	cpufreq_suspend();
1749
1750	mutex_lock(&dpm_list_mtx);
1751	pm_transition = state;
1752	async_error = 0;
1753	while (!list_empty(&dpm_prepared_list)) {
1754		struct device *dev = to_device(dpm_prepared_list.prev);
1755
1756		get_device(dev);
1757		mutex_unlock(&dpm_list_mtx);
1758
1759		error = device_suspend(dev);
1760
1761		mutex_lock(&dpm_list_mtx);
1762		if (error) {
1763			pm_dev_err(dev, state, "", error);
1764			dpm_save_failed_dev(dev_name(dev));
1765			put_device(dev);
1766			break;
1767		}
1768		if (!list_empty(&dev->power.entry))
1769			list_move(&dev->power.entry, &dpm_suspended_list);
1770		put_device(dev);
1771		if (async_error)
1772			break;
1773	}
1774	mutex_unlock(&dpm_list_mtx);
1775	async_synchronize_full();
1776	if (!error)
1777		error = async_error;
1778	if (error) {
1779		suspend_stats.failed_suspend++;
1780		dpm_save_failed_step(SUSPEND_SUSPEND);
1781	}
1782	dpm_show_time(starttime, state, error, NULL);
1783	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1784	return error;
1785}
1786
1787/**
1788 * device_prepare - Prepare a device for system power transition.
1789 * @dev: Device to handle.
1790 * @state: PM transition of the system being carried out.
1791 *
1792 * Execute the ->prepare() callback(s) for given device.  No new children of the
1793 * device may be registered after this function has returned.
1794 */
1795static int device_prepare(struct device *dev, pm_message_t state)
1796{
1797	int (*callback)(struct device *) = NULL;
1798	int ret = 0;
1799
1800	if (dev->power.syscore)
1801		return 0;
1802
1803	/*
1804	 * If a device's parent goes into runtime suspend at the wrong time,
1805	 * it won't be possible to resume the device.  To prevent this we
1806	 * block runtime suspend here, during the prepare phase, and allow
1807	 * it again during the complete phase.
1808	 */
1809	pm_runtime_get_noresume(dev);
1810
1811	device_lock(dev);
1812
1813	dev->power.wakeup_path = false;
1814
1815	if (dev->power.no_pm_callbacks)
1816		goto unlock;
1817
1818	if (dev->pm_domain)
1819		callback = dev->pm_domain->ops.prepare;
1820	else if (dev->type && dev->type->pm)
1821		callback = dev->type->pm->prepare;
1822	else if (dev->class && dev->class->pm)
1823		callback = dev->class->pm->prepare;
1824	else if (dev->bus && dev->bus->pm)
1825		callback = dev->bus->pm->prepare;
1826
1827	if (!callback && dev->driver && dev->driver->pm)
1828		callback = dev->driver->pm->prepare;
1829
1830	if (callback)
1831		ret = callback(dev);
1832
1833unlock:
1834	device_unlock(dev);
1835
1836	if (ret < 0) {
1837		suspend_report_result(callback, ret);
1838		pm_runtime_put(dev);
1839		return ret;
1840	}
1841	/*
1842	 * A positive return value from ->prepare() means "this device appears
1843	 * to be runtime-suspended and its state is fine, so if it really is
1844	 * runtime-suspended, you can leave it in that state provided that you
1845	 * will do the same thing with all of its descendants".  This only
1846	 * applies to suspend transitions, however.
1847	 */
1848	spin_lock_irq(&dev->power.lock);
1849	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1850		(ret > 0 || dev->power.no_pm_callbacks) &&
1851		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1852	spin_unlock_irq(&dev->power.lock);
1853	return 0;
1854}
1855
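/*
 * Illustrative sketch: as described in device_prepare() above, a positive
 * ->prepare() return value lets the core leave a runtime-suspended device
 * alone ("direct complete").  "foo_prepare" is hypothetical.
 */
static int foo_prepare(struct device *dev)
{
	/* 1 if runtime-suspended and content to stay that way, else 0. */
	return pm_runtime_suspended(dev);
}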
1856/**
1857 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1858 * @state: PM transition of the system being carried out.
1859 *
1860 * Execute the ->prepare() callback(s) for all devices.
1861 */
1862int dpm_prepare(pm_message_t state)
1863{
1864	int error = 0;
1865
1866	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1867	might_sleep();
1868
1869	/*
 1870	 * Give the known devices a chance to complete their probes before
 1871	 * probing is disabled below. This sync point is important at least
 1872	 * at boot time and during hibernation restore.
1873	 */
1874	wait_for_device_probe();
1875	/*
 1876	 * Probing devices during suspend or hibernation is unsafe; system
 1877	 * behavior would be unpredictable in that case. So prohibit device
 1878	 * probing here and defer any probes instead. The normal behavior
 1879	 * will be restored in dpm_complete().
1880	 */
1881	device_block_probing();
1882
1883	mutex_lock(&dpm_list_mtx);
1884	while (!list_empty(&dpm_list)) {
1885		struct device *dev = to_device(dpm_list.next);
1886
1887		get_device(dev);
1888		mutex_unlock(&dpm_list_mtx);
1889
1890		trace_device_pm_callback_start(dev, "", state.event);
1891		error = device_prepare(dev, state);
1892		trace_device_pm_callback_end(dev, error);
1893
1894		mutex_lock(&dpm_list_mtx);
1895		if (error) {
1896			if (error == -EAGAIN) {
1897				put_device(dev);
1898				error = 0;
1899				continue;
1900			}
1901			pr_info("Device %s not prepared for power transition: code %d\n",
1902				dev_name(dev), error);
1903			put_device(dev);
1904			break;
1905		}
1906		dev->power.is_prepared = true;
1907		if (!list_empty(&dev->power.entry))
1908			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1909		put_device(dev);
1910	}
1911	mutex_unlock(&dpm_list_mtx);
1912	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1913	return error;
1914}
1915
1916/**
1917 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1918 * @state: PM transition of the system being carried out.
1919 *
1920 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1921 * callbacks for them.
1922 */
1923int dpm_suspend_start(pm_message_t state)
1924{
1925	ktime_t starttime = ktime_get();
1926	int error;
1927
1928	error = dpm_prepare(state);
1929	if (error) {
1930		suspend_stats.failed_prepare++;
1931		dpm_save_failed_step(SUSPEND_PREPARE);
1932	} else
1933		error = dpm_suspend(state);
1934	dpm_show_time(starttime, state, error, "start");
1935	return error;
1936}
1937EXPORT_SYMBOL_GPL(dpm_suspend_start);
1938
1939void __suspend_report_result(const char *function, void *fn, int ret)
1940{
1941	if (ret)
1942		pr_err("%s(): %pS returns %d\n", function, fn, ret);
1943}
1944EXPORT_SYMBOL_GPL(__suspend_report_result);
1945
1946/**
1947 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1948 * @subordinate: Device that needs to wait for @dev.
1949 * @dev: Device to wait for.
1950 */
1951int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1952{
1953	dpm_wait(dev, subordinate->power.async_suspend);
1954	return async_error;
1955}
1956EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1957
1958/**
1959 * dpm_for_each_dev - device iterator.
1960 * @data: data for the callback.
1961 * @fn: function to be called for each device.
1962 *
1963 * Iterate over devices in dpm_list, and call @fn for each device,
1964 * passing it @data.
1965 */
1966void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1967{
1968	struct device *dev;
1969
1970	if (!fn)
1971		return;
1972
1973	device_pm_lock();
1974	list_for_each_entry(dev, &dpm_list, power.entry)
1975		fn(dev, data);
1976	device_pm_unlock();
1977}
1978EXPORT_SYMBOL_GPL(dpm_for_each_dev);
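/*
 * Usage sketch (hypothetical, not part of this file): walking dpm_list
 * with dpm_for_each_dev(), here logging every device that has async
 * suspend enabled.
 */
static void foo_log_async(struct device *dev, void *data)
{
	if (dev->power.async_suspend)
		dev_info(dev, "%s\n", (const char *)data);
}

/* Called as: dpm_for_each_dev("async suspend enabled", foo_log_async); */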
1979
1980static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1981{
1982	if (!ops)
1983		return true;
1984
1985	return !ops->prepare &&
1986	       !ops->suspend &&
1987	       !ops->suspend_late &&
1988	       !ops->suspend_noirq &&
1989	       !ops->resume_noirq &&
1990	       !ops->resume_early &&
1991	       !ops->resume &&
1992	       !ops->complete;
1993}
1994
1995void device_pm_check_callbacks(struct device *dev)
1996{
1997	spin_lock_irq(&dev->power.lock);
1998	dev->power.no_pm_callbacks =
1999		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2000		 !dev->bus->suspend && !dev->bus->resume)) &&
2001		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2002		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2003		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2004		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2005		 !dev->driver->suspend && !dev->driver->resume));
2006	spin_unlock_irq(&dev->power.lock);
2007}
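/*
 * Illustrative sketch (hypothetical driver, not part of this file,
 * assuming <linux/platform_device.h>): a driver that registers no PM
 * callbacks at all.  device_pm_check_callbacks() then sets
 * power.no_pm_callbacks for its devices, which lets device_prepare()
 * treat them as direct-complete candidates without running any callback.
 */
static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* no .pm table: pm_ops_is_empty(NULL) is true */
	},
};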
2008
2009bool dev_pm_skip_suspend(struct device *dev)
2010{
2011	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2012		pm_runtime_status_suspended(dev);
2013}
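/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * setting DPM_FLAG_SMART_SUSPEND at probe time so that the check above
 * lets the PM core skip system-wide suspend callbacks for a device that
 * is already runtime-suspended.
 */
static int foo_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}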
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19#define dev_fmt pr_fmt
  20
  21#include <linux/device.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/sched/debug.h>
  31#include <linux/async.h>
  32#include <linux/suspend.h>
  33#include <trace/events/power.h>
  34#include <linux/cpufreq.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43#define list_for_each_entry_rcu_locked(pos, head, member) \
  44	list_for_each_entry_rcu(pos, head, member, \
  45			device_links_read_lock_held())
  46
  47/*
  48 * The entries in the dpm_list list are in a depth first order, simply
  49 * because children are guaranteed to be discovered after parents, and
  50 * are inserted at the back of the list on discovery.
  51 *
  52 * Since device_pm_add() may be called with a device lock held,
  53 * we must never try to acquire a device lock while holding
  54 * dpm_list_mutex.
  55 */
  56
  57LIST_HEAD(dpm_list);
  58static LIST_HEAD(dpm_prepared_list);
  59static LIST_HEAD(dpm_suspended_list);
  60static LIST_HEAD(dpm_late_early_list);
  61static LIST_HEAD(dpm_noirq_list);
  62
  63struct suspend_stats suspend_stats;
  64static DEFINE_MUTEX(dpm_list_mtx);
  65static pm_message_t pm_transition;
  66
  67static int async_error;
  68
  69static const char *pm_verb(int event)
  70{
  71	switch (event) {
  72	case PM_EVENT_SUSPEND:
  73		return "suspend";
  74	case PM_EVENT_RESUME:
  75		return "resume";
  76	case PM_EVENT_FREEZE:
  77		return "freeze";
  78	case PM_EVENT_QUIESCE:
  79		return "quiesce";
  80	case PM_EVENT_HIBERNATE:
  81		return "hibernate";
  82	case PM_EVENT_THAW:
  83		return "thaw";
  84	case PM_EVENT_RESTORE:
  85		return "restore";
  86	case PM_EVENT_RECOVER:
  87		return "recover";
  88	default:
  89		return "(unknown PM event)";
  90	}
  91}
  92
  93/**
  94 * device_pm_sleep_init - Initialize system suspend-related device fields.
  95 * @dev: Device object being initialized.
  96 */
  97void device_pm_sleep_init(struct device *dev)
  98{
  99	dev->power.is_prepared = false;
 100	dev->power.is_suspended = false;
 101	dev->power.is_noirq_suspended = false;
 102	dev->power.is_late_suspended = false;
 103	init_completion(&dev->power.completion);
 104	complete_all(&dev->power.completion);
 105	dev->power.wakeup = NULL;
 106	INIT_LIST_HEAD(&dev->power.entry);
 107}
 108
 109/**
 110 * device_pm_lock - Lock the list of active devices used by the PM core.
 111 */
 112void device_pm_lock(void)
 113{
 114	mutex_lock(&dpm_list_mtx);
 115}
 116
 117/**
 118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 119 */
 120void device_pm_unlock(void)
 121{
 122	mutex_unlock(&dpm_list_mtx);
 123}
 124
 125/**
 126 * device_pm_add - Add a device to the PM core's list of active devices.
 127 * @dev: Device to add to the list.
 128 */
 129void device_pm_add(struct device *dev)
 130{
 131	/* Skip PM setup/initialization. */
 132	if (device_pm_not_required(dev))
 133		return;
 134
 135	pr_debug("Adding info for %s:%s\n",
 136		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 137	device_pm_check_callbacks(dev);
 138	mutex_lock(&dpm_list_mtx);
 139	if (dev->parent && dev->parent->power.is_prepared)
 140		dev_warn(dev, "parent %s should not be sleeping\n",
 141			dev_name(dev->parent));
 142	list_add_tail(&dev->power.entry, &dpm_list);
 143	dev->power.in_dpm_list = true;
 144	mutex_unlock(&dpm_list_mtx);
 145}
 146
 147/**
 148 * device_pm_remove - Remove a device from the PM core's list of active devices.
 149 * @dev: Device to be removed from the list.
 150 */
 151void device_pm_remove(struct device *dev)
 152{
 153	if (device_pm_not_required(dev))
 154		return;
 155
 156	pr_debug("Removing info for %s:%s\n",
 157		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 158	complete_all(&dev->power.completion);
 159	mutex_lock(&dpm_list_mtx);
 160	list_del_init(&dev->power.entry);
 161	dev->power.in_dpm_list = false;
 162	mutex_unlock(&dpm_list_mtx);
 163	device_wakeup_disable(dev);
 164	pm_runtime_remove(dev);
 165	device_pm_check_callbacks(dev);
 166}
 167
 168/**
 169 * device_pm_move_before - Move device in the PM core's list of active devices.
 170 * @deva: Device to move in dpm_list.
 171 * @devb: Device @deva should come before.
 172 */
 173void device_pm_move_before(struct device *deva, struct device *devb)
 174{
 175	pr_debug("Moving %s:%s before %s:%s\n",
 176		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 177		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 178	/* Delete deva from dpm_list and reinsert before devb. */
 179	list_move_tail(&deva->power.entry, &devb->power.entry);
 180}
 181
 182/**
 183 * device_pm_move_after - Move device in the PM core's list of active devices.
 184 * @deva: Device to move in dpm_list.
 185 * @devb: Device @deva should come after.
 186 */
 187void device_pm_move_after(struct device *deva, struct device *devb)
 188{
 189	pr_debug("Moving %s:%s after %s:%s\n",
 190		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 191		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 192	/* Delete deva from dpm_list and reinsert after devb. */
 193	list_move(&deva->power.entry, &devb->power.entry);
 194}
 195
 196/**
 197 * device_pm_move_last - Move device to end of the PM core's list of devices.
 198 * @dev: Device to move in dpm_list.
 199 */
 200void device_pm_move_last(struct device *dev)
 201{
 202	pr_debug("Moving %s:%s to end of list\n",
 203		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 204	list_move_tail(&dev->power.entry, &dpm_list);
 205}
 206
 207static ktime_t initcall_debug_start(struct device *dev, void *cb)
 208{
 209	if (!pm_print_times_enabled)
 210		return 0;
 211
 212	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 213		 task_pid_nr(current),
 214		 dev->parent ? dev_name(dev->parent) : "none");
 215	return ktime_get();
 216}
 217
 218static void initcall_debug_report(struct device *dev, ktime_t calltime,
 219				  void *cb, int error)
 220{
 221	ktime_t rettime;
 222
 223	if (!pm_print_times_enabled)
 224		return;
 225
 226	rettime = ktime_get();
 227	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 228		 (unsigned long long)ktime_us_delta(rettime, calltime));
 229}
 230
 231/**
 232 * dpm_wait - Wait for a PM operation to complete.
 233 * @dev: Device to wait for.
234	 * @async: If unset, wait only if pm_async_enabled and the device's power.async_suspend flag are both set.
 235 */
 236static void dpm_wait(struct device *dev, bool async)
 237{
 238	if (!dev)
 239		return;
 240
 241	if (async || (pm_async_enabled && dev->power.async_suspend))
 242		wait_for_completion(&dev->power.completion);
 243}
 244
 245static int dpm_wait_fn(struct device *dev, void *async_ptr)
 246{
 247	dpm_wait(dev, *((bool *)async_ptr));
 248	return 0;
 249}
 250
 251static void dpm_wait_for_children(struct device *dev, bool async)
 252{
253	device_for_each_child(dev, &async, dpm_wait_fn);
 254}
 255
 256static void dpm_wait_for_suppliers(struct device *dev, bool async)
 257{
 258	struct device_link *link;
 259	int idx;
 260
 261	idx = device_links_read_lock();
 262
 263	/*
 264	 * If the supplier goes away right after we've checked the link to it,
 265	 * we'll wait for its completion to change the state, but that's fine,
 266	 * because the only things that will block as a result are the SRCU
 267	 * callbacks freeing the link objects for the links in the list we're
 268	 * walking.
 269	 */
 270	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 271		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 272			dpm_wait(link->supplier, async);
 273
 274	device_links_read_unlock(idx);
 275}
 276
 277static bool dpm_wait_for_superior(struct device *dev, bool async)
 278{
 279	struct device *parent;
 280
 281	/*
 282	 * If the device is resumed asynchronously and the parent's callback
 283	 * deletes both the device and the parent itself, the parent object may
 284	 * be freed while this function is running, so avoid that by reference
 285	 * counting the parent once more unless the device has been deleted
 286	 * already (in which case return right away).
 287	 */
 288	mutex_lock(&dpm_list_mtx);
 289
 290	if (!device_pm_initialized(dev)) {
 291		mutex_unlock(&dpm_list_mtx);
 292		return false;
 293	}
 294
 295	parent = get_device(dev->parent);
 296
 297	mutex_unlock(&dpm_list_mtx);
 298
 299	dpm_wait(parent, async);
 300	put_device(parent);
 301
 302	dpm_wait_for_suppliers(dev, async);
 303
 304	/*
 305	 * If the parent's callback has deleted the device, attempting to resume
 306	 * it would be invalid, so avoid doing that then.
 307	 */
 308	return device_pm_initialized(dev);
 309}
 310
 311static void dpm_wait_for_consumers(struct device *dev, bool async)
 312{
 313	struct device_link *link;
 314	int idx;
 315
 316	idx = device_links_read_lock();
 317
 318	/*
 319	 * The status of a device link can only be changed from "dormant" by a
 320	 * probe, but that cannot happen during system suspend/resume.  In
 321	 * theory it can change to "dormant" at that time, but then it is
322	 * reasonable to wait for the target device anyway (e.g. if it goes
 323	 * away, it's better to wait for it to go away completely and then
 324	 * continue instead of trying to continue in parallel with its
 325	 * unregistration).
 326	 */
 327	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 328		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 329			dpm_wait(link->consumer, async);
 330
 331	device_links_read_unlock(idx);
 332}
 333
 334static void dpm_wait_for_subordinate(struct device *dev, bool async)
 335{
 336	dpm_wait_for_children(dev, async);
 337	dpm_wait_for_consumers(dev, async);
 338}
 339
 340/**
 341 * pm_op - Return the PM operation appropriate for given PM event.
 342 * @ops: PM operations to choose from.
 343 * @state: PM transition of the system being carried out.
 344 */
 345static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 346{
 347	switch (state.event) {
 348#ifdef CONFIG_SUSPEND
 349	case PM_EVENT_SUSPEND:
 350		return ops->suspend;
 351	case PM_EVENT_RESUME:
 352		return ops->resume;
 353#endif /* CONFIG_SUSPEND */
 354#ifdef CONFIG_HIBERNATE_CALLBACKS
 355	case PM_EVENT_FREEZE:
 356	case PM_EVENT_QUIESCE:
 357		return ops->freeze;
 358	case PM_EVENT_HIBERNATE:
 359		return ops->poweroff;
 360	case PM_EVENT_THAW:
 361	case PM_EVENT_RECOVER:
 362		return ops->thaw;
 363	case PM_EVENT_RESTORE:
 364		return ops->restore;
 365#endif /* CONFIG_HIBERNATE_CALLBACKS */
 366	}
 367
 368	return NULL;
 369}
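/*
 * Illustrative sketch (hypothetical driver, not part of this file): with
 * the dev_pm_ops below, pm_op() picks foo_suspend for PM_EVENT_SUSPEND
 * and foo_resume for PM_EVENT_RESUME; the macro fills the hibernation
 * slots (freeze/thaw/poweroff/restore) with the same pair.
 */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};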
 370
 371/**
 372 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 373 * @ops: PM operations to choose from.
 374 * @state: PM transition of the system being carried out.
 375 *
376	 * Runtime PM is disabled for the target device while the returned callback runs.
 377 */
 378static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 379				      pm_message_t state)
 380{
 381	switch (state.event) {
 382#ifdef CONFIG_SUSPEND
 383	case PM_EVENT_SUSPEND:
 384		return ops->suspend_late;
 385	case PM_EVENT_RESUME:
 386		return ops->resume_early;
 387#endif /* CONFIG_SUSPEND */
 388#ifdef CONFIG_HIBERNATE_CALLBACKS
 389	case PM_EVENT_FREEZE:
 390	case PM_EVENT_QUIESCE:
 391		return ops->freeze_late;
 392	case PM_EVENT_HIBERNATE:
 393		return ops->poweroff_late;
 394	case PM_EVENT_THAW:
 395	case PM_EVENT_RECOVER:
 396		return ops->thaw_early;
 397	case PM_EVENT_RESTORE:
 398		return ops->restore_early;
 399#endif /* CONFIG_HIBERNATE_CALLBACKS */
 400	}
 401
 402	return NULL;
 403}
 404
 405/**
 406 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 407 * @ops: PM operations to choose from.
 408 * @state: PM transition of the system being carried out.
 409 *
410	 * The driver of the target device will not receive interrupts while the
411	 * returned callback is being executed.
 412 */
 413static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 414{
 415	switch (state.event) {
 416#ifdef CONFIG_SUSPEND
 417	case PM_EVENT_SUSPEND:
 418		return ops->suspend_noirq;
 419	case PM_EVENT_RESUME:
 420		return ops->resume_noirq;
 421#endif /* CONFIG_SUSPEND */
 422#ifdef CONFIG_HIBERNATE_CALLBACKS
 423	case PM_EVENT_FREEZE:
 424	case PM_EVENT_QUIESCE:
 425		return ops->freeze_noirq;
 426	case PM_EVENT_HIBERNATE:
 427		return ops->poweroff_noirq;
 428	case PM_EVENT_THAW:
 429	case PM_EVENT_RECOVER:
 430		return ops->thaw_noirq;
 431	case PM_EVENT_RESTORE:
 432		return ops->restore_noirq;
 433#endif /* CONFIG_HIBERNATE_CALLBACKS */
 434	}
 435
 436	return NULL;
 437}
 438
 439static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 440{
 441	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
 442		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 443		", may wakeup" : "", dev->power.driver_flags);
 444}
 445
 446static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 447			int error)
 448{
 449	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
 450		error);
 451}
 452
 453static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 454			  const char *info)
 455{
 456	ktime_t calltime;
 457	u64 usecs64;
 458	int usecs;
 459
 460	calltime = ktime_get();
 461	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 462	do_div(usecs64, NSEC_PER_USEC);
 463	usecs = usecs64;
 464	if (usecs == 0)
 465		usecs = 1;
 466
 467	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 468		  info ?: "", info ? " " : "", pm_verb(state.event),
 469		  error ? "aborted" : "complete",
 470		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 471}
 472
 473static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 474			    pm_message_t state, const char *info)
 475{
 476	ktime_t calltime;
 477	int error;
 478
 479	if (!cb)
 480		return 0;
 481
 482	calltime = initcall_debug_start(dev, cb);
 483
 484	pm_dev_dbg(dev, state, info);
 485	trace_device_pm_callback_start(dev, info, state.event);
 486	error = cb(dev);
 487	trace_device_pm_callback_end(dev, error);
 488	suspend_report_result(dev, cb, error);
 489
 490	initcall_debug_report(dev, calltime, cb, error);
 491
 492	return error;
 493}
 494
 495#ifdef CONFIG_DPM_WATCHDOG
 496struct dpm_watchdog {
 497	struct device		*dev;
 498	struct task_struct	*tsk;
 499	struct timer_list	timer;
 500};
 501
 502#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 503	struct dpm_watchdog wd
 504
 505/**
 506 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 507 * @t: The timer that PM watchdog depends on.
 508 *
 509 * Called when a driver has timed out suspending or resuming.
510	 * There's not much we can do here to recover, so panic() to
 511 * capture a crash-dump in pstore.
 512 */
 513static void dpm_watchdog_handler(struct timer_list *t)
 514{
 515	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 516
 517	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 518	show_stack(wd->tsk, NULL, KERN_EMERG);
 519	panic("%s %s: unrecoverable failure\n",
 520		dev_driver_string(wd->dev), dev_name(wd->dev));
 521}
 522
 523/**
 524 * dpm_watchdog_set - Enable pm watchdog for given device.
 525 * @wd: Watchdog. Must be allocated on the stack.
 526 * @dev: Device to handle.
 527 */
 528static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 529{
 530	struct timer_list *timer = &wd->timer;
 531
 532	wd->dev = dev;
 533	wd->tsk = current;
 534
 535	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 536	/* use same timeout value for both suspend and resume */
 537	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 538	add_timer(timer);
 539}
 540
 541/**
 542 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 543 * @wd: Watchdog to disable.
 544 */
 545static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 546{
 547	struct timer_list *timer = &wd->timer;
 548
 549	del_timer_sync(timer);
 550	destroy_timer_on_stack(timer);
 551}
 552#else
 553#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 554#define dpm_watchdog_set(x, y)
 555#define dpm_watchdog_clear(x)
 556#endif
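/*
 * Usage sketch (mirroring device_resume() further below): the watchdog
 * brackets a potentially slow callback, and if that callback does not
 * return within CONFIG_DPM_WATCHDOG_TIMEOUT seconds the handler above
 * panics to capture a crash dump:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */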
 557
 558/*------------------------- Resume routines -------------------------*/
 559
 560/**
 561 * dev_pm_skip_resume - System-wide device resume optimization check.
 562 * @dev: Target device.
 563 *
 564 * Return:
 565 * - %false if the transition under way is RESTORE.
 566 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 567 * - The logical negation of %power.must_resume otherwise (that is, when the
 568 *   transition under way is RESUME).
 569 */
 570bool dev_pm_skip_resume(struct device *dev)
 571{
 572	if (pm_transition.event == PM_EVENT_RESTORE)
 573		return false;
 574
 575	if (pm_transition.event == PM_EVENT_THAW)
 576		return dev_pm_skip_suspend(dev);
 577
 578	return !dev->power.must_resume;
 579}
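/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver allowing its resume callbacks to be skipped.  Together with
 * power.may_skip_resume, this is what keeps power.must_resume unset in
 * __device_suspend_noirq() below, making dev_pm_skip_resume() true.
 */
static int bar_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}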
 580
 581/**
 582 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 583 * @dev: Device to handle.
 584 * @state: PM transition of the system being carried out.
 585 * @async: If true, the device is being resumed asynchronously.
 586 *
 587 * The driver of @dev will not receive interrupts while this function is being
 588 * executed.
 589 */
 590static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 591{
 592	pm_callback_t callback = NULL;
 593	const char *info = NULL;
 594	bool skip_resume;
 595	int error = 0;
 596
 597	TRACE_DEVICE(dev);
 598	TRACE_RESUME(0);
 599
 600	if (dev->power.syscore || dev->power.direct_complete)
 601		goto Out;
 602
 603	if (!dev->power.is_noirq_suspended)
 604		goto Out;
 605
 606	if (!dpm_wait_for_superior(dev, async))
 607		goto Out;
 608
 609	skip_resume = dev_pm_skip_resume(dev);
 610	/*
 611	 * If the driver callback is skipped below or by the middle layer
 612	 * callback and device_resume_early() also skips the driver callback for
 613	 * this device later, it needs to appear as "suspended" to PM-runtime,
 614	 * so change its status accordingly.
 615	 *
 616	 * Otherwise, the device is going to be resumed, so set its PM-runtime
 617	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 618	 * to avoid confusing drivers that don't use it.
 619	 */
 620	if (skip_resume)
 621		pm_runtime_set_suspended(dev);
 622	else if (dev_pm_skip_suspend(dev))
 623		pm_runtime_set_active(dev);
 624
 625	if (dev->pm_domain) {
 626		info = "noirq power domain ";
 627		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 628	} else if (dev->type && dev->type->pm) {
 629		info = "noirq type ";
 630		callback = pm_noirq_op(dev->type->pm, state);
 631	} else if (dev->class && dev->class->pm) {
 632		info = "noirq class ";
 633		callback = pm_noirq_op(dev->class->pm, state);
 634	} else if (dev->bus && dev->bus->pm) {
 635		info = "noirq bus ";
 636		callback = pm_noirq_op(dev->bus->pm, state);
 637	}
 638	if (callback)
 639		goto Run;
 640
 641	if (skip_resume)
 642		goto Skip;
 643
 644	if (dev->driver && dev->driver->pm) {
 645		info = "noirq driver ";
 646		callback = pm_noirq_op(dev->driver->pm, state);
 647	}
 648
 649Run:
 650	error = dpm_run_callback(callback, dev, state, info);
 651
 652Skip:
 653	dev->power.is_noirq_suspended = false;
 654
 655Out:
 656	complete_all(&dev->power.completion);
 657	TRACE_RESUME(error);
 658
 659	if (error) {
 660		suspend_stats.failed_resume_noirq++;
 661		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 662		dpm_save_failed_dev(dev_name(dev));
 663		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 664	}
 665}
 666
 667static bool is_async(struct device *dev)
 668{
 669	return dev->power.async_suspend && pm_async_enabled
 670		&& !pm_trace_is_enabled();
 671}
 672
 673static bool dpm_async_fn(struct device *dev, async_func_t func)
 674{
 675	reinit_completion(&dev->power.completion);
 676
 677	if (is_async(dev)) {
 678		dev->power.async_in_progress = true;
 679
 680		get_device(dev);
 681
 682		if (async_schedule_dev_nocall(func, dev))
 683			return true;
 684
 685		put_device(dev);
 686	}
 687	/*
 688	 * Because async_schedule_dev_nocall() above has returned false or it
 689	 * has not been called at all, func() is not running and it is safe to
 690	 * update the async_in_progress flag without extra synchronization.
 691	 */
 692	dev->power.async_in_progress = false;
 693	return false;
 694}
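/*
 * Illustrative sketch (not part of this file): devices opt into the
 * async path tested by is_async() above via device_enable_async_suspend()
 * (assumed here to be the static inline helper from <linux/device.h>),
 * typically before the device is prepared:
 */
static void foo_setup(struct device *dev)
{
	device_enable_async_suspend(dev);
}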
 695
 696static void async_resume_noirq(void *data, async_cookie_t cookie)
 697{
 698	struct device *dev = data;
 699
 700	device_resume_noirq(dev, pm_transition, true);
 701	put_device(dev);
 702}
 703
 704static void dpm_noirq_resume_devices(pm_message_t state)
 705{
 706	struct device *dev;
 707	ktime_t starttime = ktime_get();
 708
 709	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 710	mutex_lock(&dpm_list_mtx);
 711	pm_transition = state;
 712
 713	/*
 714	 * Trigger the resume of "async" devices upfront so they don't have to
 715	 * wait for the "non-async" ones they don't depend on.
 716	 */
 717	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 718		dpm_async_fn(dev, async_resume_noirq);
 719
 720	while (!list_empty(&dpm_noirq_list)) {
 721		dev = to_device(dpm_noirq_list.next);
 722		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 723
 724		if (!dev->power.async_in_progress) {
 725			get_device(dev);
 726
 727			mutex_unlock(&dpm_list_mtx);
 728
 729			device_resume_noirq(dev, state, false);
 730
 731			put_device(dev);
 732
 733			mutex_lock(&dpm_list_mtx);
 734		}
 735	}
 736	mutex_unlock(&dpm_list_mtx);
 737	async_synchronize_full();
 738	dpm_show_time(starttime, state, 0, "noirq");
 739	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 740}
 741
 742/**
 743 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 744 * @state: PM transition of the system being carried out.
 745 *
 746 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 747 * allow device drivers' interrupt handlers to be called.
 748 */
 749void dpm_resume_noirq(pm_message_t state)
 750{
 751	dpm_noirq_resume_devices(state);
 752
 753	resume_device_irqs();
 754	device_wakeup_disarm_wake_irqs();
 755}
 756
 757/**
 758 * device_resume_early - Execute an "early resume" callback for given device.
 759 * @dev: Device to handle.
 760 * @state: PM transition of the system being carried out.
 761 * @async: If true, the device is being resumed asynchronously.
 762 *
 763 * Runtime PM is disabled for @dev while this function is being executed.
 764 */
 765static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 766{
 767	pm_callback_t callback = NULL;
 768	const char *info = NULL;
 769	int error = 0;
 770
 771	TRACE_DEVICE(dev);
 772	TRACE_RESUME(0);
 773
 774	if (dev->power.syscore || dev->power.direct_complete)
 775		goto Out;
 776
 777	if (!dev->power.is_late_suspended)
 778		goto Out;
 779
 780	if (!dpm_wait_for_superior(dev, async))
 781		goto Out;
 782
 783	if (dev->pm_domain) {
 784		info = "early power domain ";
 785		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 786	} else if (dev->type && dev->type->pm) {
 787		info = "early type ";
 788		callback = pm_late_early_op(dev->type->pm, state);
 789	} else if (dev->class && dev->class->pm) {
 790		info = "early class ";
 791		callback = pm_late_early_op(dev->class->pm, state);
 792	} else if (dev->bus && dev->bus->pm) {
 793		info = "early bus ";
 794		callback = pm_late_early_op(dev->bus->pm, state);
 795	}
 796	if (callback)
 797		goto Run;
 798
 799	if (dev_pm_skip_resume(dev))
 800		goto Skip;
 801
 802	if (dev->driver && dev->driver->pm) {
 803		info = "early driver ";
 804		callback = pm_late_early_op(dev->driver->pm, state);
 805	}
 806
 807Run:
 808	error = dpm_run_callback(callback, dev, state, info);
 809
 810Skip:
 811	dev->power.is_late_suspended = false;
 812
 813Out:
 814	TRACE_RESUME(error);
 815
 816	pm_runtime_enable(dev);
 817	complete_all(&dev->power.completion);
 818
 819	if (error) {
 820		suspend_stats.failed_resume_early++;
 821		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 822		dpm_save_failed_dev(dev_name(dev));
 823		pm_dev_err(dev, state, async ? " async early" : " early", error);
 824	}
 825}
 826
 827static void async_resume_early(void *data, async_cookie_t cookie)
 828{
 829	struct device *dev = data;
 830
 831	device_resume_early(dev, pm_transition, true);
 832	put_device(dev);
 833}
 834
 835/**
 836 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 837 * @state: PM transition of the system being carried out.
 838 */
 839void dpm_resume_early(pm_message_t state)
 840{
 841	struct device *dev;
 842	ktime_t starttime = ktime_get();
 843
 844	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 845	mutex_lock(&dpm_list_mtx);
 846	pm_transition = state;
 847
 848	/*
 849	 * Trigger the resume of "async" devices upfront so they don't have to
 850	 * wait for the "non-async" ones they don't depend on.
 851	 */
 852	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 853		dpm_async_fn(dev, async_resume_early);
 854
 855	while (!list_empty(&dpm_late_early_list)) {
 856		dev = to_device(dpm_late_early_list.next);
 857		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 858
 859		if (!dev->power.async_in_progress) {
 860			get_device(dev);
 861
 862			mutex_unlock(&dpm_list_mtx);
 863
 864			device_resume_early(dev, state, false);
 865
 866			put_device(dev);
 867
 868			mutex_lock(&dpm_list_mtx);
 869		}
 870	}
 871	mutex_unlock(&dpm_list_mtx);
 872	async_synchronize_full();
 873	dpm_show_time(starttime, state, 0, "early");
 874	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 875}
 876
 877/**
 878 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 879 * @state: PM transition of the system being carried out.
 880 */
 881void dpm_resume_start(pm_message_t state)
 882{
 883	dpm_resume_noirq(state);
 884	dpm_resume_early(state);
 885}
 886EXPORT_SYMBOL_GPL(dpm_resume_start);
 887
 888/**
 889 * device_resume - Execute "resume" callbacks for given device.
 890 * @dev: Device to handle.
 891 * @state: PM transition of the system being carried out.
 892 * @async: If true, the device is being resumed asynchronously.
 893 */
 894static void device_resume(struct device *dev, pm_message_t state, bool async)
 895{
 896	pm_callback_t callback = NULL;
 897	const char *info = NULL;
 898	int error = 0;
 899	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 900
 901	TRACE_DEVICE(dev);
 902	TRACE_RESUME(0);
 903
 904	if (dev->power.syscore)
 905		goto Complete;
 906
 907	if (dev->power.direct_complete) {
 908		/* Match the pm_runtime_disable() in __device_suspend(). */
 909		pm_runtime_enable(dev);
 910		goto Complete;
 911	}
 912
 913	if (!dpm_wait_for_superior(dev, async))
 914		goto Complete;
 915
 916	dpm_watchdog_set(&wd, dev);
 917	device_lock(dev);
 918
 919	/*
 920	 * This is a fib.  But we'll allow new children to be added below
 921	 * a resumed device, even if the device hasn't been completed yet.
 922	 */
 923	dev->power.is_prepared = false;
 924
 925	if (!dev->power.is_suspended)
 926		goto Unlock;
 927
 928	if (dev->pm_domain) {
 929		info = "power domain ";
 930		callback = pm_op(&dev->pm_domain->ops, state);
 931		goto Driver;
 932	}
 933
 934	if (dev->type && dev->type->pm) {
 935		info = "type ";
 936		callback = pm_op(dev->type->pm, state);
 937		goto Driver;
 938	}
 939
 940	if (dev->class && dev->class->pm) {
 941		info = "class ";
 942		callback = pm_op(dev->class->pm, state);
 943		goto Driver;
 944	}
 945
 946	if (dev->bus) {
 947		if (dev->bus->pm) {
 948			info = "bus ";
 949			callback = pm_op(dev->bus->pm, state);
 950		} else if (dev->bus->resume) {
 951			info = "legacy bus ";
 952			callback = dev->bus->resume;
 953			goto End;
 954		}
 955	}
 956
 957 Driver:
 958	if (!callback && dev->driver && dev->driver->pm) {
 959		info = "driver ";
 960		callback = pm_op(dev->driver->pm, state);
 961	}
 962
 963 End:
 964	error = dpm_run_callback(callback, dev, state, info);
 965	dev->power.is_suspended = false;
 966
 967 Unlock:
 968	device_unlock(dev);
 969	dpm_watchdog_clear(&wd);
 970
 971 Complete:
 972	complete_all(&dev->power.completion);
 973
 974	TRACE_RESUME(error);
 975
 976	if (error) {
 977		suspend_stats.failed_resume++;
 978		dpm_save_failed_step(SUSPEND_RESUME);
 979		dpm_save_failed_dev(dev_name(dev));
 980		pm_dev_err(dev, state, async ? " async" : "", error);
 981	}
 982}
 983
 984static void async_resume(void *data, async_cookie_t cookie)
 985{
 986	struct device *dev = data;
 987
 988	device_resume(dev, pm_transition, true);
 989	put_device(dev);
 990}
 991
 992/**
 993 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 994 * @state: PM transition of the system being carried out.
 995 *
 996 * Execute the appropriate "resume" callback for all devices whose status
 997 * indicates that they are suspended.
 998 */
 999void dpm_resume(pm_message_t state)
1000{
1001	struct device *dev;
1002	ktime_t starttime = ktime_get();
1003
1004	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1005	might_sleep();
1006
1007	mutex_lock(&dpm_list_mtx);
1008	pm_transition = state;
1009	async_error = 0;
1010
1011	/*
1012	 * Trigger the resume of "async" devices upfront so they don't have to
1013	 * wait for the "non-async" ones they don't depend on.
1014	 */
1015	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1016		dpm_async_fn(dev, async_resume);
1017
1018	while (!list_empty(&dpm_suspended_list)) {
1019		dev = to_device(dpm_suspended_list.next);
1020
1021		get_device(dev);
1022
1023		if (!dev->power.async_in_progress) {
1024			mutex_unlock(&dpm_list_mtx);
1025
1026			device_resume(dev, state, false);
1027
1028			mutex_lock(&dpm_list_mtx);
1029		}
1030
1031		if (!list_empty(&dev->power.entry))
1032			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1033
1034		mutex_unlock(&dpm_list_mtx);
1035
1036		put_device(dev);
1037
1038		mutex_lock(&dpm_list_mtx);
1039	}
1040	mutex_unlock(&dpm_list_mtx);
1041	async_synchronize_full();
1042	dpm_show_time(starttime, state, 0, NULL);
1043
1044	cpufreq_resume();
1045	devfreq_resume();
1046	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1047}
1048
1049/**
1050 * device_complete - Complete a PM transition for given device.
1051 * @dev: Device to handle.
1052 * @state: PM transition of the system being carried out.
1053 */
1054static void device_complete(struct device *dev, pm_message_t state)
1055{
1056	void (*callback)(struct device *) = NULL;
1057	const char *info = NULL;
1058
1059	if (dev->power.syscore)
1060		goto out;
1061
1062	device_lock(dev);
1063
1064	if (dev->pm_domain) {
1065		info = "completing power domain ";
1066		callback = dev->pm_domain->ops.complete;
1067	} else if (dev->type && dev->type->pm) {
1068		info = "completing type ";
1069		callback = dev->type->pm->complete;
1070	} else if (dev->class && dev->class->pm) {
1071		info = "completing class ";
1072		callback = dev->class->pm->complete;
1073	} else if (dev->bus && dev->bus->pm) {
1074		info = "completing bus ";
1075		callback = dev->bus->pm->complete;
1076	}
1077
1078	if (!callback && dev->driver && dev->driver->pm) {
1079		info = "completing driver ";
1080		callback = dev->driver->pm->complete;
1081	}
1082
1083	if (callback) {
1084		pm_dev_dbg(dev, state, info);
1085		callback(dev);
1086	}
1087
1088	device_unlock(dev);
1089
1090out:
1091	pm_runtime_put(dev);
1092}
1093
1094/**
1095 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1096 * @state: PM transition of the system being carried out.
1097 *
1098 * Execute the ->complete() callbacks for all devices whose PM status is not
1099 * DPM_ON (this allows new devices to be registered).
1100 */
1101void dpm_complete(pm_message_t state)
1102{
1103	struct list_head list;
1104
1105	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1106	might_sleep();
1107
1108	INIT_LIST_HEAD(&list);
1109	mutex_lock(&dpm_list_mtx);
1110	while (!list_empty(&dpm_prepared_list)) {
1111		struct device *dev = to_device(dpm_prepared_list.prev);
1112
1113		get_device(dev);
1114		dev->power.is_prepared = false;
1115		list_move(&dev->power.entry, &list);
1116
1117		mutex_unlock(&dpm_list_mtx);
1118
1119		trace_device_pm_callback_start(dev, "", state.event);
1120		device_complete(dev, state);
1121		trace_device_pm_callback_end(dev, 0);
1122
1123		put_device(dev);
1124
1125		mutex_lock(&dpm_list_mtx);
1126	}
1127	list_splice(&list, &dpm_list);
1128	mutex_unlock(&dpm_list_mtx);
1129
1130	/* Allow device probing and trigger re-probing of deferred devices */
1131	device_unblock_probing();
1132	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1133}
1134
1135/**
1136 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1137 * @state: PM transition of the system being carried out.
1138 *
1139 * Execute "resume" callbacks for all devices and complete the PM transition of
1140 * the system.
1141 */
1142void dpm_resume_end(pm_message_t state)
1143{
1144	dpm_resume(state);
1145	dpm_complete(state);
1146}
1147EXPORT_SYMBOL_GPL(dpm_resume_end);
1148
1149
1150/*------------------------- Suspend routines -------------------------*/
1151
1152/**
1153 * resume_event - Return a "resume" message for given "suspend" sleep state.
1154 * @sleep_state: PM message representing a sleep state.
1155 *
1156 * Return a PM message representing the resume event corresponding to given
1157 * sleep state.
1158 */
1159static pm_message_t resume_event(pm_message_t sleep_state)
1160{
1161	switch (sleep_state.event) {
1162	case PM_EVENT_SUSPEND:
1163		return PMSG_RESUME;
1164	case PM_EVENT_FREEZE:
1165	case PM_EVENT_QUIESCE:
1166		return PMSG_RECOVER;
1167	case PM_EVENT_HIBERNATE:
1168		return PMSG_RESTORE;
1169	}
1170	return PMSG_ON;
1171}
1172
1173static void dpm_superior_set_must_resume(struct device *dev)
1174{
1175	struct device_link *link;
1176	int idx;
1177
1178	if (dev->parent)
1179		dev->parent->power.must_resume = true;
1180
1181	idx = device_links_read_lock();
1182
1183	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1184		link->supplier->power.must_resume = true;
1185
1186	device_links_read_unlock(idx);
1187}
1188
1189/**
1190 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1191 * @dev: Device to handle.
1192 * @state: PM transition of the system being carried out.
1193 * @async: If true, the device is being suspended asynchronously.
1194 *
1195 * The driver of @dev will not receive interrupts while this function is being
1196 * executed.
1197 */
1198static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1199{
1200	pm_callback_t callback = NULL;
1201	const char *info = NULL;
1202	int error = 0;
1203
1204	TRACE_DEVICE(dev);
1205	TRACE_SUSPEND(0);
1206
1207	dpm_wait_for_subordinate(dev, async);
1208
1209	if (async_error)
1210		goto Complete;
1211
1212	if (dev->power.syscore || dev->power.direct_complete)
1213		goto Complete;
1214
1215	if (dev->pm_domain) {
1216		info = "noirq power domain ";
1217		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1218	} else if (dev->type && dev->type->pm) {
1219		info = "noirq type ";
1220		callback = pm_noirq_op(dev->type->pm, state);
1221	} else if (dev->class && dev->class->pm) {
1222		info = "noirq class ";
1223		callback = pm_noirq_op(dev->class->pm, state);
1224	} else if (dev->bus && dev->bus->pm) {
1225		info = "noirq bus ";
1226		callback = pm_noirq_op(dev->bus->pm, state);
1227	}
1228	if (callback)
1229		goto Run;
1230
1231	if (dev_pm_skip_suspend(dev))
1232		goto Skip;
1233
1234	if (dev->driver && dev->driver->pm) {
1235		info = "noirq driver ";
1236		callback = pm_noirq_op(dev->driver->pm, state);
1237	}
1238
1239Run:
1240	error = dpm_run_callback(callback, dev, state, info);
1241	if (error) {
1242		async_error = error;
1243		goto Complete;
1244	}
1245
1246Skip:
1247	dev->power.is_noirq_suspended = true;
1248
1249	/*
1250	 * Skipping the resume of devices that were in use right before the
1251	 * system suspend (as indicated by their PM-runtime usage counters)
1252	 * would be suboptimal.  Also resume them if doing that is not allowed
1253	 * to be skipped.
1254	 */
1255	if (atomic_read(&dev->power.usage_count) > 1 ||
1256	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1257	      dev->power.may_skip_resume))
1258		dev->power.must_resume = true;
1259
1260	if (dev->power.must_resume)
1261		dpm_superior_set_must_resume(dev);
1262
1263Complete:
1264	complete_all(&dev->power.completion);
1265	TRACE_SUSPEND(error);
1266	return error;
1267}
1268
1269static void async_suspend_noirq(void *data, async_cookie_t cookie)
1270{
1271	struct device *dev = data;
1272	int error;
1273
1274	error = __device_suspend_noirq(dev, pm_transition, true);
1275	if (error) {
1276		dpm_save_failed_dev(dev_name(dev));
1277		pm_dev_err(dev, pm_transition, " async", error);
1278	}
1279
1280	put_device(dev);
1281}
1282
1283static int device_suspend_noirq(struct device *dev)
1284{
1285	if (dpm_async_fn(dev, async_suspend_noirq))
1286		return 0;
1287
1288	return __device_suspend_noirq(dev, pm_transition, false);
1289}
1290
1291static int dpm_noirq_suspend_devices(pm_message_t state)
1292{
1293	ktime_t starttime = ktime_get();
1294	int error = 0;
1295
1296	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1297	mutex_lock(&dpm_list_mtx);
1298	pm_transition = state;
1299	async_error = 0;
1300
1301	while (!list_empty(&dpm_late_early_list)) {
1302		struct device *dev = to_device(dpm_late_early_list.prev);
1303
1304		get_device(dev);
1305		mutex_unlock(&dpm_list_mtx);
1306
1307		error = device_suspend_noirq(dev);
1308
1309		mutex_lock(&dpm_list_mtx);
1310
1311		if (error) {
1312			pm_dev_err(dev, state, " noirq", error);
1313			dpm_save_failed_dev(dev_name(dev));
1314		} else if (!list_empty(&dev->power.entry)) {
1315			list_move(&dev->power.entry, &dpm_noirq_list);
1316		}
1317
1318		mutex_unlock(&dpm_list_mtx);
1319
1320		put_device(dev);
1321
1322		mutex_lock(&dpm_list_mtx);
1323
1324		if (error || async_error)
1325			break;
1326	}
1327	mutex_unlock(&dpm_list_mtx);
1328	async_synchronize_full();
1329	if (!error)
1330		error = async_error;
1331
1332	if (error) {
1333		suspend_stats.failed_suspend_noirq++;
1334		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1335	}
1336	dpm_show_time(starttime, state, error, "noirq");
1337	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1338	return error;
1339}
1340
1341/**
1342 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1343 * @state: PM transition of the system being carried out.
1344 *
1345 * Prevent device drivers' interrupt handlers from being called and invoke
1346 * "noirq" suspend callbacks for all non-sysdev devices.
1347 */
1348int dpm_suspend_noirq(pm_message_t state)
1349{
1350	int ret;
1351
1352	device_wakeup_arm_wake_irqs();
1353	suspend_device_irqs();
1354
1355	ret = dpm_noirq_suspend_devices(state);
1356	if (ret)
1357		dpm_resume_noirq(resume_event(state));
1358
1359	return ret;
1360}
1361
1362static void dpm_propagate_wakeup_to_parent(struct device *dev)
1363{
1364	struct device *parent = dev->parent;
1365
1366	if (!parent)
1367		return;
1368
1369	spin_lock_irq(&parent->power.lock);
1370
1371	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1372		parent->power.wakeup_path = true;
1373
1374	spin_unlock_irq(&parent->power.lock);
1375}
1376
1377/**
1378 * __device_suspend_late - Execute a "late suspend" callback for given device.
1379 * @dev: Device to handle.
1380 * @state: PM transition of the system being carried out.
1381 * @async: If true, the device is being suspended asynchronously.
1382 *
1383 * Runtime PM is disabled for @dev while this function is being executed.
1384 */
1385static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1386{
1387	pm_callback_t callback = NULL;
1388	const char *info = NULL;
1389	int error = 0;
1390
1391	TRACE_DEVICE(dev);
1392	TRACE_SUSPEND(0);
1393
1394	__pm_runtime_disable(dev, false);
1395
1396	dpm_wait_for_subordinate(dev, async);
1397
1398	if (async_error)
1399		goto Complete;
1400
1401	if (pm_wakeup_pending()) {
1402		async_error = -EBUSY;
1403		goto Complete;
1404	}
1405
1406	if (dev->power.syscore || dev->power.direct_complete)
1407		goto Complete;
1408
1409	if (dev->pm_domain) {
1410		info = "late power domain ";
1411		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1412	} else if (dev->type && dev->type->pm) {
1413		info = "late type ";
1414		callback = pm_late_early_op(dev->type->pm, state);
1415	} else if (dev->class && dev->class->pm) {
1416		info = "late class ";
1417		callback = pm_late_early_op(dev->class->pm, state);
1418	} else if (dev->bus && dev->bus->pm) {
1419		info = "late bus ";
1420		callback = pm_late_early_op(dev->bus->pm, state);
1421	}
1422	if (callback)
1423		goto Run;
1424
1425	if (dev_pm_skip_suspend(dev))
1426		goto Skip;
1427
1428	if (dev->driver && dev->driver->pm) {
1429		info = "late driver ";
1430		callback = pm_late_early_op(dev->driver->pm, state);
1431	}
1432
1433Run:
1434	error = dpm_run_callback(callback, dev, state, info);
1435	if (error) {
1436		async_error = error;
1437		goto Complete;
1438	}
1439	dpm_propagate_wakeup_to_parent(dev);
1440
1441Skip:
1442	dev->power.is_late_suspended = true;
1443
1444Complete:
1445	TRACE_SUSPEND(error);
1446	complete_all(&dev->power.completion);
1447	return error;
1448}
1449
1450static void async_suspend_late(void *data, async_cookie_t cookie)
1451{
1452	struct device *dev = data;
1453	int error;
1454
1455	error = __device_suspend_late(dev, pm_transition, true);
1456	if (error) {
1457		dpm_save_failed_dev(dev_name(dev));
1458		pm_dev_err(dev, pm_transition, " async", error);
1459	}
1460	put_device(dev);
1461}
1462
1463static int device_suspend_late(struct device *dev)
1464{
1465	if (dpm_async_fn(dev, async_suspend_late))
1466		return 0;
1467
1468	return __device_suspend_late(dev, pm_transition, false);
1469}
1470
1471/**
1472 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1473 * @state: PM transition of the system being carried out.
1474 */
1475int dpm_suspend_late(pm_message_t state)
1476{
1477	ktime_t starttime = ktime_get();
1478	int error = 0;
1479
1480	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1481	wake_up_all_idle_cpus();
1482	mutex_lock(&dpm_list_mtx);
1483	pm_transition = state;
1484	async_error = 0;
1485
1486	while (!list_empty(&dpm_suspended_list)) {
1487		struct device *dev = to_device(dpm_suspended_list.prev);
1488
1489		get_device(dev);
1490
1491		mutex_unlock(&dpm_list_mtx);
1492
1493		error = device_suspend_late(dev);
1494
1495		mutex_lock(&dpm_list_mtx);
1496
1497		if (!list_empty(&dev->power.entry))
1498			list_move(&dev->power.entry, &dpm_late_early_list);
1499
1500		if (error) {
1501			pm_dev_err(dev, state, " late", error);
1502			dpm_save_failed_dev(dev_name(dev));
1503		}
1504
1505		mutex_unlock(&dpm_list_mtx);
1506
1507		put_device(dev);
1508
1509		mutex_lock(&dpm_list_mtx);
1510
1511		if (error || async_error)
1512			break;
1513	}
1514	mutex_unlock(&dpm_list_mtx);
1515	async_synchronize_full();
1516	if (!error)
1517		error = async_error;
1518	if (error) {
1519		suspend_stats.failed_suspend_late++;
1520		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1521		dpm_resume_early(resume_event(state));
1522	}
1523	dpm_show_time(starttime, state, error, "late");
1524	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1525	return error;
1526}
1527
1528/**
1529 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1530 * @state: PM transition of the system being carried out.
1531 */
1532int dpm_suspend_end(pm_message_t state)
1533{
1534	ktime_t starttime = ktime_get();
1535	int error;
1536
1537	error = dpm_suspend_late(state);
1538	if (error)
1539		goto out;
1540
1541	error = dpm_suspend_noirq(state);
1542	if (error)
1543		dpm_resume_early(resume_event(state));
1544
1545out:
1546	dpm_show_time(starttime, state, error, "end");
1547	return error;
1548}
1549EXPORT_SYMBOL_GPL(dpm_suspend_end);
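/*
 * Simplified ordering sketch (illustration only, not the literal
 * kernel/power/suspend.c flow): how the exported entry points nest
 * around a platform's low-level sleep transition.  On a late/noirq
 * failure, dpm_suspend_end() has already unwound those phases itself.
 */
static int example_sleep(pm_message_t state)
{
	int error = dpm_suspend_start(state);	/* prepare + suspend */

	if (!error) {
		error = dpm_suspend_end(state);	/* late + noirq */
		if (!error) {
			/* ... enter and leave the sleep state here ... */
			dpm_resume_start(resume_event(state)); /* noirq + early */
		}
	}
	dpm_resume_end(resume_event(state));	/* resume + complete */
	return error;
}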
1550
1551/**
1552 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1553 * @dev: Device to suspend.
1554 * @state: PM transition of the system being carried out.
1555 * @cb: Suspend callback to execute.
1556 * @info: string description of caller.
1557 */
1558static int legacy_suspend(struct device *dev, pm_message_t state,
1559			  int (*cb)(struct device *dev, pm_message_t state),
1560			  const char *info)
1561{
1562	int error;
1563	ktime_t calltime;
1564
1565	calltime = initcall_debug_start(dev, cb);
1566
1567	trace_device_pm_callback_start(dev, info, state.event);
1568	error = cb(dev, state);
1569	trace_device_pm_callback_end(dev, error);
1570	suspend_report_result(dev, cb, error);
1571
1572	initcall_debug_report(dev, calltime, cb, error);
1573
1574	return error;
1575}
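/*
 * Illustrative sketch (hypothetical, not part of this file): the legacy
 * callback shape consumed by legacy_suspend() above; note the extra
 * pm_message_t argument that modern dev_pm_ops callbacks no longer take.
 */
static int foo_legacy_suspend(struct device *dev, pm_message_t state)
{
	dev_dbg(dev, "legacy suspend, event %d\n", state.event);
	return 0;
}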
1576
1577static void dpm_clear_superiors_direct_complete(struct device *dev)
1578{
1579	struct device_link *link;
1580	int idx;
1581
1582	if (dev->parent) {
1583		spin_lock_irq(&dev->parent->power.lock);
1584		dev->parent->power.direct_complete = false;
1585		spin_unlock_irq(&dev->parent->power.lock);
1586	}
1587
1588	idx = device_links_read_lock();
1589
1590	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1591		spin_lock_irq(&link->supplier->power.lock);
1592		link->supplier->power.direct_complete = false;
1593		spin_unlock_irq(&link->supplier->power.lock);
1594	}
1595
1596	device_links_read_unlock(idx);
1597}
1598
1599/**
1600 * __device_suspend - Execute "suspend" callbacks for given device.
1601 * @dev: Device to handle.
1602 * @state: PM transition of the system being carried out.
1603 * @async: If true, the device is being suspended asynchronously.
1604 */
1605static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1606{
1607	pm_callback_t callback = NULL;
1608	const char *info = NULL;
1609	int error = 0;
1610	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1611
1612	TRACE_DEVICE(dev);
1613	TRACE_SUSPEND(0);
1614
1615	dpm_wait_for_subordinate(dev, async);
1616
1617	if (async_error) {
1618		dev->power.direct_complete = false;
1619		goto Complete;
1620	}
1621
1622	/*
1623	 * Wait for possible runtime PM transitions of the device in progress
1624	 * to complete and if there's a runtime resume request pending for it,
1625	 * resume it before proceeding with invoking the system-wide suspend
1626	 * callbacks for it.
1627	 *
1628	 * If the system-wide suspend callbacks below change the configuration
1629	 * of the device, they must disable runtime PM for it or otherwise
1630	 * ensure that its runtime-resume callbacks will not be confused by that
1631	 * change in case they are invoked going forward.
1632	 */
1633	pm_runtime_barrier(dev);
1634
1635	if (pm_wakeup_pending()) {
1636		dev->power.direct_complete = false;
1637		async_error = -EBUSY;
1638		goto Complete;
1639	}
1640
1641	if (dev->power.syscore)
1642		goto Complete;
1643
1644	/* Avoid direct_complete to let wakeup_path propagate. */
1645	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1646		dev->power.direct_complete = false;
1647
1648	if (dev->power.direct_complete) {
1649		if (pm_runtime_status_suspended(dev)) {
1650			pm_runtime_disable(dev);
1651			if (pm_runtime_status_suspended(dev)) {
1652				pm_dev_dbg(dev, state, "direct-complete ");
1653				goto Complete;
1654			}
1655
1656			pm_runtime_enable(dev);
1657		}
1658		dev->power.direct_complete = false;
1659	}
1660
1661	dev->power.may_skip_resume = true;
1662	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1663
1664	dpm_watchdog_set(&wd, dev);
1665	device_lock(dev);
1666
1667	if (dev->pm_domain) {
1668		info = "power domain ";
1669		callback = pm_op(&dev->pm_domain->ops, state);
1670		goto Run;
1671	}
1672
1673	if (dev->type && dev->type->pm) {
1674		info = "type ";
1675		callback = pm_op(dev->type->pm, state);
1676		goto Run;
1677	}
1678
1679	if (dev->class && dev->class->pm) {
1680		info = "class ";
1681		callback = pm_op(dev->class->pm, state);
1682		goto Run;
1683	}
1684
1685	if (dev->bus) {
1686		if (dev->bus->pm) {
1687			info = "bus ";
1688			callback = pm_op(dev->bus->pm, state);
1689		} else if (dev->bus->suspend) {
1690			pm_dev_dbg(dev, state, "legacy bus ");
1691			error = legacy_suspend(dev, state, dev->bus->suspend,
1692						"legacy bus ");
1693			goto End;
1694		}
1695	}
1696
1697 Run:
1698	if (!callback && dev->driver && dev->driver->pm) {
1699		info = "driver ";
1700		callback = pm_op(dev->driver->pm, state);
1701	}
1702
1703	error = dpm_run_callback(callback, dev, state, info);
1704
1705 End:
1706	if (!error) {
1707		dev->power.is_suspended = true;
1708		if (device_may_wakeup(dev))
1709			dev->power.wakeup_path = true;
1710
1711		dpm_propagate_wakeup_to_parent(dev);
1712		dpm_clear_superiors_direct_complete(dev);
1713	}
1714
1715	device_unlock(dev);
1716	dpm_watchdog_clear(&wd);
1717
1718 Complete:
1719	if (error)
1720		async_error = error;
1721
1722	complete_all(&dev->power.completion);
1723	TRACE_SUSPEND(error);
1724	return error;
1725}
1726
1727static void async_suspend(void *data, async_cookie_t cookie)
1728{
1729	struct device *dev = data;
1730	int error;
1731
1732	error = __device_suspend(dev, pm_transition, true);
1733	if (error) {
1734		dpm_save_failed_dev(dev_name(dev));
1735		pm_dev_err(dev, pm_transition, " async", error);
1736	}
1737
1738	put_device(dev);
1739}
1740
1741static int device_suspend(struct device *dev)
1742{
1743	if (dpm_async_fn(dev, async_suspend))
1744		return 0;
1745
1746	return __device_suspend(dev, pm_transition, false);
1747}
1748
1749/**
1750 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1751 * @state: PM transition of the system being carried out.
1752 */
1753int dpm_suspend(pm_message_t state)
1754{
1755	ktime_t starttime = ktime_get();
1756	int error = 0;
1757
1758	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1759	might_sleep();
1760
1761	devfreq_suspend();
1762	cpufreq_suspend();
1763
1764	mutex_lock(&dpm_list_mtx);
1765	pm_transition = state;
1766	async_error = 0;
1767	while (!list_empty(&dpm_prepared_list)) {
1768		struct device *dev = to_device(dpm_prepared_list.prev);
1769
1770		get_device(dev);
1771
1772		mutex_unlock(&dpm_list_mtx);
1773
1774		error = device_suspend(dev);
1775
1776		mutex_lock(&dpm_list_mtx);
1777
1778		if (error) {
1779			pm_dev_err(dev, state, "", error);
1780			dpm_save_failed_dev(dev_name(dev));
1781		} else if (!list_empty(&dev->power.entry)) {
1782			list_move(&dev->power.entry, &dpm_suspended_list);
1783		}
1784
1785		mutex_unlock(&dpm_list_mtx);
1786
1787		put_device(dev);
1788
1789		mutex_lock(&dpm_list_mtx);
1790
1791		if (error || async_error)
1792			break;
1793	}
1794	mutex_unlock(&dpm_list_mtx);
1795	async_synchronize_full();
1796	if (!error)
1797		error = async_error;
1798	if (error) {
1799		suspend_stats.failed_suspend++;
1800		dpm_save_failed_step(SUSPEND_SUSPEND);
1801	}
1802	dpm_show_time(starttime, state, error, NULL);
1803	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1804	return error;
1805}
1806
1807/**
1808 * device_prepare - Prepare a device for system power transition.
1809 * @dev: Device to handle.
1810 * @state: PM transition of the system being carried out.
1811 *
1812 * Execute the ->prepare() callback(s) for given device.  No new children of the
1813 * device may be registered after this function has returned.
1814 */
1815static int device_prepare(struct device *dev, pm_message_t state)
1816{
1817	int (*callback)(struct device *) = NULL;
1818	int ret = 0;
1819
1820	/*
1821	 * If a device's parent goes into runtime suspend at the wrong time,
1822	 * it won't be possible to resume the device.  To prevent this we
1823	 * block runtime suspend here, during the prepare phase, and allow
1824	 * it again during the complete phase.
1825	 */
1826	pm_runtime_get_noresume(dev);
1827
1828	if (dev->power.syscore)
1829		return 0;
1830
1831	device_lock(dev);
1832
1833	dev->power.wakeup_path = false;
1834
1835	if (dev->power.no_pm_callbacks)
1836		goto unlock;
1837
1838	if (dev->pm_domain)
1839		callback = dev->pm_domain->ops.prepare;
1840	else if (dev->type && dev->type->pm)
1841		callback = dev->type->pm->prepare;
1842	else if (dev->class && dev->class->pm)
1843		callback = dev->class->pm->prepare;
1844	else if (dev->bus && dev->bus->pm)
1845		callback = dev->bus->pm->prepare;
1846
1847	if (!callback && dev->driver && dev->driver->pm)
1848		callback = dev->driver->pm->prepare;
1849
1850	if (callback)
1851		ret = callback(dev);
1852
1853unlock:
1854	device_unlock(dev);
1855
1856	if (ret < 0) {
1857		suspend_report_result(dev, callback, ret);
1858		pm_runtime_put(dev);
1859		return ret;
1860	}
1861	/*
1862	 * A positive return value from ->prepare() means "this device appears
1863	 * to be runtime-suspended and its state is fine, so if it really is
1864	 * runtime-suspended, you can leave it in that state provided that you
1865	 * will do the same thing with all of its descendants".  This only
1866	 * applies to suspend transitions, however.
1867	 */
1868	spin_lock_irq(&dev->power.lock);
1869	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1870		(ret > 0 || dev->power.no_pm_callbacks) &&
1871		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1872	spin_unlock_irq(&dev->power.lock);
1873	return 0;
1874}
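
/*
 * Illustrative sketch (not part of the original file): a ->prepare()
 * callback that opts in to the direct_complete optimization described in
 * the comment above.  bar_prepare() is a hypothetical driver callback.
 */
static int bar_prepare(struct device *dev)
{
	/*
	 * Returning a positive value tells the PM core that, if the
	 * device is runtime-suspended, it may be left in that state for
	 * the whole system suspend, provided its descendants are handled
	 * the same way and DPM_FLAG_NO_DIRECT_COMPLETE is not set.
	 */
	return pm_runtime_suspended(dev) ? 1 : 0;
}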
1875
1876/**
1877 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1878 * @state: PM transition of the system being carried out.
1879 *
1880 * Execute the ->prepare() callback(s) for all devices.
1881 */
1882int dpm_prepare(pm_message_t state)
1883{
1884	int error = 0;
1885
1886	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1887	might_sleep();
1888
1889	/*
1890	 * Give the known devices a chance to complete their probes before
1891	 * probing is disabled below.  This sync point is important at least
1892	 * at boot time and during hibernation restore.
1893	 */
1894	wait_for_device_probe();
1895	/*
1896	 * Probing a device during suspend or hibernation is unsafe and
1897	 * makes system behavior unpredictable, so prohibit device probing
1898	 * here and defer any probes instead.  Normal probing behavior is
1899	 * restored in dpm_complete().
1900	 */
1901	device_block_probing();
1902
1903	mutex_lock(&dpm_list_mtx);
1904	while (!list_empty(&dpm_list) && !error) {
1905		struct device *dev = to_device(dpm_list.next);
1906
1907		get_device(dev);
1908
1909		mutex_unlock(&dpm_list_mtx);
1910
1911		trace_device_pm_callback_start(dev, "", state.event);
1912		error = device_prepare(dev, state);
1913		trace_device_pm_callback_end(dev, error);
1914
1915		mutex_lock(&dpm_list_mtx);
1916
1917		if (!error) {
1918			dev->power.is_prepared = true;
1919			if (!list_empty(&dev->power.entry))
1920				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1921		} else if (error == -EAGAIN) {
1922			error = 0;
1923		} else {
1924			dev_info(dev, "not prepared for power transition: code %d\n",
1925				 error);
1926		}
1927
1928		mutex_unlock(&dpm_list_mtx);
1929
1930		put_device(dev);
1931
1932		mutex_lock(&dpm_list_mtx);
1933	}
1934	mutex_unlock(&dpm_list_mtx);
1935	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1936	return error;
1937}
1938
1939/**
1940 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1941 * @state: PM transition of the system being carried out.
1942 *
1943 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1944 * callbacks for them.
1945 */
1946int dpm_suspend_start(pm_message_t state)
1947{
1948	ktime_t starttime = ktime_get();
1949	int error;
1950
1951	error = dpm_prepare(state);
1952	if (error) {
1953		suspend_stats.failed_prepare++;
1954		dpm_save_failed_step(SUSPEND_PREPARE);
1955	} else
1956		error = dpm_suspend(state);
1957	dpm_show_time(starttime, state, error, "start");
1958	return error;
1959}
1960EXPORT_SYMBOL_GPL(dpm_suspend_start);
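
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * system suspend core uses this entry point.  On failure, the devices
 * already prepared or suspended must be brought back with
 * dpm_resume_end(); see kernel/power/suspend.c for the real sequence.
 */
static int example_suspend_devices(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		/* Roll back whatever was prepared or suspended. */
		dpm_resume_end(PMSG_RESUME);
		return error;
	}
	/* ...continue with dpm_suspend_end(), platform hooks, etc. ... */
	return 0;
}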
1961
1962void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1963{
1964	if (ret)
1965		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1966}
1967EXPORT_SYMBOL_GPL(__suspend_report_result);
1968
1969/**
1970 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1971 * @subordinate: Device that needs to wait for @dev.
1972 * @dev: Device to wait for.
1973 */
1974int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1975{
1976	dpm_wait(dev, subordinate->power.async_suspend);
1977	return async_error;
1978}
1979EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
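
/*
 * Illustrative sketch (not part of the original file): a driver using
 * asynchronous suspend can order itself after another device that is
 * neither its parent nor linked to it.  The baz_* names and the "peer"
 * pointer are hypothetical.
 */
struct baz_data {
	struct device *peer;	/* device whose callback must finish first */
};

static int baz_suspend(struct device *dev)
{
	struct baz_data *data = dev_get_drvdata(dev);
	int error;

	/* Block until the peer's suspend/resume callback has completed. */
	error = device_pm_wait_for_dev(dev, data->peer);
	if (error)
		return error;

	/* ...now quiesce hardware that depends on the peer... */
	return 0;
}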
1980
1981/**
1982 * dpm_for_each_dev - device iterator.
1983 * @data: data for the callback.
1984 * @fn: function to be called for each device.
1985 *
1986 * Iterate over devices in dpm_list, and call @fn for each device,
1987 * passing it @data.
1988 */
1989void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1990{
1991	struct device *dev;
1992
1993	if (!fn)
1994		return;
1995
1996	device_pm_lock();
1997	list_for_each_entry(dev, &dpm_list, power.entry)
1998		fn(dev, data);
1999	device_pm_unlock();
2000}
2001EXPORT_SYMBOL_GPL(dpm_for_each_dev);
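
/*
 * Illustrative sketch (not part of the original file): counting the
 * devices currently on dpm_list.  The callback runs with the PM core's
 * device list locked, so it must not call device_pm_lock() itself.
 * The example_* names are hypothetical.
 */
static void example_count_one(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_count_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, example_count_one);
	return count;
}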
2002
2003static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2004{
2005	if (!ops)
2006		return true;
2007
2008	return !ops->prepare &&
2009	       !ops->suspend &&
2010	       !ops->suspend_late &&
2011	       !ops->suspend_noirq &&
2012	       !ops->resume_noirq &&
2013	       !ops->resume_early &&
2014	       !ops->resume &&
2015	       !ops->complete;
2016}
2017
2018void device_pm_check_callbacks(struct device *dev)
2019{
2020	unsigned long flags;
2021
2022	spin_lock_irqsave(&dev->power.lock, flags);
2023	dev->power.no_pm_callbacks =
2024		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2025		 !dev->bus->suspend && !dev->bus->resume)) &&
2026		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2027		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2028		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2029		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2030		 !dev->driver->suspend && !dev->driver->resume));
2031	spin_unlock_irqrestore(&dev->power.lock, flags);
2032}
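
/*
 * Note: when no_pm_callbacks ends up set here, device_prepare() above may
 * mark the device for direct_complete even without a positive ->prepare()
 * return value, allowing the PM core to skip it during the transition.
 */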
2033
2034bool dev_pm_skip_suspend(struct device *dev)
2035{
2036	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2037		pm_runtime_status_suspended(dev);
2038}
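
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * runtime-suspend state is also adequate for system suspend can set
 * DPM_FLAG_SMART_SUSPEND at probe time, making dev_pm_skip_suspend()
 * return true whenever the device is already runtime-suspended.
 * qux_probe() is hypothetical.
 */
static int qux_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}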