v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/mutex.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm-trace.h>
  26#include <linux/pm_wakeirq.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/sched/debug.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43#define list_for_each_entry_rcu_locked(pos, head, member) \
  44	list_for_each_entry_rcu(pos, head, member, \
  45			device_links_read_lock_held())
  46
  47/*
  48 * The entries in the dpm_list list are in a depth first order, simply
  49 * because children are guaranteed to be discovered after parents, and
  50 * are inserted at the back of the list on discovery.
  51 *
  52 * Since device_pm_add() may be called with a device lock held,
  53 * we must never try to acquire a device lock while holding
  54 * dpm_list_mutex.
  55 */
  56
  57LIST_HEAD(dpm_list);
  58static LIST_HEAD(dpm_prepared_list);
  59static LIST_HEAD(dpm_suspended_list);
  60static LIST_HEAD(dpm_late_early_list);
  61static LIST_HEAD(dpm_noirq_list);
  62
  63struct suspend_stats suspend_stats;
  64static DEFINE_MUTEX(dpm_list_mtx);
  65static pm_message_t pm_transition;
  66
  67static int async_error;
  68
  69static const char *pm_verb(int event)
  70{
  71	switch (event) {
  72	case PM_EVENT_SUSPEND:
  73		return "suspend";
  74	case PM_EVENT_RESUME:
  75		return "resume";
  76	case PM_EVENT_FREEZE:
  77		return "freeze";
  78	case PM_EVENT_QUIESCE:
  79		return "quiesce";
  80	case PM_EVENT_HIBERNATE:
  81		return "hibernate";
  82	case PM_EVENT_THAW:
  83		return "thaw";
  84	case PM_EVENT_RESTORE:
  85		return "restore";
  86	case PM_EVENT_RECOVER:
  87		return "recover";
  88	default:
  89		return "(unknown PM event)";
  90	}
  91}
  92
  93/**
  94 * device_pm_sleep_init - Initialize system suspend-related device fields.
  95 * @dev: Device object being initialized.
  96 */
  97void device_pm_sleep_init(struct device *dev)
  98{
  99	dev->power.is_prepared = false;
 100	dev->power.is_suspended = false;
 101	dev->power.is_noirq_suspended = false;
 102	dev->power.is_late_suspended = false;
 103	init_completion(&dev->power.completion);
 104	complete_all(&dev->power.completion);
 105	dev->power.wakeup = NULL;
 106	INIT_LIST_HEAD(&dev->power.entry);
 107}
 108
 109/**
 110 * device_pm_lock - Lock the list of active devices used by the PM core.
 111 */
 112void device_pm_lock(void)
 113{
 114	mutex_lock(&dpm_list_mtx);
 115}
 116
 117/**
 118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 119 */
 120void device_pm_unlock(void)
 121{
 122	mutex_unlock(&dpm_list_mtx);
 123}
 124
 125/**
 126 * device_pm_add - Add a device to the PM core's list of active devices.
 127 * @dev: Device to add to the list.
 128 */
 129void device_pm_add(struct device *dev)
 130{
 131	/* Skip PM setup/initialization. */
 132	if (device_pm_not_required(dev))
 133		return;
 134
 135	pr_debug("Adding info for %s:%s\n",
 136		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 137	device_pm_check_callbacks(dev);
 138	mutex_lock(&dpm_list_mtx);
 139	if (dev->parent && dev->parent->power.is_prepared)
 140		dev_warn(dev, "parent %s should not be sleeping\n",
 141			dev_name(dev->parent));
 142	list_add_tail(&dev->power.entry, &dpm_list);
 143	dev->power.in_dpm_list = true;
 144	mutex_unlock(&dpm_list_mtx);
 145}
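/*
 * Illustrative sketch (not part of this file): the driver core calls
 * device_pm_add() from device_add() in drivers/base/core.c, roughly:
 *
 *	int device_add(struct device *dev)
 *	{
 *		...
 *		device_pm_add(dev);	// append to dpm_list, after its parent
 *		...
 *	}
 *
 * Because children are always registered after their parents, dpm_list
 * keeps the depth-first order described above.
 */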
 146
 147/**
 148 * device_pm_remove - Remove a device from the PM core's list of active devices.
 149 * @dev: Device to be removed from the list.
 150 */
 151void device_pm_remove(struct device *dev)
 152{
 153	if (device_pm_not_required(dev))
 154		return;
 155
 156	pr_debug("Removing info for %s:%s\n",
 157		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 158	complete_all(&dev->power.completion);
 159	mutex_lock(&dpm_list_mtx);
 160	list_del_init(&dev->power.entry);
 161	dev->power.in_dpm_list = false;
 162	mutex_unlock(&dpm_list_mtx);
 163	device_wakeup_disable(dev);
 164	pm_runtime_remove(dev);
 165	device_pm_check_callbacks(dev);
 166}
 167
 168/**
 169 * device_pm_move_before - Move device in the PM core's list of active devices.
 170 * @deva: Device to move in dpm_list.
 171 * @devb: Device @deva should come before.
 172 */
 173void device_pm_move_before(struct device *deva, struct device *devb)
 174{
 175	pr_debug("Moving %s:%s before %s:%s\n",
 176		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 177		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 178	/* Delete deva from dpm_list and reinsert before devb. */
 179	list_move_tail(&deva->power.entry, &devb->power.entry);
 180}
 181
 182/**
 183 * device_pm_move_after - Move device in the PM core's list of active devices.
 184 * @deva: Device to move in dpm_list.
 185 * @devb: Device @deva should come after.
 186 */
 187void device_pm_move_after(struct device *deva, struct device *devb)
 188{
 189	pr_debug("Moving %s:%s after %s:%s\n",
 190		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 191		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 192	/* Delete deva from dpm_list and reinsert after devb. */
 193	list_move(&deva->power.entry, &devb->power.entry);
 194}
 195
 196/**
 197 * device_pm_move_last - Move device to end of the PM core's list of devices.
 198 * @dev: Device to move in dpm_list.
 199 */
 200void device_pm_move_last(struct device *dev)
 201{
 202	pr_debug("Moving %s:%s to end of list\n",
 203		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 204	list_move_tail(&dev->power.entry, &dpm_list);
 205}
 206
 207static ktime_t initcall_debug_start(struct device *dev, void *cb)
 208{
 209	if (!pm_print_times_enabled)
 210		return 0;
 211
 212	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 213		 task_pid_nr(current),
 214		 dev->parent ? dev_name(dev->parent) : "none");
 215	return ktime_get();
 216}
 217
 218static void initcall_debug_report(struct device *dev, ktime_t calltime,
 219				  void *cb, int error)
 220{
 221	ktime_t rettime;
 222	s64 nsecs;
 223
 224	if (!pm_print_times_enabled)
 225		return;
 226
 227	rettime = ktime_get();
 228	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 229
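	/* The shift by 10 below divides by 1024, a cheap approximation of ns -> us. */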
 230	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 231		 (unsigned long long)nsecs >> 10);
 232}
 233
 234/**
 235 * dpm_wait - Wait for a PM operation to complete.
 236 * @dev: Device to wait for.
 237 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 238 */
 239static void dpm_wait(struct device *dev, bool async)
 240{
 241	if (!dev)
 242		return;
 243
 244	if (async || (pm_async_enabled && dev->power.async_suspend))
 245		wait_for_completion(&dev->power.completion);
 246}
 247
 248static int dpm_wait_fn(struct device *dev, void *async_ptr)
 249{
 250	dpm_wait(dev, *((bool *)async_ptr));
 251	return 0;
 252}
 253
 254static void dpm_wait_for_children(struct device *dev, bool async)
 255{
  256	device_for_each_child(dev, &async, dpm_wait_fn);
 257}
 258
 259static void dpm_wait_for_suppliers(struct device *dev, bool async)
 260{
 261	struct device_link *link;
 262	int idx;
 263
 264	idx = device_links_read_lock();
 265
 266	/*
 267	 * If the supplier goes away right after we've checked the link to it,
 268	 * we'll wait for its completion to change the state, but that's fine,
 269	 * because the only things that will block as a result are the SRCU
 270	 * callbacks freeing the link objects for the links in the list we're
 271	 * walking.
 272	 */
 273	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 274		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 275			dpm_wait(link->supplier, async);
 276
 277	device_links_read_unlock(idx);
 278}
 279
 280static bool dpm_wait_for_superior(struct device *dev, bool async)
 281{
 282	struct device *parent;
 283
 284	/*
 285	 * If the device is resumed asynchronously and the parent's callback
 286	 * deletes both the device and the parent itself, the parent object may
 287	 * be freed while this function is running, so avoid that by reference
 288	 * counting the parent once more unless the device has been deleted
 289	 * already (in which case return right away).
 290	 */
 291	mutex_lock(&dpm_list_mtx);
 292
 293	if (!device_pm_initialized(dev)) {
 294		mutex_unlock(&dpm_list_mtx);
 295		return false;
 296	}
 297
 298	parent = get_device(dev->parent);
 299
 300	mutex_unlock(&dpm_list_mtx);
 301
 302	dpm_wait(parent, async);
 303	put_device(parent);
 304
 305	dpm_wait_for_suppliers(dev, async);
 306
 307	/*
 308	 * If the parent's callback has deleted the device, attempting to resume
 309	 * it would be invalid, so avoid doing that then.
 310	 */
 311	return device_pm_initialized(dev);
 312}
 313
 314static void dpm_wait_for_consumers(struct device *dev, bool async)
 315{
 316	struct device_link *link;
 317	int idx;
 318
 319	idx = device_links_read_lock();
 320
 321	/*
 322	 * The status of a device link can only be changed from "dormant" by a
 323	 * probe, but that cannot happen during system suspend/resume.  In
 324	 * theory it can change to "dormant" at that time, but then it is
 325	 * reasonable to wait for the target device anyway (eg. if it goes
 326	 * away, it's better to wait for it to go away completely and then
 327	 * continue instead of trying to continue in parallel with its
 328	 * unregistration).
 329	 */
 330	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 331		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 332			dpm_wait(link->consumer, async);
 333
 334	device_links_read_unlock(idx);
 335}
 336
 337static void dpm_wait_for_subordinate(struct device *dev, bool async)
 338{
 339	dpm_wait_for_children(dev, async);
 340	dpm_wait_for_consumers(dev, async);
 341}
 342
 343/**
 344 * pm_op - Return the PM operation appropriate for given PM event.
 345 * @ops: PM operations to choose from.
 346 * @state: PM transition of the system being carried out.
 347 */
 348static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 349{
 350	switch (state.event) {
 351#ifdef CONFIG_SUSPEND
 352	case PM_EVENT_SUSPEND:
 353		return ops->suspend;
 354	case PM_EVENT_RESUME:
 355		return ops->resume;
 356#endif /* CONFIG_SUSPEND */
 357#ifdef CONFIG_HIBERNATE_CALLBACKS
 358	case PM_EVENT_FREEZE:
 359	case PM_EVENT_QUIESCE:
 360		return ops->freeze;
 361	case PM_EVENT_HIBERNATE:
 362		return ops->poweroff;
 363	case PM_EVENT_THAW:
 364	case PM_EVENT_RECOVER:
 365		return ops->thaw;
 367	case PM_EVENT_RESTORE:
 368		return ops->restore;
 369#endif /* CONFIG_HIBERNATE_CALLBACKS */
 370	}
 371
 372	return NULL;
 373}
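/*
 * Example (hypothetical driver, for illustration only): given a table like
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend, and because
 * SET_SYSTEM_SLEEP_PM_OPS() assigns the same pair to the hibernation
 * callbacks, PMSG_FREEZE and PMSG_HIBERNATE map to foo_suspend as well.
 */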
 374
 375/**
 376 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 377 * @ops: PM operations to choose from.
 378 * @state: PM transition of the system being carried out.
 379 *
  380 * Runtime PM is disabled for the device while the returned callback runs.
 381 */
 382static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 383				      pm_message_t state)
 384{
 385	switch (state.event) {
 386#ifdef CONFIG_SUSPEND
 387	case PM_EVENT_SUSPEND:
 388		return ops->suspend_late;
 389	case PM_EVENT_RESUME:
 390		return ops->resume_early;
 391#endif /* CONFIG_SUSPEND */
 392#ifdef CONFIG_HIBERNATE_CALLBACKS
 393	case PM_EVENT_FREEZE:
 394	case PM_EVENT_QUIESCE:
 395		return ops->freeze_late;
 396	case PM_EVENT_HIBERNATE:
 397		return ops->poweroff_late;
 398	case PM_EVENT_THAW:
 399	case PM_EVENT_RECOVER:
 400		return ops->thaw_early;
 401	case PM_EVENT_RESTORE:
 402		return ops->restore_early;
 403#endif /* CONFIG_HIBERNATE_CALLBACKS */
 404	}
 405
 406	return NULL;
 407}
 408
 409/**
 410 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 411 * @ops: PM operations to choose from.
 412 * @state: PM transition of the system being carried out.
 413 *
  414 * The driver of the target device will not receive interrupts while the
  415 * callback returned by this function is being executed.
 416 */
 417static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 418{
 419	switch (state.event) {
 420#ifdef CONFIG_SUSPEND
 421	case PM_EVENT_SUSPEND:
 422		return ops->suspend_noirq;
 423	case PM_EVENT_RESUME:
 424		return ops->resume_noirq;
 425#endif /* CONFIG_SUSPEND */
 426#ifdef CONFIG_HIBERNATE_CALLBACKS
 427	case PM_EVENT_FREEZE:
 428	case PM_EVENT_QUIESCE:
 429		return ops->freeze_noirq;
 430	case PM_EVENT_HIBERNATE:
 431		return ops->poweroff_noirq;
 432	case PM_EVENT_THAW:
 433	case PM_EVENT_RECOVER:
 434		return ops->thaw_noirq;
 435	case PM_EVENT_RESTORE:
 436		return ops->restore_noirq;
 437#endif /* CONFIG_HIBERNATE_CALLBACKS */
 438	}
 439
 440	return NULL;
 441}
 442
 443static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 444{
 445	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 446		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 447		", may wakeup" : "");
 448}
 449
 450static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 451			int error)
 452{
 453	pr_err("Device %s failed to %s%s: error %d\n",
 454	       dev_name(dev), pm_verb(state.event), info, error);
 455}
 456
 457static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 458			  const char *info)
 459{
 460	ktime_t calltime;
 461	u64 usecs64;
 462	int usecs;
 463
 464	calltime = ktime_get();
 465	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 466	do_div(usecs64, NSEC_PER_USEC);
 467	usecs = usecs64;
 468	if (usecs == 0)
 469		usecs = 1;
 470
 471	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 472		  info ?: "", info ? " " : "", pm_verb(state.event),
 473		  error ? "aborted" : "complete",
 474		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 475}
 476
 477static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 478			    pm_message_t state, const char *info)
 479{
 480	ktime_t calltime;
 481	int error;
 482
 483	if (!cb)
 484		return 0;
 485
 486	calltime = initcall_debug_start(dev, cb);
 487
 488	pm_dev_dbg(dev, state, info);
 489	trace_device_pm_callback_start(dev, info, state.event);
 490	error = cb(dev);
 491	trace_device_pm_callback_end(dev, error);
 492	suspend_report_result(cb, error);
 493
 494	initcall_debug_report(dev, calltime, cb, error);
 495
 496	return error;
 497}
 498
 499#ifdef CONFIG_DPM_WATCHDOG
 500struct dpm_watchdog {
 501	struct device		*dev;
 502	struct task_struct	*tsk;
 503	struct timer_list	timer;
 504};
 505
 506#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 507	struct dpm_watchdog wd
 508
 509/**
 510 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 511 * @t: The timer that PM watchdog depends on.
 512 *
 513 * Called when a driver has timed out suspending or resuming.
 514 * There's not much we can do here to recover so panic() to
 515 * capture a crash-dump in pstore.
 516 */
 517static void dpm_watchdog_handler(struct timer_list *t)
 518{
 519	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 520
 521	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 522	show_stack(wd->tsk, NULL, KERN_EMERG);
 523	panic("%s %s: unrecoverable failure\n",
 524		dev_driver_string(wd->dev), dev_name(wd->dev));
 525}
 526
 527/**
 528 * dpm_watchdog_set - Enable pm watchdog for given device.
 529 * @wd: Watchdog. Must be allocated on the stack.
 530 * @dev: Device to handle.
 531 */
 532static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 533{
 534	struct timer_list *timer = &wd->timer;
 535
 536	wd->dev = dev;
 537	wd->tsk = current;
 538
 539	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 540	/* use same timeout value for both suspend and resume */
 541	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 542	add_timer(timer);
 543}
 544
 545/**
 546 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 547 * @wd: Watchdog to disable.
 548 */
 549static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 550{
 551	struct timer_list *timer = &wd->timer;
 552
 553	del_timer_sync(timer);
 554	destroy_timer_on_stack(timer);
 555}
 556#else
 557#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 558#define dpm_watchdog_set(x, y)
 559#define dpm_watchdog_clear(x)
 560#endif
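/*
 * Note: the watchdog above is compiled in only with CONFIG_DPM_WATCHDOG=y;
 * the timeout is CONFIG_DPM_WATCHDOG_TIMEOUT seconds for both suspend and
 * resume. If a callback hangs past the timeout, the handler panics so that
 * a crash dump (e.g. via pstore) records the stuck task's stack.
 */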
 561
 562/*------------------------- Resume routines -------------------------*/
 563
 564/**
 565 * dev_pm_skip_resume - System-wide device resume optimization check.
 566 * @dev: Target device.
 567 *
 568 * Return:
 569 * - %false if the transition under way is RESTORE.
 570 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 571 * - The logical negation of %power.must_resume otherwise (that is, when the
 572 *   transition under way is RESUME).
 573 */
 574bool dev_pm_skip_resume(struct device *dev)
 575{
 576	if (pm_transition.event == PM_EVENT_RESTORE)
 577		return false;
 578
 579	if (pm_transition.event == PM_EVENT_THAW)
 580		return dev_pm_skip_suspend(dev);
 581
 582	return !dev->power.must_resume;
 583}
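/*
 * Illustrative sketch: a driver opts into skipping resume callbacks by
 * setting both driver flags (hypothetical probe function, assumed names):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		...
 *	}
 *
 * dev_pm_skip_resume() then returns true during RESUME as long as nothing
 * set power.must_resume while the device was suspended.
 */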
 584
 585/**
 586 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 587 * @dev: Device to handle.
 588 * @state: PM transition of the system being carried out.
 589 * @async: If true, the device is being resumed asynchronously.
 590 *
 591 * The driver of @dev will not receive interrupts while this function is being
 592 * executed.
 593 */
 594static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 595{
 596	pm_callback_t callback = NULL;
 597	const char *info = NULL;
 598	bool skip_resume;
 599	int error = 0;
 600
 601	TRACE_DEVICE(dev);
 602	TRACE_RESUME(0);
 603
 604	if (dev->power.syscore || dev->power.direct_complete)
 605		goto Out;
 606
 607	if (!dev->power.is_noirq_suspended)
 608		goto Out;
 609
 610	if (!dpm_wait_for_superior(dev, async))
 611		goto Out;
 612
 613	skip_resume = dev_pm_skip_resume(dev);
 614	/*
 615	 * If the driver callback is skipped below or by the middle layer
 616	 * callback and device_resume_early() also skips the driver callback for
 617	 * this device later, it needs to appear as "suspended" to PM-runtime,
 618	 * so change its status accordingly.
 619	 *
 620	 * Otherwise, the device is going to be resumed, so set its PM-runtime
 621	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 622	 * to avoid confusing drivers that don't use it.
 623	 */
 624	if (skip_resume)
 625		pm_runtime_set_suspended(dev);
 626	else if (dev_pm_skip_suspend(dev))
 627		pm_runtime_set_active(dev);
 628
 629	if (dev->pm_domain) {
 630		info = "noirq power domain ";
 631		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 632	} else if (dev->type && dev->type->pm) {
 633		info = "noirq type ";
 634		callback = pm_noirq_op(dev->type->pm, state);
 635	} else if (dev->class && dev->class->pm) {
 636		info = "noirq class ";
 637		callback = pm_noirq_op(dev->class->pm, state);
 638	} else if (dev->bus && dev->bus->pm) {
 639		info = "noirq bus ";
 640		callback = pm_noirq_op(dev->bus->pm, state);
 641	}
 642	if (callback)
 643		goto Run;
 644
 645	if (skip_resume)
 646		goto Skip;
 647
 648	if (dev->driver && dev->driver->pm) {
 649		info = "noirq driver ";
 650		callback = pm_noirq_op(dev->driver->pm, state);
 651	}
 652
 653Run:
 654	error = dpm_run_callback(callback, dev, state, info);
 655
 656Skip:
 657	dev->power.is_noirq_suspended = false;
 658
 659Out:
 660	complete_all(&dev->power.completion);
 661	TRACE_RESUME(error);
 662	return error;
 663}
 664
 665static bool is_async(struct device *dev)
 666{
 667	return dev->power.async_suspend && pm_async_enabled
 668		&& !pm_trace_is_enabled();
 669}
 670
 671static bool dpm_async_fn(struct device *dev, async_func_t func)
 672{
 673	reinit_completion(&dev->power.completion);
 674
 675	if (is_async(dev)) {
 676		get_device(dev);
 677		async_schedule_dev(func, dev);
 678		return true;
 679	}
 680
 681	return false;
 682}
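/*
 * Example (illustrative): a driver whose device can suspend and resume in
 * parallel with unrelated devices may request async handling at probe time
 * with:
 *
 *	device_enable_async_suspend(dev);
 *
 * dpm_async_fn() then schedules the callback in the async domain, while
 * dpm_wait() and the per-device completions still enforce parent-child and
 * supplier-consumer ordering.
 */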
 683
 684static void async_resume_noirq(void *data, async_cookie_t cookie)
 685{
 686	struct device *dev = (struct device *)data;
 687	int error;
 688
 689	error = device_resume_noirq(dev, pm_transition, true);
 690	if (error)
 691		pm_dev_err(dev, pm_transition, " async", error);
 692
 693	put_device(dev);
 694}
 695
 696static void dpm_noirq_resume_devices(pm_message_t state)
 697{
 698	struct device *dev;
 699	ktime_t starttime = ktime_get();
 700
 701	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 702	mutex_lock(&dpm_list_mtx);
 703	pm_transition = state;
 704
 705	/*
  706	 * Start the async threads upfront, so that they are not
  707	 * delayed behind devices that have to be resumed
  708	 * synchronously (non-async) below.
 709	 */
 710	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 711		dpm_async_fn(dev, async_resume_noirq);
 712
 713	while (!list_empty(&dpm_noirq_list)) {
 714		dev = to_device(dpm_noirq_list.next);
 715		get_device(dev);
 716		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 717		mutex_unlock(&dpm_list_mtx);
 718
 719		if (!is_async(dev)) {
 720			int error;
 721
 722			error = device_resume_noirq(dev, state, false);
 723			if (error) {
 724				suspend_stats.failed_resume_noirq++;
 725				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 726				dpm_save_failed_dev(dev_name(dev));
 727				pm_dev_err(dev, state, " noirq", error);
 728			}
 729		}
 730
 731		mutex_lock(&dpm_list_mtx);
 732		put_device(dev);
 733	}
 734	mutex_unlock(&dpm_list_mtx);
 735	async_synchronize_full();
 736	dpm_show_time(starttime, state, 0, "noirq");
 737	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 738}
 739
 740/**
 741 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 742 * @state: PM transition of the system being carried out.
 743 *
 744 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 745 * allow device drivers' interrupt handlers to be called.
 746 */
 747void dpm_resume_noirq(pm_message_t state)
 748{
 749	dpm_noirq_resume_devices(state);
 750
 751	resume_device_irqs();
 752	device_wakeup_disarm_wake_irqs();
 753
 754	cpuidle_resume();
 755}
 756
 757/**
 758 * device_resume_early - Execute an "early resume" callback for given device.
 759 * @dev: Device to handle.
 760 * @state: PM transition of the system being carried out.
 761 * @async: If true, the device is being resumed asynchronously.
 762 *
 763 * Runtime PM is disabled for @dev while this function is being executed.
 764 */
 765static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 766{
 767	pm_callback_t callback = NULL;
 768	const char *info = NULL;
 769	int error = 0;
 770
 771	TRACE_DEVICE(dev);
 772	TRACE_RESUME(0);
 773
 774	if (dev->power.syscore || dev->power.direct_complete)
 775		goto Out;
 776
 777	if (!dev->power.is_late_suspended)
 778		goto Out;
 779
 780	if (!dpm_wait_for_superior(dev, async))
 781		goto Out;
 782
 783	if (dev->pm_domain) {
 784		info = "early power domain ";
 785		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 786	} else if (dev->type && dev->type->pm) {
 787		info = "early type ";
 788		callback = pm_late_early_op(dev->type->pm, state);
 789	} else if (dev->class && dev->class->pm) {
 790		info = "early class ";
 791		callback = pm_late_early_op(dev->class->pm, state);
 792	} else if (dev->bus && dev->bus->pm) {
 793		info = "early bus ";
 794		callback = pm_late_early_op(dev->bus->pm, state);
 795	}
 796	if (callback)
 797		goto Run;
 798
 799	if (dev_pm_skip_resume(dev))
 800		goto Skip;
 801
 802	if (dev->driver && dev->driver->pm) {
 803		info = "early driver ";
 804		callback = pm_late_early_op(dev->driver->pm, state);
 805	}
 806
 807Run:
 808	error = dpm_run_callback(callback, dev, state, info);
 809
 810Skip:
 811	dev->power.is_late_suspended = false;
 812
 813Out:
 814	TRACE_RESUME(error);
 815
 816	pm_runtime_enable(dev);
 817	complete_all(&dev->power.completion);
 818	return error;
 819}
 820
 821static void async_resume_early(void *data, async_cookie_t cookie)
 822{
 823	struct device *dev = (struct device *)data;
 824	int error;
 825
 826	error = device_resume_early(dev, pm_transition, true);
 827	if (error)
 828		pm_dev_err(dev, pm_transition, " async", error);
 829
 830	put_device(dev);
 831}
 832
 833/**
 834 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 835 * @state: PM transition of the system being carried out.
 836 */
 837void dpm_resume_early(pm_message_t state)
 838{
 839	struct device *dev;
 840	ktime_t starttime = ktime_get();
 841
 842	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 843	mutex_lock(&dpm_list_mtx);
 844	pm_transition = state;
 845
 846	/*
  847	 * Start the async threads upfront, so that they are not
  848	 * delayed behind devices that have to be resumed
  849	 * synchronously (non-async) below.
 850	 */
 851	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 852		dpm_async_fn(dev, async_resume_early);
 853
 854	while (!list_empty(&dpm_late_early_list)) {
 855		dev = to_device(dpm_late_early_list.next);
 856		get_device(dev);
 857		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 858		mutex_unlock(&dpm_list_mtx);
 859
 860		if (!is_async(dev)) {
 861			int error;
 862
 863			error = device_resume_early(dev, state, false);
 864			if (error) {
 865				suspend_stats.failed_resume_early++;
 866				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 867				dpm_save_failed_dev(dev_name(dev));
 868				pm_dev_err(dev, state, " early", error);
 869			}
 870		}
 871		mutex_lock(&dpm_list_mtx);
 872		put_device(dev);
 873	}
 874	mutex_unlock(&dpm_list_mtx);
 875	async_synchronize_full();
 876	dpm_show_time(starttime, state, 0, "early");
 877	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 878}
 879
 880/**
 881 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 882 * @state: PM transition of the system being carried out.
 883 */
 884void dpm_resume_start(pm_message_t state)
 885{
 886	dpm_resume_noirq(state);
 887	dpm_resume_early(state);
 888}
 889EXPORT_SYMBOL_GPL(dpm_resume_start);
 890
 891/**
 892 * device_resume - Execute "resume" callbacks for given device.
 893 * @dev: Device to handle.
 894 * @state: PM transition of the system being carried out.
 895 * @async: If true, the device is being resumed asynchronously.
 896 */
 897static int device_resume(struct device *dev, pm_message_t state, bool async)
 898{
 899	pm_callback_t callback = NULL;
 900	const char *info = NULL;
 901	int error = 0;
 902	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 903
 904	TRACE_DEVICE(dev);
 905	TRACE_RESUME(0);
 906
 907	if (dev->power.syscore)
 908		goto Complete;
 909
 910	if (dev->power.direct_complete) {
 911		/* Match the pm_runtime_disable() in __device_suspend(). */
 912		pm_runtime_enable(dev);
 913		goto Complete;
 914	}
 915
 916	if (!dpm_wait_for_superior(dev, async))
 917		goto Complete;
 918
 919	dpm_watchdog_set(&wd, dev);
 920	device_lock(dev);
 921
 922	/*
 923	 * This is a fib.  But we'll allow new children to be added below
 924	 * a resumed device, even if the device hasn't been completed yet.
 925	 */
 926	dev->power.is_prepared = false;
 927
 928	if (!dev->power.is_suspended)
 929		goto Unlock;
 930
 931	if (dev->pm_domain) {
 932		info = "power domain ";
 933		callback = pm_op(&dev->pm_domain->ops, state);
 934		goto Driver;
 935	}
 936
 937	if (dev->type && dev->type->pm) {
 938		info = "type ";
 939		callback = pm_op(dev->type->pm, state);
 940		goto Driver;
 941	}
 942
 943	if (dev->class && dev->class->pm) {
 944		info = "class ";
 945		callback = pm_op(dev->class->pm, state);
 946		goto Driver;
 947	}
 948
 949	if (dev->bus) {
 950		if (dev->bus->pm) {
 951			info = "bus ";
 952			callback = pm_op(dev->bus->pm, state);
 953		} else if (dev->bus->resume) {
 954			info = "legacy bus ";
 955			callback = dev->bus->resume;
 956			goto End;
 957		}
 958	}
 959
 960 Driver:
 961	if (!callback && dev->driver && dev->driver->pm) {
 962		info = "driver ";
 963		callback = pm_op(dev->driver->pm, state);
 964	}
 965
 966 End:
 967	error = dpm_run_callback(callback, dev, state, info);
 968	dev->power.is_suspended = false;
 969
 970 Unlock:
 971	device_unlock(dev);
 972	dpm_watchdog_clear(&wd);
 973
 974 Complete:
 975	complete_all(&dev->power.completion);
 976
 977	TRACE_RESUME(error);
 978
 979	return error;
 980}
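/*
 * Note: device_resume() above illustrates the callback precedence used
 * throughout this file: power domain, then device type, then class, then
 * bus, with the driver's own callback only as a fallback when no middle
 * layer provides one (a legacy bus ->resume short-circuits the lookup).
 */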
 981
 982static void async_resume(void *data, async_cookie_t cookie)
 983{
 984	struct device *dev = (struct device *)data;
 985	int error;
 986
 987	error = device_resume(dev, pm_transition, true);
 988	if (error)
 989		pm_dev_err(dev, pm_transition, " async", error);
 990	put_device(dev);
 991}
 992
 993/**
 994 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 995 * @state: PM transition of the system being carried out.
 996 *
 997 * Execute the appropriate "resume" callback for all devices whose status
 998 * indicates that they are suspended.
 999 */
1000void dpm_resume(pm_message_t state)
1001{
1002	struct device *dev;
1003	ktime_t starttime = ktime_get();
1004
1005	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1006	might_sleep();
1007
1008	mutex_lock(&dpm_list_mtx);
1009	pm_transition = state;
1010	async_error = 0;
1011
1012	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1013		dpm_async_fn(dev, async_resume);
1014
1015	while (!list_empty(&dpm_suspended_list)) {
1016		dev = to_device(dpm_suspended_list.next);
1017		get_device(dev);
1018		if (!is_async(dev)) {
1019			int error;
1020
1021			mutex_unlock(&dpm_list_mtx);
1022
1023			error = device_resume(dev, state, false);
1024			if (error) {
1025				suspend_stats.failed_resume++;
1026				dpm_save_failed_step(SUSPEND_RESUME);
1027				dpm_save_failed_dev(dev_name(dev));
1028				pm_dev_err(dev, state, "", error);
1029			}
1030
1031			mutex_lock(&dpm_list_mtx);
1032		}
1033		if (!list_empty(&dev->power.entry))
1034			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1035		put_device(dev);
1036	}
1037	mutex_unlock(&dpm_list_mtx);
1038	async_synchronize_full();
1039	dpm_show_time(starttime, state, 0, NULL);
1040
1041	cpufreq_resume();
1042	devfreq_resume();
1043	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1044}
1045
1046/**
1047 * device_complete - Complete a PM transition for given device.
1048 * @dev: Device to handle.
1049 * @state: PM transition of the system being carried out.
1050 */
1051static void device_complete(struct device *dev, pm_message_t state)
1052{
1053	void (*callback)(struct device *) = NULL;
1054	const char *info = NULL;
1055
1056	if (dev->power.syscore)
1057		return;
1058
1059	device_lock(dev);
1060
1061	if (dev->pm_domain) {
1062		info = "completing power domain ";
1063		callback = dev->pm_domain->ops.complete;
1064	} else if (dev->type && dev->type->pm) {
1065		info = "completing type ";
1066		callback = dev->type->pm->complete;
1067	} else if (dev->class && dev->class->pm) {
1068		info = "completing class ";
1069		callback = dev->class->pm->complete;
1070	} else if (dev->bus && dev->bus->pm) {
1071		info = "completing bus ";
1072		callback = dev->bus->pm->complete;
1073	}
1074
1075	if (!callback && dev->driver && dev->driver->pm) {
1076		info = "completing driver ";
1077		callback = dev->driver->pm->complete;
1078	}
1079
1080	if (callback) {
1081		pm_dev_dbg(dev, state, info);
1082		callback(dev);
1083	}
1084
1085	device_unlock(dev);
1086
1087	pm_runtime_put(dev);
1088}
1089
1090/**
1091 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1092 * @state: PM transition of the system being carried out.
1093 *
1094 * Execute the ->complete() callbacks for all devices whose PM status is not
1095 * DPM_ON (this allows new devices to be registered).
1096 */
1097void dpm_complete(pm_message_t state)
1098{
1099	struct list_head list;
1100
1101	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1102	might_sleep();
1103
1104	INIT_LIST_HEAD(&list);
1105	mutex_lock(&dpm_list_mtx);
1106	while (!list_empty(&dpm_prepared_list)) {
1107		struct device *dev = to_device(dpm_prepared_list.prev);
1108
1109		get_device(dev);
1110		dev->power.is_prepared = false;
1111		list_move(&dev->power.entry, &list);
1112		mutex_unlock(&dpm_list_mtx);
1113
1114		trace_device_pm_callback_start(dev, "", state.event);
1115		device_complete(dev, state);
1116		trace_device_pm_callback_end(dev, 0);
1117
1118		mutex_lock(&dpm_list_mtx);
1119		put_device(dev);
1120	}
1121	list_splice(&list, &dpm_list);
1122	mutex_unlock(&dpm_list_mtx);
1123
1124	/* Allow device probing and trigger re-probing of deferred devices */
1125	device_unblock_probing();
1126	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1127}
1128
1129/**
1130 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1131 * @state: PM transition of the system being carried out.
1132 *
1133 * Execute "resume" callbacks for all devices and complete the PM transition of
1134 * the system.
1135 */
1136void dpm_resume_end(pm_message_t state)
1137{
1138	dpm_resume(state);
1139	dpm_complete(state);
1140}
1141EXPORT_SYMBOL_GPL(dpm_resume_end);
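/*
 * Illustrative call order (a sketch of how the system sleep core drives
 * the phases in this file; see kernel/power/suspend.c for the real
 * sequence):
 *
 *	dpm_prepare()       -> ->prepare() for all devices
 *	dpm_suspend()       -> ->suspend()
 *	dpm_suspend_late()  -> ->suspend_late()
 *	dpm_suspend_noirq() -> ->suspend_noirq()
 *	  ... system sleep ...
 *	dpm_resume_noirq()  -> ->resume_noirq()
 *	dpm_resume_early()  -> ->resume_early()
 *	dpm_resume()        -> ->resume()
 *	dpm_complete()      -> ->complete()
 */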
1142
1143
1144/*------------------------- Suspend routines -------------------------*/
1145
1146/**
1147 * resume_event - Return a "resume" message for given "suspend" sleep state.
1148 * @sleep_state: PM message representing a sleep state.
1149 *
1150 * Return a PM message representing the resume event corresponding to given
1151 * sleep state.
1152 */
1153static pm_message_t resume_event(pm_message_t sleep_state)
1154{
1155	switch (sleep_state.event) {
1156	case PM_EVENT_SUSPEND:
1157		return PMSG_RESUME;
1158	case PM_EVENT_FREEZE:
1159	case PM_EVENT_QUIESCE:
1160		return PMSG_RECOVER;
1161	case PM_EVENT_HIBERNATE:
1162		return PMSG_RESTORE;
1163	}
1164	return PMSG_ON;
1165}
1166
1167static void dpm_superior_set_must_resume(struct device *dev)
1168{
1169	struct device_link *link;
1170	int idx;
1171
1172	if (dev->parent)
1173		dev->parent->power.must_resume = true;
1174
1175	idx = device_links_read_lock();
1176
1177	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1178		link->supplier->power.must_resume = true;
1179
1180	device_links_read_unlock(idx);
1181}
1182
1183/**
1184 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1185 * @dev: Device to handle.
1186 * @state: PM transition of the system being carried out.
1187 * @async: If true, the device is being suspended asynchronously.
1188 *
1189 * The driver of @dev will not receive interrupts while this function is being
1190 * executed.
1191 */
1192static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193{
1194	pm_callback_t callback = NULL;
1195	const char *info = NULL;
1196	int error = 0;
1197
1198	TRACE_DEVICE(dev);
1199	TRACE_SUSPEND(0);
1200
1201	dpm_wait_for_subordinate(dev, async);
1202
1203	if (async_error)
1204		goto Complete;
1205
1206	if (dev->power.syscore || dev->power.direct_complete)
1207		goto Complete;
1208
1209	if (dev->pm_domain) {
1210		info = "noirq power domain ";
1211		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1212	} else if (dev->type && dev->type->pm) {
1213		info = "noirq type ";
1214		callback = pm_noirq_op(dev->type->pm, state);
1215	} else if (dev->class && dev->class->pm) {
1216		info = "noirq class ";
1217		callback = pm_noirq_op(dev->class->pm, state);
1218	} else if (dev->bus && dev->bus->pm) {
1219		info = "noirq bus ";
1220		callback = pm_noirq_op(dev->bus->pm, state);
1221	}
1222	if (callback)
1223		goto Run;
1224
1225	if (dev_pm_skip_suspend(dev))
1226		goto Skip;
1227
1228	if (dev->driver && dev->driver->pm) {
1229		info = "noirq driver ";
1230		callback = pm_noirq_op(dev->driver->pm, state);
1231	}
1232
1233Run:
1234	error = dpm_run_callback(callback, dev, state, info);
1235	if (error) {
1236		async_error = error;
1237		goto Complete;
1238	}
1239
1240Skip:
1241	dev->power.is_noirq_suspended = true;
1242
1243	/*
1244	 * Skipping the resume of devices that were in use right before the
1245	 * system suspend (as indicated by their PM-runtime usage counters)
 1246	 * would be suboptimal.  Also resume them if skipping their resume
 1247	 * is not allowed.
1248	 */
1249	if (atomic_read(&dev->power.usage_count) > 1 ||
1250	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1251	      dev->power.may_skip_resume))
1252		dev->power.must_resume = true;
1253
1254	if (dev->power.must_resume)
1255		dpm_superior_set_must_resume(dev);
1256
1257Complete:
1258	complete_all(&dev->power.completion);
1259	TRACE_SUSPEND(error);
1260	return error;
1261}
1262
1263static void async_suspend_noirq(void *data, async_cookie_t cookie)
1264{
1265	struct device *dev = (struct device *)data;
1266	int error;
1267
1268	error = __device_suspend_noirq(dev, pm_transition, true);
1269	if (error) {
1270		dpm_save_failed_dev(dev_name(dev));
1271		pm_dev_err(dev, pm_transition, " async", error);
1272	}
1273
1274	put_device(dev);
1275}
1276
1277static int device_suspend_noirq(struct device *dev)
1278{
1279	if (dpm_async_fn(dev, async_suspend_noirq))
1280		return 0;
1281
1282	return __device_suspend_noirq(dev, pm_transition, false);
1283}
1284
1285static int dpm_noirq_suspend_devices(pm_message_t state)
1286{
1287	ktime_t starttime = ktime_get();
1288	int error = 0;
1289
1290	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1291	mutex_lock(&dpm_list_mtx);
1292	pm_transition = state;
1293	async_error = 0;
1294
1295	while (!list_empty(&dpm_late_early_list)) {
1296		struct device *dev = to_device(dpm_late_early_list.prev);
1297
1298		get_device(dev);
1299		mutex_unlock(&dpm_list_mtx);
1300
1301		error = device_suspend_noirq(dev);
1302
1303		mutex_lock(&dpm_list_mtx);
1304		if (error) {
1305			pm_dev_err(dev, state, " noirq", error);
1306			dpm_save_failed_dev(dev_name(dev));
1307			put_device(dev);
1308			break;
1309		}
1310		if (!list_empty(&dev->power.entry))
1311			list_move(&dev->power.entry, &dpm_noirq_list);
1312		put_device(dev);
1313
1314		if (async_error)
1315			break;
1316	}
1317	mutex_unlock(&dpm_list_mtx);
1318	async_synchronize_full();
1319	if (!error)
1320		error = async_error;
1321
1322	if (error) {
1323		suspend_stats.failed_suspend_noirq++;
1324		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1325	}
1326	dpm_show_time(starttime, state, error, "noirq");
1327	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1328	return error;
1329}
1330
1331/**
1332 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1333 * @state: PM transition of the system being carried out.
1334 *
1335 * Prevent device drivers' interrupt handlers from being called and invoke
1336 * "noirq" suspend callbacks for all non-sysdev devices.
1337 */
1338int dpm_suspend_noirq(pm_message_t state)
1339{
1340	int ret;
1341
1342	cpuidle_pause();
1343
1344	device_wakeup_arm_wake_irqs();
1345	suspend_device_irqs();
1346
1347	ret = dpm_noirq_suspend_devices(state);
1348	if (ret)
1349		dpm_resume_noirq(resume_event(state));
1350
1351	return ret;
1352}
1353
1354static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355{
1356	struct device *parent = dev->parent;
1357
1358	if (!parent)
1359		return;
1360
1361	spin_lock_irq(&parent->power.lock);
1362
1363	if (dev->power.wakeup_path && !parent->power.ignore_children)
1364		parent->power.wakeup_path = true;
1365
1366	spin_unlock_irq(&parent->power.lock);
1367}
1368
1369/**
1370 * __device_suspend_late - Execute a "late suspend" callback for given device.
1371 * @dev: Device to handle.
1372 * @state: PM transition of the system being carried out.
1373 * @async: If true, the device is being suspended asynchronously.
1374 *
1375 * Runtime PM is disabled for @dev while this function is being executed.
1376 */
1377static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1378{
1379	pm_callback_t callback = NULL;
1380	const char *info = NULL;
1381	int error = 0;
1382
1383	TRACE_DEVICE(dev);
1384	TRACE_SUSPEND(0);
1385
1386	__pm_runtime_disable(dev, false);
1387
1388	dpm_wait_for_subordinate(dev, async);
1389
1390	if (async_error)
1391		goto Complete;
1392
1393	if (pm_wakeup_pending()) {
1394		async_error = -EBUSY;
1395		goto Complete;
1396	}
1397
1398	if (dev->power.syscore || dev->power.direct_complete)
1399		goto Complete;
1400
1401	if (dev->pm_domain) {
1402		info = "late power domain ";
1403		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1404	} else if (dev->type && dev->type->pm) {
1405		info = "late type ";
1406		callback = pm_late_early_op(dev->type->pm, state);
1407	} else if (dev->class && dev->class->pm) {
1408		info = "late class ";
1409		callback = pm_late_early_op(dev->class->pm, state);
1410	} else if (dev->bus && dev->bus->pm) {
1411		info = "late bus ";
1412		callback = pm_late_early_op(dev->bus->pm, state);
1413	}
1414	if (callback)
1415		goto Run;
1416
1417	if (dev_pm_skip_suspend(dev))
1418		goto Skip;
1419
1420	if (dev->driver && dev->driver->pm) {
1421		info = "late driver ";
1422		callback = pm_late_early_op(dev->driver->pm, state);
1423	}
1424
1425Run:
1426	error = dpm_run_callback(callback, dev, state, info);
1427	if (error) {
1428		async_error = error;
1429		goto Complete;
1430	}
1431	dpm_propagate_wakeup_to_parent(dev);
1432
1433Skip:
1434	dev->power.is_late_suspended = true;
1435
1436Complete:
1437	TRACE_SUSPEND(error);
1438	complete_all(&dev->power.completion);
1439	return error;
1440}
1441
1442static void async_suspend_late(void *data, async_cookie_t cookie)
1443{
1444	struct device *dev = (struct device *)data;
1445	int error;
1446
1447	error = __device_suspend_late(dev, pm_transition, true);
1448	if (error) {
1449		dpm_save_failed_dev(dev_name(dev));
1450		pm_dev_err(dev, pm_transition, " async", error);
1451	}
1452	put_device(dev);
1453}
1454
1455static int device_suspend_late(struct device *dev)
1456{
1457	if (dpm_async_fn(dev, async_suspend_late))
1458		return 0;
1459
1460	return __device_suspend_late(dev, pm_transition, false);
1461}
1462
1463/**
1464 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1465 * @state: PM transition of the system being carried out.
1466 */
1467int dpm_suspend_late(pm_message_t state)
1468{
1469	ktime_t starttime = ktime_get();
1470	int error = 0;
1471
1472	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1473	mutex_lock(&dpm_list_mtx);
1474	pm_transition = state;
1475	async_error = 0;
1476
1477	while (!list_empty(&dpm_suspended_list)) {
1478		struct device *dev = to_device(dpm_suspended_list.prev);
1479
1480		get_device(dev);
1481		mutex_unlock(&dpm_list_mtx);
1482
1483		error = device_suspend_late(dev);
1484
1485		mutex_lock(&dpm_list_mtx);
1486		if (!list_empty(&dev->power.entry))
1487			list_move(&dev->power.entry, &dpm_late_early_list);
1488
1489		if (error) {
1490			pm_dev_err(dev, state, " late", error);
1491			dpm_save_failed_dev(dev_name(dev));
1492			put_device(dev);
1493			break;
1494		}
1495		put_device(dev);
1496
1497		if (async_error)
1498			break;
1499	}
1500	mutex_unlock(&dpm_list_mtx);
1501	async_synchronize_full();
1502	if (!error)
1503		error = async_error;
1504	if (error) {
1505		suspend_stats.failed_suspend_late++;
1506		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1507		dpm_resume_early(resume_event(state));
1508	}
1509	dpm_show_time(starttime, state, error, "late");
1510	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1511	return error;
1512}
1513
1514/**
1515 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1516 * @state: PM transition of the system being carried out.
1517 */
1518int dpm_suspend_end(pm_message_t state)
1519{
1520	ktime_t starttime = ktime_get();
1521	int error;
1522
1523	error = dpm_suspend_late(state);
1524	if (error)
1525		goto out;
1526
1527	error = dpm_suspend_noirq(state);
1528	if (error)
1529		dpm_resume_early(resume_event(state));
1530
1531out:
1532	dpm_show_time(starttime, state, error, "end");
1533	return error;
1534}
1535EXPORT_SYMBOL_GPL(dpm_suspend_end);
1536
1537/**
1538 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539 * @dev: Device to suspend.
1540 * @state: PM transition of the system being carried out.
1541 * @cb: Suspend callback to execute.
1542 * @info: string description of caller.
1543 */
1544static int legacy_suspend(struct device *dev, pm_message_t state,
1545			  int (*cb)(struct device *dev, pm_message_t state),
1546			  const char *info)
1547{
1548	int error;
1549	ktime_t calltime;
1550
1551	calltime = initcall_debug_start(dev, cb);
1552
1553	trace_device_pm_callback_start(dev, info, state.event);
1554	error = cb(dev, state);
1555	trace_device_pm_callback_end(dev, error);
1556	suspend_report_result(cb, error);
1557
1558	initcall_debug_report(dev, calltime, cb, error);
1559
1560	return error;
1561}
1562
1563static void dpm_clear_superiors_direct_complete(struct device *dev)
1564{
1565	struct device_link *link;
1566	int idx;
1567
1568	if (dev->parent) {
1569		spin_lock_irq(&dev->parent->power.lock);
1570		dev->parent->power.direct_complete = false;
1571		spin_unlock_irq(&dev->parent->power.lock);
1572	}
1573
1574	idx = device_links_read_lock();
1575
1576	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577		spin_lock_irq(&link->supplier->power.lock);
1578		link->supplier->power.direct_complete = false;
1579		spin_unlock_irq(&link->supplier->power.lock);
1580	}
1581
1582	device_links_read_unlock(idx);
1583}
1584
1585/**
1586 * __device_suspend - Execute "suspend" callbacks for given device.
1587 * @dev: Device to handle.
1588 * @state: PM transition of the system being carried out.
1589 * @async: If true, the device is being suspended asynchronously.
1590 */
1591static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592{
1593	pm_callback_t callback = NULL;
1594	const char *info = NULL;
1595	int error = 0;
1596	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598	TRACE_DEVICE(dev);
1599	TRACE_SUSPEND(0);
1600
1601	dpm_wait_for_subordinate(dev, async);
1602
1603	if (async_error) {
1604		dev->power.direct_complete = false;
1605		goto Complete;
1606	}
1607
1608	/*
1609	 * Wait for possible runtime PM transitions of the device in progress
1610	 * to complete and if there's a runtime resume request pending for it,
1611	 * resume it before proceeding with invoking the system-wide suspend
1612	 * callbacks for it.
1613	 *
1614	 * If the system-wide suspend callbacks below change the configuration
1615	 * of the device, they must disable runtime PM for it or otherwise
1616	 * ensure that its runtime-resume callbacks will not be confused by that
1617	 * change in case they are invoked going forward.
1618	 */
1619	pm_runtime_barrier(dev);
1620
1621	if (pm_wakeup_pending()) {
1622		dev->power.direct_complete = false;
1623		async_error = -EBUSY;
1624		goto Complete;
1625	}
1626
1627	if (dev->power.syscore)
1628		goto Complete;
1629
1630	/* Avoid direct_complete to let wakeup_path propagate. */
1631	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1632		dev->power.direct_complete = false;
1633
1634	if (dev->power.direct_complete) {
1635		if (pm_runtime_status_suspended(dev)) {
1636			pm_runtime_disable(dev);
1637			if (pm_runtime_status_suspended(dev)) {
1638				pm_dev_dbg(dev, state, "direct-complete ");
1639				goto Complete;
1640			}
1641
1642			pm_runtime_enable(dev);
1643		}
1644		dev->power.direct_complete = false;
1645	}
1646
1647	dev->power.may_skip_resume = true;
1648	dev->power.must_resume = false;
1649
1650	dpm_watchdog_set(&wd, dev);
1651	device_lock(dev);
1652
1653	if (dev->pm_domain) {
1654		info = "power domain ";
1655		callback = pm_op(&dev->pm_domain->ops, state);
1656		goto Run;
1657	}
1658
1659	if (dev->type && dev->type->pm) {
1660		info = "type ";
1661		callback = pm_op(dev->type->pm, state);
1662		goto Run;
1663	}
1664
1665	if (dev->class && dev->class->pm) {
1666		info = "class ";
1667		callback = pm_op(dev->class->pm, state);
1668		goto Run;
1669	}
1670
1671	if (dev->bus) {
1672		if (dev->bus->pm) {
1673			info = "bus ";
1674			callback = pm_op(dev->bus->pm, state);
1675		} else if (dev->bus->suspend) {
1676			pm_dev_dbg(dev, state, "legacy bus ");
1677			error = legacy_suspend(dev, state, dev->bus->suspend,
1678						"legacy bus ");
1679			goto End;
1680		}
1681	}
1682
1683 Run:
1684	if (!callback && dev->driver && dev->driver->pm) {
1685		info = "driver ";
1686		callback = pm_op(dev->driver->pm, state);
1687	}
1688
1689	error = dpm_run_callback(callback, dev, state, info);
1690
1691 End:
1692	if (!error) {
1693		dev->power.is_suspended = true;
1694		if (device_may_wakeup(dev))
1695			dev->power.wakeup_path = true;
1696
1697		dpm_propagate_wakeup_to_parent(dev);
1698		dpm_clear_superiors_direct_complete(dev);
1699	}
1700
1701	device_unlock(dev);
1702	dpm_watchdog_clear(&wd);
1703
1704 Complete:
1705	if (error)
1706		async_error = error;
1707
1708	complete_all(&dev->power.completion);
1709	TRACE_SUSPEND(error);
1710	return error;
1711}
1712
1713static void async_suspend(void *data, async_cookie_t cookie)
1714{
1715	struct device *dev = (struct device *)data;
1716	int error;
1717
1718	error = __device_suspend(dev, pm_transition, true);
1719	if (error) {
1720		dpm_save_failed_dev(dev_name(dev));
1721		pm_dev_err(dev, pm_transition, " async", error);
1722	}
1723
1724	put_device(dev);
1725}
1726
1727static int device_suspend(struct device *dev)
1728{
1729	if (dpm_async_fn(dev, async_suspend))
1730		return 0;
1731
1732	return __device_suspend(dev, pm_transition, false);
1733}
1734
1735/**
1736 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1737 * @state: PM transition of the system being carried out.
1738 */
1739int dpm_suspend(pm_message_t state)
1740{
1741	ktime_t starttime = ktime_get();
1742	int error = 0;
1743
1744	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1745	might_sleep();
1746
1747	devfreq_suspend();
1748	cpufreq_suspend();
1749
1750	mutex_lock(&dpm_list_mtx);
1751	pm_transition = state;
1752	async_error = 0;
1753	while (!list_empty(&dpm_prepared_list)) {
1754		struct device *dev = to_device(dpm_prepared_list.prev);
1755
1756		get_device(dev);
1757		mutex_unlock(&dpm_list_mtx);
1758
1759		error = device_suspend(dev);
1760
1761		mutex_lock(&dpm_list_mtx);
1762		if (error) {
1763			pm_dev_err(dev, state, "", error);
1764			dpm_save_failed_dev(dev_name(dev));
1765			put_device(dev);
1766			break;
1767		}
1768		if (!list_empty(&dev->power.entry))
1769			list_move(&dev->power.entry, &dpm_suspended_list);
1770		put_device(dev);
1771		if (async_error)
1772			break;
1773	}
1774	mutex_unlock(&dpm_list_mtx);
1775	async_synchronize_full();
1776	if (!error)
1777		error = async_error;
1778	if (error) {
1779		suspend_stats.failed_suspend++;
1780		dpm_save_failed_step(SUSPEND_SUSPEND);
1781	}
1782	dpm_show_time(starttime, state, error, NULL);
1783	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1784	return error;
1785}
1786
1787/**
1788 * device_prepare - Prepare a device for system power transition.
1789 * @dev: Device to handle.
1790 * @state: PM transition of the system being carried out.
1791 *
1792 * Execute the ->prepare() callback(s) for given device.  No new children of the
1793 * device may be registered after this function has returned.
1794 */
1795static int device_prepare(struct device *dev, pm_message_t state)
1796{
1797	int (*callback)(struct device *) = NULL;
1798	int ret = 0;
1799
1800	if (dev->power.syscore)
1801		return 0;
1802
1803	/*
1804	 * If a device's parent goes into runtime suspend at the wrong time,
1805	 * it won't be possible to resume the device.  To prevent this we
1806	 * block runtime suspend here, during the prepare phase, and allow
1807	 * it again during the complete phase.
1808	 */
1809	pm_runtime_get_noresume(dev);
1810
1811	device_lock(dev);
1812
1813	dev->power.wakeup_path = false;
1814
1815	if (dev->power.no_pm_callbacks)
1816		goto unlock;
1817
1818	if (dev->pm_domain)
1819		callback = dev->pm_domain->ops.prepare;
1820	else if (dev->type && dev->type->pm)
1821		callback = dev->type->pm->prepare;
1822	else if (dev->class && dev->class->pm)
1823		callback = dev->class->pm->prepare;
1824	else if (dev->bus && dev->bus->pm)
1825		callback = dev->bus->pm->prepare;
1826
1827	if (!callback && dev->driver && dev->driver->pm)
1828		callback = dev->driver->pm->prepare;
1829
1830	if (callback)
1831		ret = callback(dev);
1832
1833unlock:
1834	device_unlock(dev);
1835
1836	if (ret < 0) {
1837		suspend_report_result(callback, ret);
1838		pm_runtime_put(dev);
1839		return ret;
1840	}
1841	/*
1842	 * A positive return value from ->prepare() means "this device appears
1843	 * to be runtime-suspended and its state is fine, so if it really is
1844	 * runtime-suspended, you can leave it in that state provided that you
1845	 * will do the same thing with all of its descendants".  This only
1846	 * applies to suspend transitions, however.
1847	 */
1848	spin_lock_irq(&dev->power.lock);
1849	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1850		(ret > 0 || dev->power.no_pm_callbacks) &&
1851		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1852	spin_unlock_irq(&dev->power.lock);
1853	return 0;
1854}
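/*
 * Example (hypothetical, assumed names): a driver that can stay
 * runtime-suspended across system suspend may return a positive value
 * from ->prepare() to request the direct_complete optimization:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) && foo_state_is_safe(dev);
 *	}
 *
 * If every descendant agrees, the PM core skips the remaining suspend and
 * resume callbacks for the device.
 */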
1855
1856/**
1857 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1858 * @state: PM transition of the system being carried out.
1859 *
1860 * Execute the ->prepare() callback(s) for all devices.
1861 */
1862int dpm_prepare(pm_message_t state)
1863{
1864	int error = 0;
1865
1866	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1867	might_sleep();
1868
1869	/*
 1870	 * Give the known devices a chance to complete their probes before
 1871	 * probing is disabled. This sync point is important at least at
 1872	 * boot time and during hibernation restore.
1873	 */
1874	wait_for_device_probe();
1875	/*
 1876	 * Probing devices during suspend or hibernation is unsafe and would
 1877	 * make system behavior unpredictable, so prohibit device probing
 1878	 * here and defer the probes instead. The normal behavior will be
 1879	 * restored in dpm_complete().
1880	 */
1881	device_block_probing();
1882
1883	mutex_lock(&dpm_list_mtx);
1884	while (!list_empty(&dpm_list)) {
1885		struct device *dev = to_device(dpm_list.next);
1886
1887		get_device(dev);
1888		mutex_unlock(&dpm_list_mtx);
1889
1890		trace_device_pm_callback_start(dev, "", state.event);
1891		error = device_prepare(dev, state);
1892		trace_device_pm_callback_end(dev, error);
1893
1894		mutex_lock(&dpm_list_mtx);
1895		if (error) {
1896			if (error == -EAGAIN) {
1897				put_device(dev);
1898				error = 0;
1899				continue;
1900			}
1901			pr_info("Device %s not prepared for power transition: code %d\n",
1902				dev_name(dev), error);
1903			put_device(dev);
1904			break;
1905		}
1906		dev->power.is_prepared = true;
1907		if (!list_empty(&dev->power.entry))
1908			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1909		put_device(dev);
1910	}
1911	mutex_unlock(&dpm_list_mtx);
1912	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1913	return error;
1914}
1915
1916/**
1917 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1918 * @state: PM transition of the system being carried out.
1919 *
1920 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1921 * callbacks for them.
1922 */
1923int dpm_suspend_start(pm_message_t state)
1924{
1925	ktime_t starttime = ktime_get();
1926	int error;
1927
1928	error = dpm_prepare(state);
1929	if (error) {
1930		suspend_stats.failed_prepare++;
1931		dpm_save_failed_step(SUSPEND_PREPARE);
1932	} else
1933		error = dpm_suspend(state);
1934	dpm_show_time(starttime, state, error, "start");
1935	return error;
1936}
1937EXPORT_SYMBOL_GPL(dpm_suspend_start);
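
/*
 * Simplified sketch of how a sleep entry path pairs the helper above with
 * its resume-side counterparts (loosely modeled on the core system suspend
 * flow; error handling trimmed, "example_*" names hypothetical):
 */
static int example_devices_sleep(void)
{
	int error = dpm_suspend_start(PMSG_SUSPEND);

	if (!error) {
		error = dpm_suspend_end(PMSG_SUSPEND);
		if (!error) {
			/* ... the platform enters its sleep state here ... */
			dpm_resume_start(PMSG_RESUME);
		}
	}
	/* Resume and complete whatever was prepared/suspended above. */
	dpm_resume_end(PMSG_RESUME);
	return error;
}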
1938
1939void __suspend_report_result(const char *function, void *fn, int ret)
1940{
1941	if (ret)
1942		pr_err("%s(): %pS returns %d\n", function, fn, ret);
1943}
1944EXPORT_SYMBOL_GPL(__suspend_report_result);
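
/*
 * Illustrative use of the suspend_report_result() wrapper around the
 * function above (hypothetical helper): pass the callback and its return
 * value so a failure is logged with the offending function's symbol.
 */
static int example_run_cb(struct device *dev, int (*cb)(struct device *))
{
	int error = cb(dev);

	suspend_report_result(cb, error);	/* prints only when error != 0 */
	return error;
}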
1945
1946/**
1947 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1948 * @subordinate: Device that needs to wait for @dev.
1949 * @dev: Device to wait for.
1950 */
1951int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1952{
1953	dpm_wait(dev, subordinate->power.async_suspend);
1954	return async_error;
1955}
1956EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
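
/*
 * Illustrative use (hypothetical driver): a suspend callback that must not
 * run until a companion device has finished suspending can wait for it
 * explicitly.  The "foo_*" names are made up.
 */
struct foo_data {
	struct device *companion;
};

static int foo_dev_suspend(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);
	int error;

	/* Honors async ordering and reports a failed transition, if any. */
	error = device_pm_wait_for_dev(dev, foo->companion);
	if (error)
		return error;

	/* ... quiesce the hardware ... */
	return 0;
}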
1957
1958/**
1959 * dpm_for_each_dev - device iterator.
1960 * @data: data for the callback.
1961 * @fn: function to be called for each device.
1962 *
1963 * Iterate over devices in dpm_list, and call @fn for each device,
1964 * passing it @data.
1965 */
1966void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1967{
1968	struct device *dev;
1969
1970	if (!fn)
1971		return;
1972
1973	device_pm_lock();
1974	list_for_each_entry(dev, &dpm_list, power.entry)
1975		fn(dev, data);
1976	device_pm_unlock();
1977}
1978EXPORT_SYMBOL_GPL(dpm_for_each_dev);
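
/*
 * Illustrative use (hypothetical): count the devices currently on dpm_list
 * with the iterator above.
 */
static void example_count_dev(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, example_count_dev);
	return count;
}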
1979
1980static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1981{
1982	if (!ops)
1983		return true;
1984
1985	return !ops->prepare &&
1986	       !ops->suspend &&
1987	       !ops->suspend_late &&
1988	       !ops->suspend_noirq &&
1989	       !ops->resume_noirq &&
1990	       !ops->resume_early &&
1991	       !ops->resume &&
1992	       !ops->complete;
1993}
1994
1995void device_pm_check_callbacks(struct device *dev)
1996{
1997	spin_lock_irq(&dev->power.lock);
1998	dev->power.no_pm_callbacks =
1999		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2000		 !dev->bus->suspend && !dev->bus->resume)) &&
2001		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2002		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2003		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2004		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2005		 !dev->driver->suspend && !dev->driver->resume));
2006	spin_unlock_irq(&dev->power.lock);
2007}
2008
2009bool dev_pm_skip_suspend(struct device *dev)
2010{
2011	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2012		pm_runtime_status_suspended(dev);
2013}
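
/*
 * Illustrative use (hypothetical middle-layer code, loosely following how
 * bus types such as PCI use this helper): skip the late suspend work for a
 * device that set DPM_FLAG_SMART_SUSPEND and is already runtime-suspended.
 */
static int example_bus_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	return pm_generic_suspend_late(dev);
}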
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/mutex.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm-trace.h>
  26#include <linux/pm_wakeirq.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/sched/debug.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
 
 
 
 
  43/*
  44 * The entries in the dpm_list list are in a depth first order, simply
  45 * because children are guaranteed to be discovered after parents, and
  46 * are inserted at the back of the list on discovery.
  47 *
  48 * Since device_pm_add() may be called with a device lock held,
  49 * we must never try to acquire a device lock while holding
  50 * dpm_list_mutex.
  51 */
  52
  53LIST_HEAD(dpm_list);
  54static LIST_HEAD(dpm_prepared_list);
  55static LIST_HEAD(dpm_suspended_list);
  56static LIST_HEAD(dpm_late_early_list);
  57static LIST_HEAD(dpm_noirq_list);
  58
  59struct suspend_stats suspend_stats;
  60static DEFINE_MUTEX(dpm_list_mtx);
  61static pm_message_t pm_transition;
  62
  63static int async_error;
  64
  65static const char *pm_verb(int event)
  66{
  67	switch (event) {
  68	case PM_EVENT_SUSPEND:
  69		return "suspend";
  70	case PM_EVENT_RESUME:
  71		return "resume";
  72	case PM_EVENT_FREEZE:
  73		return "freeze";
  74	case PM_EVENT_QUIESCE:
  75		return "quiesce";
  76	case PM_EVENT_HIBERNATE:
  77		return "hibernate";
  78	case PM_EVENT_THAW:
  79		return "thaw";
  80	case PM_EVENT_RESTORE:
  81		return "restore";
  82	case PM_EVENT_RECOVER:
  83		return "recover";
  84	default:
  85		return "(unknown PM event)";
  86	}
  87}
  88
  89/**
  90 * device_pm_sleep_init - Initialize system suspend-related device fields.
  91 * @dev: Device object being initialized.
  92 */
  93void device_pm_sleep_init(struct device *dev)
  94{
  95	dev->power.is_prepared = false;
  96	dev->power.is_suspended = false;
  97	dev->power.is_noirq_suspended = false;
  98	dev->power.is_late_suspended = false;
  99	init_completion(&dev->power.completion);
 100	complete_all(&dev->power.completion);
 101	dev->power.wakeup = NULL;
 102	INIT_LIST_HEAD(&dev->power.entry);
 103}
 104
 105/**
 106 * device_pm_lock - Lock the list of active devices used by the PM core.
 107 */
 108void device_pm_lock(void)
 109{
 110	mutex_lock(&dpm_list_mtx);
 111}
 112
 113/**
 114 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 115 */
 116void device_pm_unlock(void)
 117{
 118	mutex_unlock(&dpm_list_mtx);
 119}
 120
 121/**
 122 * device_pm_add - Add a device to the PM core's list of active devices.
 123 * @dev: Device to add to the list.
 124 */
 125void device_pm_add(struct device *dev)
 126{
 127	/* Skip PM setup/initialization. */
 128	if (device_pm_not_required(dev))
 129		return;
 130
 131	pr_debug("Adding info for %s:%s\n",
 132		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 133	device_pm_check_callbacks(dev);
 134	mutex_lock(&dpm_list_mtx);
 135	if (dev->parent && dev->parent->power.is_prepared)
 136		dev_warn(dev, "parent %s should not be sleeping\n",
 137			dev_name(dev->parent));
 138	list_add_tail(&dev->power.entry, &dpm_list);
 139	dev->power.in_dpm_list = true;
 140	mutex_unlock(&dpm_list_mtx);
 141}
 142
 143/**
 144 * device_pm_remove - Remove a device from the PM core's list of active devices.
 145 * @dev: Device to be removed from the list.
 146 */
 147void device_pm_remove(struct device *dev)
 148{
 149	if (device_pm_not_required(dev))
 150		return;
 151
 152	pr_debug("Removing info for %s:%s\n",
 153		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 154	complete_all(&dev->power.completion);
 155	mutex_lock(&dpm_list_mtx);
 156	list_del_init(&dev->power.entry);
 157	dev->power.in_dpm_list = false;
 158	mutex_unlock(&dpm_list_mtx);
 159	device_wakeup_disable(dev);
 160	pm_runtime_remove(dev);
 161	device_pm_check_callbacks(dev);
 162}
 163
 164/**
 165 * device_pm_move_before - Move device in the PM core's list of active devices.
 166 * @deva: Device to move in dpm_list.
 167 * @devb: Device @deva should come before.
 168 */
 169void device_pm_move_before(struct device *deva, struct device *devb)
 170{
 171	pr_debug("Moving %s:%s before %s:%s\n",
 172		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 173		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 174	/* Delete deva from dpm_list and reinsert before devb. */
 175	list_move_tail(&deva->power.entry, &devb->power.entry);
 176}
 177
 178/**
 179 * device_pm_move_after - Move device in the PM core's list of active devices.
 180 * @deva: Device to move in dpm_list.
 181 * @devb: Device @deva should come after.
 182 */
 183void device_pm_move_after(struct device *deva, struct device *devb)
 184{
 185	pr_debug("Moving %s:%s after %s:%s\n",
 186		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 187		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 188	/* Delete deva from dpm_list and reinsert after devb. */
 189	list_move(&deva->power.entry, &devb->power.entry);
 190}
 191
 192/**
 193 * device_pm_move_last - Move device to end of the PM core's list of devices.
 194 * @dev: Device to move in dpm_list.
 195 */
 196void device_pm_move_last(struct device *dev)
 197{
 198	pr_debug("Moving %s:%s to end of list\n",
 199		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 200	list_move_tail(&dev->power.entry, &dpm_list);
 201}
 202
 203static ktime_t initcall_debug_start(struct device *dev, void *cb)
 204{
 205	if (!pm_print_times_enabled)
 206		return 0;
 207
 208	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 209		 task_pid_nr(current),
 210		 dev->parent ? dev_name(dev->parent) : "none");
 211	return ktime_get();
 212}
 213
 214static void initcall_debug_report(struct device *dev, ktime_t calltime,
 215				  void *cb, int error)
 216{
 217	ktime_t rettime;
 218	s64 nsecs;
 219
 220	if (!pm_print_times_enabled)
 221		return;
 222
 223	rettime = ktime_get();
 224	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 225
 226	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 227		 (unsigned long long)nsecs >> 10);
 228}
 229
 230/**
 231 * dpm_wait - Wait for a PM operation to complete.
 232 * @dev: Device to wait for.
 233 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 234 */
 235static void dpm_wait(struct device *dev, bool async)
 236{
 237	if (!dev)
 238		return;
 239
 240	if (async || (pm_async_enabled && dev->power.async_suspend))
 241		wait_for_completion(&dev->power.completion);
 242}
 243
 244static int dpm_wait_fn(struct device *dev, void *async_ptr)
 245{
 246	dpm_wait(dev, *((bool *)async_ptr));
 247	return 0;
 248}
 249
 250static void dpm_wait_for_children(struct device *dev, bool async)
 251{
 252       device_for_each_child(dev, &async, dpm_wait_fn);
 253}
 254
 255static void dpm_wait_for_suppliers(struct device *dev, bool async)
 256{
 257	struct device_link *link;
 258	int idx;
 259
 260	idx = device_links_read_lock();
 261
 262	/*
 263	 * If the supplier goes away right after we've checked the link to it,
 264	 * we'll wait for its completion to change the state, but that's fine,
 265	 * because the only things that will block as a result are the SRCU
 266	 * callbacks freeing the link objects for the links in the list we're
 267	 * walking.
 268	 */
 269	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
 270		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 271			dpm_wait(link->supplier, async);
 272
 273	device_links_read_unlock(idx);
 274}
 275
 276static void dpm_wait_for_superior(struct device *dev, bool async)
 277{
 278	dpm_wait(dev->parent, async);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 279	dpm_wait_for_suppliers(dev, async);
 
 
 
 
 
 
 280}
 281
 282static void dpm_wait_for_consumers(struct device *dev, bool async)
 283{
 284	struct device_link *link;
 285	int idx;
 286
 287	idx = device_links_read_lock();
 288
 289	/*
 290	 * The status of a device link can only be changed from "dormant" by a
 291	 * probe, but that cannot happen during system suspend/resume.  In
 292	 * theory it can change to "dormant" at that time, but then it is
 293	 * reasonable to wait for the target device anyway (eg. if it goes
 294	 * away, it's better to wait for it to go away completely and then
 295	 * continue instead of trying to continue in parallel with its
 296	 * unregistration).
 297	 */
 298	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
 299		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 300			dpm_wait(link->consumer, async);
 301
 302	device_links_read_unlock(idx);
 303}
 304
 305static void dpm_wait_for_subordinate(struct device *dev, bool async)
 306{
 307	dpm_wait_for_children(dev, async);
 308	dpm_wait_for_consumers(dev, async);
 309}
 310
 311/**
 312 * pm_op - Return the PM operation appropriate for given PM event.
 313 * @ops: PM operations to choose from.
 314 * @state: PM transition of the system being carried out.
 315 */
 316static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 317{
 318	switch (state.event) {
 319#ifdef CONFIG_SUSPEND
 320	case PM_EVENT_SUSPEND:
 321		return ops->suspend;
 322	case PM_EVENT_RESUME:
 323		return ops->resume;
 324#endif /* CONFIG_SUSPEND */
 325#ifdef CONFIG_HIBERNATE_CALLBACKS
 326	case PM_EVENT_FREEZE:
 327	case PM_EVENT_QUIESCE:
 328		return ops->freeze;
 329	case PM_EVENT_HIBERNATE:
 330		return ops->poweroff;
 331	case PM_EVENT_THAW:
 332	case PM_EVENT_RECOVER:
 333		return ops->thaw;
 334		break;
 335	case PM_EVENT_RESTORE:
 336		return ops->restore;
 337#endif /* CONFIG_HIBERNATE_CALLBACKS */
 338	}
 339
 340	return NULL;
 341}
 342
 343/**
 344 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 345 * @ops: PM operations to choose from.
 346 * @state: PM transition of the system being carried out.
 347 *
 348 * Runtime PM is disabled for @dev while this function is being executed.
 349 */
 350static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 351				      pm_message_t state)
 352{
 353	switch (state.event) {
 354#ifdef CONFIG_SUSPEND
 355	case PM_EVENT_SUSPEND:
 356		return ops->suspend_late;
 357	case PM_EVENT_RESUME:
 358		return ops->resume_early;
 359#endif /* CONFIG_SUSPEND */
 360#ifdef CONFIG_HIBERNATE_CALLBACKS
 361	case PM_EVENT_FREEZE:
 362	case PM_EVENT_QUIESCE:
 363		return ops->freeze_late;
 364	case PM_EVENT_HIBERNATE:
 365		return ops->poweroff_late;
 366	case PM_EVENT_THAW:
 367	case PM_EVENT_RECOVER:
 368		return ops->thaw_early;
 369	case PM_EVENT_RESTORE:
 370		return ops->restore_early;
 371#endif /* CONFIG_HIBERNATE_CALLBACKS */
 372	}
 373
 374	return NULL;
 375}
 376
 377/**
 378 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 379 * @ops: PM operations to choose from.
 380 * @state: PM transition of the system being carried out.
 381 *
 382 * The driver of @dev will not receive interrupts while this function is being
 383 * executed.
 384 */
 385static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 386{
 387	switch (state.event) {
 388#ifdef CONFIG_SUSPEND
 389	case PM_EVENT_SUSPEND:
 390		return ops->suspend_noirq;
 391	case PM_EVENT_RESUME:
 392		return ops->resume_noirq;
 393#endif /* CONFIG_SUSPEND */
 394#ifdef CONFIG_HIBERNATE_CALLBACKS
 395	case PM_EVENT_FREEZE:
 396	case PM_EVENT_QUIESCE:
 397		return ops->freeze_noirq;
 398	case PM_EVENT_HIBERNATE:
 399		return ops->poweroff_noirq;
 400	case PM_EVENT_THAW:
 401	case PM_EVENT_RECOVER:
 402		return ops->thaw_noirq;
 403	case PM_EVENT_RESTORE:
 404		return ops->restore_noirq;
 405#endif /* CONFIG_HIBERNATE_CALLBACKS */
 406	}
 407
 408	return NULL;
 409}
 410
 411static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 412{
 413	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 414		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 415		", may wakeup" : "");
 416}
 417
 418static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 419			int error)
 420{
 421	pr_err("Device %s failed to %s%s: error %d\n",
 422	       dev_name(dev), pm_verb(state.event), info, error);
 423}
 424
 425static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 426			  const char *info)
 427{
 428	ktime_t calltime;
 429	u64 usecs64;
 430	int usecs;
 431
 432	calltime = ktime_get();
 433	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 434	do_div(usecs64, NSEC_PER_USEC);
 435	usecs = usecs64;
 436	if (usecs == 0)
 437		usecs = 1;
 438
 439	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 440		  info ?: "", info ? " " : "", pm_verb(state.event),
 441		  error ? "aborted" : "complete",
 442		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 443}
 444
 445static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 446			    pm_message_t state, const char *info)
 447{
 448	ktime_t calltime;
 449	int error;
 450
 451	if (!cb)
 452		return 0;
 453
 454	calltime = initcall_debug_start(dev, cb);
 455
 456	pm_dev_dbg(dev, state, info);
 457	trace_device_pm_callback_start(dev, info, state.event);
 458	error = cb(dev);
 459	trace_device_pm_callback_end(dev, error);
 460	suspend_report_result(cb, error);
 461
 462	initcall_debug_report(dev, calltime, cb, error);
 463
 464	return error;
 465}
 466
 467#ifdef CONFIG_DPM_WATCHDOG
 468struct dpm_watchdog {
 469	struct device		*dev;
 470	struct task_struct	*tsk;
 471	struct timer_list	timer;
 472};
 473
 474#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 475	struct dpm_watchdog wd
 476
 477/**
 478 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 479 * @t: The timer that PM watchdog depends on.
 480 *
 481 * Called when a driver has timed out suspending or resuming.
 482 * There's not much we can do here to recover so panic() to
 483 * capture a crash-dump in pstore.
 484 */
 485static void dpm_watchdog_handler(struct timer_list *t)
 486{
 487	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 488
 489	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 490	show_stack(wd->tsk, NULL);
 491	panic("%s %s: unrecoverable failure\n",
 492		dev_driver_string(wd->dev), dev_name(wd->dev));
 493}
 494
 495/**
 496 * dpm_watchdog_set - Enable pm watchdog for given device.
 497 * @wd: Watchdog. Must be allocated on the stack.
 498 * @dev: Device to handle.
 499 */
 500static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 501{
 502	struct timer_list *timer = &wd->timer;
 503
 504	wd->dev = dev;
 505	wd->tsk = current;
 506
 507	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 508	/* use same timeout value for both suspend and resume */
 509	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 510	add_timer(timer);
 511}
 512
 513/**
 514 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 515 * @wd: Watchdog to disable.
 516 */
 517static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 518{
 519	struct timer_list *timer = &wd->timer;
 520
 521	del_timer_sync(timer);
 522	destroy_timer_on_stack(timer);
 523}
 524#else
 525#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 526#define dpm_watchdog_set(x, y)
 527#define dpm_watchdog_clear(x)
 528#endif
 529
 530/*------------------------- Resume routines -------------------------*/
 531
 532/**
 533 * suspend_event - Return a "suspend" message for given "resume" one.
 534 * @resume_msg: PM message representing a system-wide resume transition.
 535 */
 536static pm_message_t suspend_event(pm_message_t resume_msg)
 537{
 538	switch (resume_msg.event) {
 539	case PM_EVENT_RESUME:
 540		return PMSG_SUSPEND;
 541	case PM_EVENT_THAW:
 542	case PM_EVENT_RESTORE:
 543		return PMSG_FREEZE;
 544	case PM_EVENT_RECOVER:
 545		return PMSG_HIBERNATE;
 546	}
 547	return PMSG_ON;
 548}
 549
 550/**
 551 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 552 * @dev: Target device.
 553 *
 554 * Checks whether or not the device may be left in suspend after a system-wide
 555 * transition to the working state.
 
 
 
 556 */
 557bool dev_pm_may_skip_resume(struct device *dev)
 558{
 559	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
 560}
 561
 562static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
 563						pm_message_t state,
 564						const char **info_p)
 565{
 566	pm_callback_t callback;
 567	const char *info;
 568
 569	if (dev->pm_domain) {
 570		info = "noirq power domain ";
 571		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 572	} else if (dev->type && dev->type->pm) {
 573		info = "noirq type ";
 574		callback = pm_noirq_op(dev->type->pm, state);
 575	} else if (dev->class && dev->class->pm) {
 576		info = "noirq class ";
 577		callback = pm_noirq_op(dev->class->pm, state);
 578	} else if (dev->bus && dev->bus->pm) {
 579		info = "noirq bus ";
 580		callback = pm_noirq_op(dev->bus->pm, state);
 581	} else {
 582		return NULL;
 583	}
 584
 585	if (info_p)
 586		*info_p = info;
 587
 588	return callback;
 589}
 590
 591static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
 592						 pm_message_t state,
 593						 const char **info_p);
 594
 595static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
 596						pm_message_t state,
 597						const char **info_p);
 598
 599/**
 600 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 601 * @dev: Device to handle.
 602 * @state: PM transition of the system being carried out.
 603 * @async: If true, the device is being resumed asynchronously.
 604 *
 605 * The driver of @dev will not receive interrupts while this function is being
 606 * executed.
 607 */
 608static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 609{
 610	pm_callback_t callback;
 611	const char *info;
 612	bool skip_resume;
 613	int error = 0;
 614
 615	TRACE_DEVICE(dev);
 616	TRACE_RESUME(0);
 617
 618	if (dev->power.syscore || dev->power.direct_complete)
 619		goto Out;
 620
 621	if (!dev->power.is_noirq_suspended)
 622		goto Out;
 623
 624	dpm_wait_for_superior(dev, async);
 
 625
 626	skip_resume = dev_pm_may_skip_resume(dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 627
 628	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
 
 
 
 
 
 
 
 
 
 
 
 
 629	if (callback)
 630		goto Run;
 631
 632	if (skip_resume)
 633		goto Skip;
 634
 635	if (dev_pm_smart_suspend_and_suspended(dev)) {
 636		pm_message_t suspend_msg = suspend_event(state);
 637
 638		/*
 639		 * If "freeze" callbacks have been skipped during a transition
 640		 * related to hibernation, the subsequent "thaw" callbacks must
 641		 * be skipped too or bad things may happen.  Otherwise, resume
 642		 * callbacks are going to be run for the device, so its runtime
 643		 * PM status must be changed to reflect the new state after the
 644		 * transition under way.
 645		 */
 646		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
 647		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
 648			if (state.event == PM_EVENT_THAW) {
 649				skip_resume = true;
 650				goto Skip;
 651			} else {
 652				pm_runtime_set_active(dev);
 653			}
 654		}
 655	}
 656
 657	if (dev->driver && dev->driver->pm) {
 658		info = "noirq driver ";
 659		callback = pm_noirq_op(dev->driver->pm, state);
 660	}
 661
 662Run:
 663	error = dpm_run_callback(callback, dev, state, info);
 664
 665Skip:
 666	dev->power.is_noirq_suspended = false;
 667
 668	if (skip_resume) {
 669		/* Make the next phases of resume skip the device. */
 670		dev->power.is_late_suspended = false;
 671		dev->power.is_suspended = false;
 672		/*
 673		 * The device is going to be left in suspend, but it might not
 674		 * have been in runtime suspend before the system suspended, so
 675		 * its runtime PM status needs to be updated to avoid confusing
 676		 * the runtime PM framework when runtime PM is enabled for the
 677		 * device again.
 678		 */
 679		pm_runtime_set_suspended(dev);
 680	}
 681
 682Out:
 683	complete_all(&dev->power.completion);
 684	TRACE_RESUME(error);
 685	return error;
 686}
 687
 688static bool is_async(struct device *dev)
 689{
 690	return dev->power.async_suspend && pm_async_enabled
 691		&& !pm_trace_is_enabled();
 692}
 693
 694static bool dpm_async_fn(struct device *dev, async_func_t func)
 695{
 696	reinit_completion(&dev->power.completion);
 697
 698	if (is_async(dev)) {
 699		get_device(dev);
 700		async_schedule(func, dev);
 701		return true;
 702	}
 703
 704	return false;
 705}
 706
 707static void async_resume_noirq(void *data, async_cookie_t cookie)
 708{
 709	struct device *dev = (struct device *)data;
 710	int error;
 711
 712	error = device_resume_noirq(dev, pm_transition, true);
 713	if (error)
 714		pm_dev_err(dev, pm_transition, " async", error);
 715
 716	put_device(dev);
 717}
 718
 719static void dpm_noirq_resume_devices(pm_message_t state)
 720{
 721	struct device *dev;
 722	ktime_t starttime = ktime_get();
 723
 724	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 725	mutex_lock(&dpm_list_mtx);
 726	pm_transition = state;
 727
 728	/*
 729	 * Advanced the async threads upfront,
 730	 * in case the starting of async threads is
 731	 * delayed by non-async resuming devices.
 732	 */
 733	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 734		dpm_async_fn(dev, async_resume_noirq);
 735
 736	while (!list_empty(&dpm_noirq_list)) {
 737		dev = to_device(dpm_noirq_list.next);
 738		get_device(dev);
 739		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 740		mutex_unlock(&dpm_list_mtx);
 741
 742		if (!is_async(dev)) {
 743			int error;
 744
 745			error = device_resume_noirq(dev, state, false);
 746			if (error) {
 747				suspend_stats.failed_resume_noirq++;
 748				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 749				dpm_save_failed_dev(dev_name(dev));
 750				pm_dev_err(dev, state, " noirq", error);
 751			}
 752		}
 753
 754		mutex_lock(&dpm_list_mtx);
 755		put_device(dev);
 756	}
 757	mutex_unlock(&dpm_list_mtx);
 758	async_synchronize_full();
 759	dpm_show_time(starttime, state, 0, "noirq");
 760	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 761}
 762
 763/**
 764 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 765 * @state: PM transition of the system being carried out.
 766 *
 767 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 768 * allow device drivers' interrupt handlers to be called.
 769 */
 770void dpm_resume_noirq(pm_message_t state)
 771{
 772	dpm_noirq_resume_devices(state);
 773
 774	resume_device_irqs();
 775	device_wakeup_disarm_wake_irqs();
 776
 777	cpuidle_resume();
 778}
 779
 780static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
 781						pm_message_t state,
 782						const char **info_p)
 783{
 784	pm_callback_t callback;
 785	const char *info;
 786
 787	if (dev->pm_domain) {
 788		info = "early power domain ";
 789		callback = pm_late_early_op(&dev->pm_domain->ops, state);
 790	} else if (dev->type && dev->type->pm) {
 791		info = "early type ";
 792		callback = pm_late_early_op(dev->type->pm, state);
 793	} else if (dev->class && dev->class->pm) {
 794		info = "early class ";
 795		callback = pm_late_early_op(dev->class->pm, state);
 796	} else if (dev->bus && dev->bus->pm) {
 797		info = "early bus ";
 798		callback = pm_late_early_op(dev->bus->pm, state);
 799	} else {
 800		return NULL;
 801	}
 802
 803	if (info_p)
 804		*info_p = info;
 805
 806	return callback;
 807}
 808
 809/**
 810 * device_resume_early - Execute an "early resume" callback for given device.
 811 * @dev: Device to handle.
 812 * @state: PM transition of the system being carried out.
 813 * @async: If true, the device is being resumed asynchronously.
 814 *
 815 * Runtime PM is disabled for @dev while this function is being executed.
 816 */
 817static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 818{
 819	pm_callback_t callback;
 820	const char *info;
 821	int error = 0;
 822
 823	TRACE_DEVICE(dev);
 824	TRACE_RESUME(0);
 825
 826	if (dev->power.syscore || dev->power.direct_complete)
 827		goto Out;
 828
 829	if (!dev->power.is_late_suspended)
 830		goto Out;
 831
 832	dpm_wait_for_superior(dev, async);
 
 833
 834	callback = dpm_subsys_resume_early_cb(dev, state, &info);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 835
 836	if (!callback && dev->driver && dev->driver->pm) {
 837		info = "early driver ";
 838		callback = pm_late_early_op(dev->driver->pm, state);
 839	}
 840
 
 841	error = dpm_run_callback(callback, dev, state, info);
 
 
 842	dev->power.is_late_suspended = false;
 843
 844 Out:
 845	TRACE_RESUME(error);
 846
 847	pm_runtime_enable(dev);
 848	complete_all(&dev->power.completion);
 849	return error;
 850}
 851
 852static void async_resume_early(void *data, async_cookie_t cookie)
 853{
 854	struct device *dev = (struct device *)data;
 855	int error;
 856
 857	error = device_resume_early(dev, pm_transition, true);
 858	if (error)
 859		pm_dev_err(dev, pm_transition, " async", error);
 860
 861	put_device(dev);
 862}
 863
 864/**
 865 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 866 * @state: PM transition of the system being carried out.
 867 */
 868void dpm_resume_early(pm_message_t state)
 869{
 870	struct device *dev;
 871	ktime_t starttime = ktime_get();
 872
 873	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 874	mutex_lock(&dpm_list_mtx);
 875	pm_transition = state;
 876
 877	/*
 878	 * Advanced the async threads upfront,
 879	 * in case the starting of async threads is
 880	 * delayed by non-async resuming devices.
 881	 */
 882	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 883		dpm_async_fn(dev, async_resume_early);
 884
 885	while (!list_empty(&dpm_late_early_list)) {
 886		dev = to_device(dpm_late_early_list.next);
 887		get_device(dev);
 888		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 889		mutex_unlock(&dpm_list_mtx);
 890
 891		if (!is_async(dev)) {
 892			int error;
 893
 894			error = device_resume_early(dev, state, false);
 895			if (error) {
 896				suspend_stats.failed_resume_early++;
 897				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 898				dpm_save_failed_dev(dev_name(dev));
 899				pm_dev_err(dev, state, " early", error);
 900			}
 901		}
 902		mutex_lock(&dpm_list_mtx);
 903		put_device(dev);
 904	}
 905	mutex_unlock(&dpm_list_mtx);
 906	async_synchronize_full();
 907	dpm_show_time(starttime, state, 0, "early");
 908	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 909}
 910
 911/**
 912 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 913 * @state: PM transition of the system being carried out.
 914 */
 915void dpm_resume_start(pm_message_t state)
 916{
 917	dpm_resume_noirq(state);
 918	dpm_resume_early(state);
 919}
 920EXPORT_SYMBOL_GPL(dpm_resume_start);
 921
 922/**
 923 * device_resume - Execute "resume" callbacks for given device.
 924 * @dev: Device to handle.
 925 * @state: PM transition of the system being carried out.
 926 * @async: If true, the device is being resumed asynchronously.
 927 */
 928static int device_resume(struct device *dev, pm_message_t state, bool async)
 929{
 930	pm_callback_t callback = NULL;
 931	const char *info = NULL;
 932	int error = 0;
 933	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 934
 935	TRACE_DEVICE(dev);
 936	TRACE_RESUME(0);
 937
 938	if (dev->power.syscore)
 939		goto Complete;
 940
 941	if (dev->power.direct_complete) {
 942		/* Match the pm_runtime_disable() in __device_suspend(). */
 943		pm_runtime_enable(dev);
 944		goto Complete;
 945	}
 946
 947	dpm_wait_for_superior(dev, async);
 
 
 948	dpm_watchdog_set(&wd, dev);
 949	device_lock(dev);
 950
 951	/*
 952	 * This is a fib.  But we'll allow new children to be added below
 953	 * a resumed device, even if the device hasn't been completed yet.
 954	 */
 955	dev->power.is_prepared = false;
 956
 957	if (!dev->power.is_suspended)
 958		goto Unlock;
 959
 960	if (dev->pm_domain) {
 961		info = "power domain ";
 962		callback = pm_op(&dev->pm_domain->ops, state);
 963		goto Driver;
 964	}
 965
 966	if (dev->type && dev->type->pm) {
 967		info = "type ";
 968		callback = pm_op(dev->type->pm, state);
 969		goto Driver;
 970	}
 971
 972	if (dev->class && dev->class->pm) {
 973		info = "class ";
 974		callback = pm_op(dev->class->pm, state);
 975		goto Driver;
 976	}
 977
 978	if (dev->bus) {
 979		if (dev->bus->pm) {
 980			info = "bus ";
 981			callback = pm_op(dev->bus->pm, state);
 982		} else if (dev->bus->resume) {
 983			info = "legacy bus ";
 984			callback = dev->bus->resume;
 985			goto End;
 986		}
 987	}
 988
 989 Driver:
 990	if (!callback && dev->driver && dev->driver->pm) {
 991		info = "driver ";
 992		callback = pm_op(dev->driver->pm, state);
 993	}
 994
 995 End:
 996	error = dpm_run_callback(callback, dev, state, info);
 997	dev->power.is_suspended = false;
 998
 999 Unlock:
1000	device_unlock(dev);
1001	dpm_watchdog_clear(&wd);
1002
1003 Complete:
1004	complete_all(&dev->power.completion);
1005
1006	TRACE_RESUME(error);
1007
1008	return error;
1009}
1010
1011static void async_resume(void *data, async_cookie_t cookie)
1012{
1013	struct device *dev = (struct device *)data;
1014	int error;
1015
1016	error = device_resume(dev, pm_transition, true);
1017	if (error)
1018		pm_dev_err(dev, pm_transition, " async", error);
1019	put_device(dev);
1020}
1021
1022/**
1023 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1024 * @state: PM transition of the system being carried out.
1025 *
1026 * Execute the appropriate "resume" callback for all devices whose status
1027 * indicates that they are suspended.
1028 */
1029void dpm_resume(pm_message_t state)
1030{
1031	struct device *dev;
1032	ktime_t starttime = ktime_get();
1033
1034	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1035	might_sleep();
1036
1037	mutex_lock(&dpm_list_mtx);
1038	pm_transition = state;
1039	async_error = 0;
1040
1041	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1042		dpm_async_fn(dev, async_resume);
1043
1044	while (!list_empty(&dpm_suspended_list)) {
1045		dev = to_device(dpm_suspended_list.next);
1046		get_device(dev);
1047		if (!is_async(dev)) {
1048			int error;
1049
1050			mutex_unlock(&dpm_list_mtx);
1051
1052			error = device_resume(dev, state, false);
1053			if (error) {
1054				suspend_stats.failed_resume++;
1055				dpm_save_failed_step(SUSPEND_RESUME);
1056				dpm_save_failed_dev(dev_name(dev));
1057				pm_dev_err(dev, state, "", error);
1058			}
1059
1060			mutex_lock(&dpm_list_mtx);
1061		}
1062		if (!list_empty(&dev->power.entry))
1063			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1064		put_device(dev);
1065	}
1066	mutex_unlock(&dpm_list_mtx);
1067	async_synchronize_full();
1068	dpm_show_time(starttime, state, 0, NULL);
1069
1070	cpufreq_resume();
1071	devfreq_resume();
1072	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1073}
1074
1075/**
1076 * device_complete - Complete a PM transition for given device.
1077 * @dev: Device to handle.
1078 * @state: PM transition of the system being carried out.
1079 */
1080static void device_complete(struct device *dev, pm_message_t state)
1081{
1082	void (*callback)(struct device *) = NULL;
1083	const char *info = NULL;
1084
1085	if (dev->power.syscore)
1086		return;
1087
1088	device_lock(dev);
1089
1090	if (dev->pm_domain) {
1091		info = "completing power domain ";
1092		callback = dev->pm_domain->ops.complete;
1093	} else if (dev->type && dev->type->pm) {
1094		info = "completing type ";
1095		callback = dev->type->pm->complete;
1096	} else if (dev->class && dev->class->pm) {
1097		info = "completing class ";
1098		callback = dev->class->pm->complete;
1099	} else if (dev->bus && dev->bus->pm) {
1100		info = "completing bus ";
1101		callback = dev->bus->pm->complete;
1102	}
1103
1104	if (!callback && dev->driver && dev->driver->pm) {
1105		info = "completing driver ";
1106		callback = dev->driver->pm->complete;
1107	}
1108
1109	if (callback) {
1110		pm_dev_dbg(dev, state, info);
1111		callback(dev);
1112	}
1113
1114	device_unlock(dev);
1115
1116	pm_runtime_put(dev);
1117}
1118
1119/**
1120 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1121 * @state: PM transition of the system being carried out.
1122 *
1123 * Execute the ->complete() callbacks for all devices whose PM status is not
1124 * DPM_ON (this allows new devices to be registered).
1125 */
1126void dpm_complete(pm_message_t state)
1127{
1128	struct list_head list;
1129
1130	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1131	might_sleep();
1132
1133	INIT_LIST_HEAD(&list);
1134	mutex_lock(&dpm_list_mtx);
1135	while (!list_empty(&dpm_prepared_list)) {
1136		struct device *dev = to_device(dpm_prepared_list.prev);
1137
1138		get_device(dev);
1139		dev->power.is_prepared = false;
1140		list_move(&dev->power.entry, &list);
1141		mutex_unlock(&dpm_list_mtx);
1142
1143		trace_device_pm_callback_start(dev, "", state.event);
1144		device_complete(dev, state);
1145		trace_device_pm_callback_end(dev, 0);
1146
1147		mutex_lock(&dpm_list_mtx);
1148		put_device(dev);
1149	}
1150	list_splice(&list, &dpm_list);
1151	mutex_unlock(&dpm_list_mtx);
1152
1153	/* Allow device probing and trigger re-probing of deferred devices */
1154	device_unblock_probing();
1155	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1156}
1157
1158/**
1159 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1160 * @state: PM transition of the system being carried out.
1161 *
1162 * Execute "resume" callbacks for all devices and complete the PM transition of
1163 * the system.
1164 */
1165void dpm_resume_end(pm_message_t state)
1166{
1167	dpm_resume(state);
1168	dpm_complete(state);
1169}
1170EXPORT_SYMBOL_GPL(dpm_resume_end);
1171
1172
1173/*------------------------- Suspend routines -------------------------*/
1174
1175/**
1176 * resume_event - Return a "resume" message for given "suspend" sleep state.
1177 * @sleep_state: PM message representing a sleep state.
1178 *
1179 * Return a PM message representing the resume event corresponding to given
1180 * sleep state.
1181 */
1182static pm_message_t resume_event(pm_message_t sleep_state)
1183{
1184	switch (sleep_state.event) {
1185	case PM_EVENT_SUSPEND:
1186		return PMSG_RESUME;
1187	case PM_EVENT_FREEZE:
1188	case PM_EVENT_QUIESCE:
1189		return PMSG_RECOVER;
1190	case PM_EVENT_HIBERNATE:
1191		return PMSG_RESTORE;
1192	}
1193	return PMSG_ON;
1194}
1195
1196static void dpm_superior_set_must_resume(struct device *dev)
1197{
1198	struct device_link *link;
1199	int idx;
1200
1201	if (dev->parent)
1202		dev->parent->power.must_resume = true;
1203
1204	idx = device_links_read_lock();
1205
1206	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1207		link->supplier->power.must_resume = true;
1208
1209	device_links_read_unlock(idx);
1210}
1211
1212static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1213						 pm_message_t state,
1214						 const char **info_p)
1215{
1216	pm_callback_t callback;
1217	const char *info;
1218
1219	if (dev->pm_domain) {
1220		info = "noirq power domain ";
1221		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1222	} else if (dev->type && dev->type->pm) {
1223		info = "noirq type ";
1224		callback = pm_noirq_op(dev->type->pm, state);
1225	} else if (dev->class && dev->class->pm) {
1226		info = "noirq class ";
1227		callback = pm_noirq_op(dev->class->pm, state);
1228	} else if (dev->bus && dev->bus->pm) {
1229		info = "noirq bus ";
1230		callback = pm_noirq_op(dev->bus->pm, state);
1231	} else {
1232		return NULL;
1233	}
1234
1235	if (info_p)
1236		*info_p = info;
1237
1238	return callback;
1239}
1240
1241static bool device_must_resume(struct device *dev, pm_message_t state,
1242			       bool no_subsys_suspend_noirq)
1243{
1244	pm_message_t resume_msg = resume_event(state);
1245
1246	/*
1247	 * If all of the device driver's "noirq", "late" and "early" callbacks
1248	 * are invoked directly by the core, the decision to allow the device to
1249	 * stay in suspend can be based on its current runtime PM status and its
1250	 * wakeup settings.
1251	 */
1252	if (no_subsys_suspend_noirq &&
1253	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1254	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1255	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1256		return !pm_runtime_status_suspended(dev) &&
1257			(resume_msg.event != PM_EVENT_RESUME ||
1258			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1259
1260	/*
1261	 * The only safe strategy here is to require that if the device may not
1262	 * be left in suspend, resume callbacks must be invoked for it.
1263	 */
1264	return !dev->power.may_skip_resume;
1265}
1266
1267/**
1268 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1269 * @dev: Device to handle.
1270 * @state: PM transition of the system being carried out.
1271 * @async: If true, the device is being suspended asynchronously.
1272 *
1273 * The driver of @dev will not receive interrupts while this function is being
1274 * executed.
1275 */
1276static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1277{
1278	pm_callback_t callback;
1279	const char *info;
1280	bool no_subsys_cb = false;
1281	int error = 0;
1282
1283	TRACE_DEVICE(dev);
1284	TRACE_SUSPEND(0);
1285
1286	dpm_wait_for_subordinate(dev, async);
1287
1288	if (async_error)
1289		goto Complete;
1290
1291	if (dev->power.syscore || dev->power.direct_complete)
1292		goto Complete;
1293
1294	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
 
 
 
 
 
 
 
 
 
 
 
 
1295	if (callback)
1296		goto Run;
1297
1298	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1299
1300	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1301		goto Skip;
1302
1303	if (dev->driver && dev->driver->pm) {
1304		info = "noirq driver ";
1305		callback = pm_noirq_op(dev->driver->pm, state);
1306	}
1307
1308Run:
1309	error = dpm_run_callback(callback, dev, state, info);
1310	if (error) {
1311		async_error = error;
1312		goto Complete;
1313	}
1314
1315Skip:
1316	dev->power.is_noirq_suspended = true;
1317
1318	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1319		dev->power.must_resume = dev->power.must_resume ||
1320				atomic_read(&dev->power.usage_count) > 1 ||
1321				device_must_resume(dev, state, no_subsys_cb);
1322	} else {
 
 
 
 
1323		dev->power.must_resume = true;
1324	}
1325
1326	if (dev->power.must_resume)
1327		dpm_superior_set_must_resume(dev);
1328
1329Complete:
1330	complete_all(&dev->power.completion);
1331	TRACE_SUSPEND(error);
1332	return error;
1333}
1334
1335static void async_suspend_noirq(void *data, async_cookie_t cookie)
1336{
1337	struct device *dev = (struct device *)data;
1338	int error;
1339
1340	error = __device_suspend_noirq(dev, pm_transition, true);
1341	if (error) {
1342		dpm_save_failed_dev(dev_name(dev));
1343		pm_dev_err(dev, pm_transition, " async", error);
1344	}
1345
1346	put_device(dev);
1347}
1348
1349static int device_suspend_noirq(struct device *dev)
1350{
1351	if (dpm_async_fn(dev, async_suspend_noirq))
1352		return 0;
1353
1354	return __device_suspend_noirq(dev, pm_transition, false);
1355}
1356
1357static int dpm_noirq_suspend_devices(pm_message_t state)
1358{
1359	ktime_t starttime = ktime_get();
1360	int error = 0;
1361
1362	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1363	mutex_lock(&dpm_list_mtx);
1364	pm_transition = state;
1365	async_error = 0;
1366
1367	while (!list_empty(&dpm_late_early_list)) {
1368		struct device *dev = to_device(dpm_late_early_list.prev);
1369
1370		get_device(dev);
1371		mutex_unlock(&dpm_list_mtx);
1372
1373		error = device_suspend_noirq(dev);
1374
1375		mutex_lock(&dpm_list_mtx);
1376		if (error) {
1377			pm_dev_err(dev, state, " noirq", error);
1378			dpm_save_failed_dev(dev_name(dev));
1379			put_device(dev);
1380			break;
1381		}
1382		if (!list_empty(&dev->power.entry))
1383			list_move(&dev->power.entry, &dpm_noirq_list);
1384		put_device(dev);
1385
1386		if (async_error)
1387			break;
1388	}
1389	mutex_unlock(&dpm_list_mtx);
1390	async_synchronize_full();
1391	if (!error)
1392		error = async_error;
1393
1394	if (error) {
1395		suspend_stats.failed_suspend_noirq++;
1396		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1397	}
1398	dpm_show_time(starttime, state, error, "noirq");
1399	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1400	return error;
1401}
1402
1403/**
1404 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1405 * @state: PM transition of the system being carried out.
1406 *
1407 * Prevent device drivers' interrupt handlers from being called and invoke
1408 * "noirq" suspend callbacks for all non-sysdev devices.
1409 */
1410int dpm_suspend_noirq(pm_message_t state)
1411{
1412	int ret;
1413
1414	cpuidle_pause();
1415
1416	device_wakeup_arm_wake_irqs();
1417	suspend_device_irqs();
1418
1419	ret = dpm_noirq_suspend_devices(state);
1420	if (ret)
1421		dpm_resume_noirq(resume_event(state));
1422
1423	return ret;
1424}
1425
1426static void dpm_propagate_wakeup_to_parent(struct device *dev)
1427{
1428	struct device *parent = dev->parent;
1429
1430	if (!parent)
1431		return;
1432
1433	spin_lock_irq(&parent->power.lock);
1434
1435	if (dev->power.wakeup_path && !parent->power.ignore_children)
1436		parent->power.wakeup_path = true;
1437
1438	spin_unlock_irq(&parent->power.lock);
1439}
1440
1441static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1442						pm_message_t state,
1443						const char **info_p)
1444{
1445	pm_callback_t callback;
1446	const char *info;
1447
1448	if (dev->pm_domain) {
1449		info = "late power domain ";
1450		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1451	} else if (dev->type && dev->type->pm) {
1452		info = "late type ";
1453		callback = pm_late_early_op(dev->type->pm, state);
1454	} else if (dev->class && dev->class->pm) {
1455		info = "late class ";
1456		callback = pm_late_early_op(dev->class->pm, state);
1457	} else if (dev->bus && dev->bus->pm) {
1458		info = "late bus ";
1459		callback = pm_late_early_op(dev->bus->pm, state);
1460	} else {
1461		return NULL;
1462	}
1463
1464	if (info_p)
1465		*info_p = info;
1466
1467	return callback;
1468}
1469
1470/**
1471 * __device_suspend_late - Execute a "late suspend" callback for given device.
1472 * @dev: Device to handle.
1473 * @state: PM transition of the system being carried out.
1474 * @async: If true, the device is being suspended asynchronously.
1475 *
1476 * Runtime PM is disabled for @dev while this function is being executed.
1477 */
1478static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1479{
1480	pm_callback_t callback;
1481	const char *info;
1482	int error = 0;
1483
1484	TRACE_DEVICE(dev);
1485	TRACE_SUSPEND(0);
1486
1487	__pm_runtime_disable(dev, false);
1488
1489	dpm_wait_for_subordinate(dev, async);
1490
1491	if (async_error)
1492		goto Complete;
1493
1494	if (pm_wakeup_pending()) {
1495		async_error = -EBUSY;
1496		goto Complete;
1497	}
1498
1499	if (dev->power.syscore || dev->power.direct_complete)
1500		goto Complete;
1501
1502	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
 
 
 
 
 
 
 
 
 
 
 
 
1503	if (callback)
1504		goto Run;
1505
1506	if (dev_pm_smart_suspend_and_suspended(dev) &&
1507	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1508		goto Skip;
1509
1510	if (dev->driver && dev->driver->pm) {
1511		info = "late driver ";
1512		callback = pm_late_early_op(dev->driver->pm, state);
1513	}
1514
1515Run:
1516	error = dpm_run_callback(callback, dev, state, info);
1517	if (error) {
1518		async_error = error;
1519		goto Complete;
1520	}
1521	dpm_propagate_wakeup_to_parent(dev);
1522
1523Skip:
1524	dev->power.is_late_suspended = true;
1525
1526Complete:
1527	TRACE_SUSPEND(error);
1528	complete_all(&dev->power.completion);
1529	return error;
1530}
1531
1532static void async_suspend_late(void *data, async_cookie_t cookie)
1533{
1534	struct device *dev = (struct device *)data;
1535	int error;
1536
1537	error = __device_suspend_late(dev, pm_transition, true);
1538	if (error) {
1539		dpm_save_failed_dev(dev_name(dev));
1540		pm_dev_err(dev, pm_transition, " async", error);
1541	}
1542	put_device(dev);
1543}
1544
1545static int device_suspend_late(struct device *dev)
1546{
1547	if (dpm_async_fn(dev, async_suspend_late))
1548		return 0;
1549
1550	return __device_suspend_late(dev, pm_transition, false);
1551}
1552
1553/**
1554 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1555 * @state: PM transition of the system being carried out.
1556 */
1557int dpm_suspend_late(pm_message_t state)
1558{
1559	ktime_t starttime = ktime_get();
1560	int error = 0;
1561
1562	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1563	mutex_lock(&dpm_list_mtx);
1564	pm_transition = state;
1565	async_error = 0;
1566
1567	while (!list_empty(&dpm_suspended_list)) {
1568		struct device *dev = to_device(dpm_suspended_list.prev);
1569
1570		get_device(dev);
1571		mutex_unlock(&dpm_list_mtx);
1572
1573		error = device_suspend_late(dev);
1574
1575		mutex_lock(&dpm_list_mtx);
1576		if (!list_empty(&dev->power.entry))
1577			list_move(&dev->power.entry, &dpm_late_early_list);
1578
1579		if (error) {
1580			pm_dev_err(dev, state, " late", error);
1581			dpm_save_failed_dev(dev_name(dev));
1582			put_device(dev);
1583			break;
1584		}
1585		put_device(dev);
1586
1587		if (async_error)
1588			break;
1589	}
1590	mutex_unlock(&dpm_list_mtx);
1591	async_synchronize_full();
1592	if (!error)
1593		error = async_error;
1594	if (error) {
1595		suspend_stats.failed_suspend_late++;
1596		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1597		dpm_resume_early(resume_event(state));
1598	}
1599	dpm_show_time(starttime, state, error, "late");
1600	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1601	return error;
1602}
1603
1604/**
1605 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1606 * @state: PM transition of the system being carried out.
1607 */
1608int dpm_suspend_end(pm_message_t state)
1609{
1610	ktime_t starttime = ktime_get();
1611	int error;
1612
1613	error = dpm_suspend_late(state);
1614	if (error)
1615		goto out;
1616
1617	error = dpm_suspend_noirq(state);
1618	if (error)
1619		dpm_resume_early(resume_event(state));
1620
1621out:
1622	dpm_show_time(starttime, state, error, "end");
1623	return error;
1624}
1625EXPORT_SYMBOL_GPL(dpm_suspend_end);
1626
1627/**
1628 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1629 * @dev: Device to suspend.
1630 * @state: PM transition of the system being carried out.
1631 * @cb: Suspend callback to execute.
1632 * @info: string description of caller.
1633 */
1634static int legacy_suspend(struct device *dev, pm_message_t state,
1635			  int (*cb)(struct device *dev, pm_message_t state),
1636			  const char *info)
1637{
1638	int error;
1639	ktime_t calltime;
1640
1641	calltime = initcall_debug_start(dev, cb);
1642
1643	trace_device_pm_callback_start(dev, info, state.event);
1644	error = cb(dev, state);
1645	trace_device_pm_callback_end(dev, error);
1646	suspend_report_result(cb, error);
1647
1648	initcall_debug_report(dev, calltime, cb, error);
1649
1650	return error;
1651}
1652
1653static void dpm_clear_superiors_direct_complete(struct device *dev)
1654{
1655	struct device_link *link;
1656	int idx;
1657
1658	if (dev->parent) {
1659		spin_lock_irq(&dev->parent->power.lock);
1660		dev->parent->power.direct_complete = false;
1661		spin_unlock_irq(&dev->parent->power.lock);
1662	}
1663
1664	idx = device_links_read_lock();
1665
1666	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1667		spin_lock_irq(&link->supplier->power.lock);
1668		link->supplier->power.direct_complete = false;
1669		spin_unlock_irq(&link->supplier->power.lock);
1670	}
1671
1672	device_links_read_unlock(idx);
1673}
1674
1675/**
1676 * __device_suspend - Execute "suspend" callbacks for given device.
1677 * @dev: Device to handle.
1678 * @state: PM transition of the system being carried out.
1679 * @async: If true, the device is being suspended asynchronously.
1680 */
1681static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1682{
1683	pm_callback_t callback = NULL;
1684	const char *info = NULL;
1685	int error = 0;
1686	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1687
1688	TRACE_DEVICE(dev);
1689	TRACE_SUSPEND(0);
1690
1691	dpm_wait_for_subordinate(dev, async);
1692
1693	if (async_error) {
1694		dev->power.direct_complete = false;
1695		goto Complete;
1696	}
1697
1698	/*
1699	 * If a device configured to wake up the system from sleep states
1700	 * has been suspended at run time and there's a resume request pending
1701	 * for it, this is equivalent to the device signaling wakeup, so the
1702	 * system suspend operation should be aborted.
 
 
 
 
 
1703	 */
1704	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1705		pm_wakeup_event(dev, 0);
1706
1707	if (pm_wakeup_pending()) {
1708		dev->power.direct_complete = false;
1709		async_error = -EBUSY;
1710		goto Complete;
1711	}
1712
1713	if (dev->power.syscore)
1714		goto Complete;
1715
1716	/* Avoid direct_complete to let wakeup_path propagate. */
1717	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1718		dev->power.direct_complete = false;
1719
1720	if (dev->power.direct_complete) {
1721		if (pm_runtime_status_suspended(dev)) {
1722			pm_runtime_disable(dev);
1723			if (pm_runtime_status_suspended(dev)) {
1724				pm_dev_dbg(dev, state, "direct-complete ");
1725				goto Complete;
1726			}
1727
1728			pm_runtime_enable(dev);
1729		}
1730		dev->power.direct_complete = false;
1731	}
1732
1733	dev->power.may_skip_resume = false;
1734	dev->power.must_resume = false;
1735
1736	dpm_watchdog_set(&wd, dev);
1737	device_lock(dev);
1738
1739	if (dev->pm_domain) {
1740		info = "power domain ";
1741		callback = pm_op(&dev->pm_domain->ops, state);
1742		goto Run;
1743	}
1744
1745	if (dev->type && dev->type->pm) {
1746		info = "type ";
1747		callback = pm_op(dev->type->pm, state);
1748		goto Run;
1749	}
1750
1751	if (dev->class && dev->class->pm) {
1752		info = "class ";
1753		callback = pm_op(dev->class->pm, state);
1754		goto Run;
1755	}
1756
1757	if (dev->bus) {
1758		if (dev->bus->pm) {
1759			info = "bus ";
1760			callback = pm_op(dev->bus->pm, state);
1761		} else if (dev->bus->suspend) {
1762			pm_dev_dbg(dev, state, "legacy bus ");
1763			error = legacy_suspend(dev, state, dev->bus->suspend,
1764						"legacy bus ");
1765			goto End;
1766		}
1767	}
1768
1769 Run:
1770	if (!callback && dev->driver && dev->driver->pm) {
1771		info = "driver ";
1772		callback = pm_op(dev->driver->pm, state);
1773	}
1774
1775	error = dpm_run_callback(callback, dev, state, info);
1776
1777 End:
1778	if (!error) {
1779		dev->power.is_suspended = true;
1780		if (device_may_wakeup(dev))
1781			dev->power.wakeup_path = true;
1782
1783		dpm_propagate_wakeup_to_parent(dev);
1784		dpm_clear_superiors_direct_complete(dev);
1785	}
1786
1787	device_unlock(dev);
1788	dpm_watchdog_clear(&wd);
1789
1790 Complete:
1791	if (error)
1792		async_error = error;
1793
1794	complete_all(&dev->power.completion);
1795	TRACE_SUSPEND(error);
1796	return error;
1797}
1798
1799static void async_suspend(void *data, async_cookie_t cookie)
1800{
1801	struct device *dev = (struct device *)data;
1802	int error;
1803
1804	error = __device_suspend(dev, pm_transition, true);
1805	if (error) {
1806		dpm_save_failed_dev(dev_name(dev));
1807		pm_dev_err(dev, pm_transition, " async", error);
1808	}
1809
1810	put_device(dev);
1811}
1812
1813static int device_suspend(struct device *dev)
1814{
1815	if (dpm_async_fn(dev, async_suspend))
1816		return 0;
1817
1818	return __device_suspend(dev, pm_transition, false);
1819}
1820
1821/**
1822 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1823 * @state: PM transition of the system being carried out.
1824 */
1825int dpm_suspend(pm_message_t state)
1826{
1827	ktime_t starttime = ktime_get();
1828	int error = 0;
1829
1830	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1831	might_sleep();
1832
1833	devfreq_suspend();
1834	cpufreq_suspend();
1835
1836	mutex_lock(&dpm_list_mtx);
1837	pm_transition = state;
1838	async_error = 0;
1839	while (!list_empty(&dpm_prepared_list)) {
1840		struct device *dev = to_device(dpm_prepared_list.prev);
1841
1842		get_device(dev);
1843		mutex_unlock(&dpm_list_mtx);
1844
1845		error = device_suspend(dev);
1846
1847		mutex_lock(&dpm_list_mtx);
1848		if (error) {
1849			pm_dev_err(dev, state, "", error);
1850			dpm_save_failed_dev(dev_name(dev));
1851			put_device(dev);
1852			break;
1853		}
1854		if (!list_empty(&dev->power.entry))
1855			list_move(&dev->power.entry, &dpm_suspended_list);
1856		put_device(dev);
1857		if (async_error)
1858			break;
1859	}
1860	mutex_unlock(&dpm_list_mtx);
1861	async_synchronize_full();
1862	if (!error)
1863		error = async_error;
1864	if (error) {
1865		suspend_stats.failed_suspend++;
1866		dpm_save_failed_step(SUSPEND_SUSPEND);
1867	}
1868	dpm_show_time(starttime, state, error, NULL);
1869	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1870	return error;
1871}
1872
1873/**
1874 * device_prepare - Prepare a device for system power transition.
1875 * @dev: Device to handle.
1876 * @state: PM transition of the system being carried out.
1877 *
1878 * Execute the ->prepare() callback(s) for the given device.  No new
1879 * children of the device may be registered after this function has returned.
1880 */
1881static int device_prepare(struct device *dev, pm_message_t state)
1882{
1883	int (*callback)(struct device *) = NULL;
1884	int ret = 0;
1885
1886	if (dev->power.syscore)
1887		return 0;
1888
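	/*
	 * DPM_FLAG_SMART_SUSPEND and DPM_FLAG_LEAVE_SUSPENDED rely on the
	 * device's runtime PM status being meaningful, so complain if either
	 * flag is set while runtime PM is disabled for the device.
	 */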
1889	WARN_ON(!pm_runtime_enabled(dev) &&
1890		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1891					      DPM_FLAG_LEAVE_SUSPENDED));
1892
1893	/*
1894	 * If a device's parent goes into runtime suspend at the wrong time,
1895	 * it won't be possible to resume the device.  To prevent this we
1896	 * block runtime suspend here, during the prepare phase, and allow
1897	 * it again during the complete phase.
1898	 */
1899	pm_runtime_get_noresume(dev);
1900
1901	device_lock(dev);
1902
1903	dev->power.wakeup_path = false;
1904
1905	if (dev->power.no_pm_callbacks)
1906		goto unlock;
1907
1908	if (dev->pm_domain)
1909		callback = dev->pm_domain->ops.prepare;
1910	else if (dev->type && dev->type->pm)
1911		callback = dev->type->pm->prepare;
1912	else if (dev->class && dev->class->pm)
1913		callback = dev->class->pm->prepare;
1914	else if (dev->bus && dev->bus->pm)
1915		callback = dev->bus->pm->prepare;
1916
1917	if (!callback && dev->driver && dev->driver->pm)
1918		callback = dev->driver->pm->prepare;
1919
1920	if (callback)
1921		ret = callback(dev);
1922
1923unlock:
1924	device_unlock(dev);
1925
1926	if (ret < 0) {
1927		suspend_report_result(callback, ret);
1928		pm_runtime_put(dev);
1929		return ret;
1930	}
1931	/*
1932	 * A positive return value from ->prepare() means "this device appears
1933	 * to be runtime-suspended and its state is fine, so if it really is
1934	 * runtime-suspended, you can leave it in that state provided that you
1935	 * will do the same thing with all of its descendants".  This only
1936	 * applies to suspend transitions, however.
1937	 */
1938	spin_lock_irq(&dev->power.lock);
1939	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1940		((pm_runtime_suspended(dev) && ret > 0) ||
1941		 dev->power.no_pm_callbacks) &&
1942		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1943	spin_unlock_irq(&dev->power.lock);
1944	return 0;
1945}
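
/*
 * Illustrative sketch, not part of the original file: a driver can opt into
 * the direct-complete optimization described above by returning a positive
 * value from its ->prepare() callback while the device is runtime-suspended.
 * The driver name "foo" is a hypothetical placeholder:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 */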
1946
1947/**
1948 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1949 * @state: PM transition of the system being carried out.
1950 *
1951 * Execute the ->prepare() callback(s) for all devices.
1952 */
1953int dpm_prepare(pm_message_t state)
1954{
1955	int error = 0;
1956
1957	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1958	might_sleep();
1959
1960	/*
1961	 * Give the known devices a chance to complete their probes before
1962	 * probing is disabled.  This synchronization point matters at least
1963	 * at boot time and during hibernation restore.
1964	 */
1965	wait_for_device_probe();
1966	/*
1967	 * Probing a device during suspend or hibernation is unsafe and would
1968	 * make system behavior unpredictable, so prohibit device probing here
1969	 * and defer any probes instead.  Normal behavior is restored in
1970	 * dpm_complete().
1971	 */
1972	device_block_probing();
1973
1974	mutex_lock(&dpm_list_mtx);
1975	while (!list_empty(&dpm_list)) {
1976		struct device *dev = to_device(dpm_list.next);
1977
1978		get_device(dev);
1979		mutex_unlock(&dpm_list_mtx);
1980
1981		trace_device_pm_callback_start(dev, "", state.event);
1982		error = device_prepare(dev, state);
1983		trace_device_pm_callback_end(dev, error);
1984
1985		mutex_lock(&dpm_list_mtx);
1986		if (error) {
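			/*
			 * -EAGAIN from ->prepare() is not a hard failure:
			 * leave the device on dpm_list so it is retried,
			 * instead of aborting the whole transition.
			 */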
1987			if (error == -EAGAIN) {
1988				put_device(dev);
1989				error = 0;
1990				continue;
1991			}
1992			pr_info("Device %s not prepared for power transition: code %d\n",
1993				dev_name(dev), error);
1994			put_device(dev);
1995			break;
1996		}
1997		dev->power.is_prepared = true;
1998		if (!list_empty(&dev->power.entry))
1999			list_move_tail(&dev->power.entry, &dpm_prepared_list);
2000		put_device(dev);
2001	}
2002	mutex_unlock(&dpm_list_mtx);
2003	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2004	return error;
2005}
2006
2007/**
2008 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2009 * @state: PM transition of the system being carried out.
2010 *
2011 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2012 * callbacks for them.
2013 */
2014int dpm_suspend_start(pm_message_t state)
2015{
2016	ktime_t starttime = ktime_get();
2017	int error;
2018
2019	error = dpm_prepare(state);
2020	if (error) {
2021		suspend_stats.failed_prepare++;
2022		dpm_save_failed_step(SUSPEND_PREPARE);
2023	} else
2024		error = dpm_suspend(state);
2025	dpm_show_time(starttime, state, error, "start");
2026	return error;
2027}
2028EXPORT_SYMBOL_GPL(dpm_suspend_start);
2029
2030void __suspend_report_result(const char *function, void *fn, int ret)
2031{
2032	if (ret)
2033		pr_err("%s(): %pS returns %d\n", function, fn, ret);
2034}
2035EXPORT_SYMBOL_GPL(__suspend_report_result);
2036
2037/**
2038 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2039 * @subordinate: Device that needs to wait for @dev.
2040 * @dev: Device to wait for.
2041 */
2042int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2043{
2044	dpm_wait(dev, subordinate->power.async_suspend);
2045	return async_error;
2046}
2047EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2048
2049/**
2050 * dpm_for_each_dev - device iterator.
2051 * @data: data for the callback.
2052 * @fn: function to be called for each device.
2053 *
2054 * Iterate over devices in dpm_list, and call @fn for each device,
2055 * passing it @data.
2056 */
2057void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2058{
2059	struct device *dev;
2060
2061	if (!fn)
2062		return;
2063
2064	device_pm_lock();
2065	list_for_each_entry(dev, &dpm_list, power.entry)
2066		fn(dev, data);
2067	device_pm_unlock();
2068}
2069EXPORT_SYMBOL_GPL(dpm_for_each_dev);
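
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	static void show_dev(struct device *dev, void *data)
 *	{
 *		dev_info(dev, "%s\n", (const char *)data);
 *	}
 *
 *	dpm_for_each_dev((void *)"on dpm_list", show_dev);
 *
 * Note that @data is the first argument and the callback the second.
 */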
2070
2071static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2072{
2073	if (!ops)
2074		return true;
2075
2076	return !ops->prepare &&
2077	       !ops->suspend &&
2078	       !ops->suspend_late &&
2079	       !ops->suspend_noirq &&
2080	       !ops->resume_noirq &&
2081	       !ops->resume_early &&
2082	       !ops->resume &&
2083	       !ops->complete;
2084}
2085
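/*
 * Cache whether the device has any system-wide PM callbacks at all, so the
 * suspend and resume paths can skip such devices cheaply.  Note that only
 * the suspend/resume family of callbacks is checked by pm_ops_is_empty().
 */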
2086void device_pm_check_callbacks(struct device *dev)
2087{
2088	spin_lock_irq(&dev->power.lock);
2089	dev->power.no_pm_callbacks =
2090		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2091		 !dev->bus->suspend && !dev->bus->resume)) &&
2092		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2093		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2094		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2095		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2096		 !dev->driver->suspend && !dev->driver->resume));
2097	spin_unlock_irq(&dev->power.lock);
2098}
2099
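/**
 * dev_pm_smart_suspend_and_suspended - Check if a device can be left suspended.
 * @dev: Device to check.
 *
 * Return true if the device's driver has set DPM_FLAG_SMART_SUSPEND and the
 * device's runtime PM status is "suspended".
 */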
2100bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2101{
2102	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2103		pm_runtime_status_suspended(dev);
2104}