Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 
 
   7 */
 
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  50
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65static void update_pm_runtime_accounting(struct device *dev)
  66{
  67	u64 now, last, delta;
 
  68
  69	if (dev->power.disable_depth > 0)
  70		return;
  71
  72	last = dev->power.accounting_timestamp;
  73
  74	now = ktime_get_mono_fast_ns();
  75	dev->power.accounting_timestamp = now;
  76
  77	/*
  78	 * Because ktime_get_mono_fast_ns() is not monotonic during
  79	 * timekeeping updates, ensure that 'now' is after the last saved
  80	 * timesptamp.
  81	 */
  82	if (now < last)
  83		return;
  84
  85	delta = now - last;
  86
  87	if (dev->power.runtime_status == RPM_SUSPENDED)
  88		dev->power.suspended_time += delta;
  89	else
  90		dev->power.active_time += delta;
  91}
  92
  93static void __update_runtime_status(struct device *dev, enum rpm_status status)
  94{
  95	update_pm_runtime_accounting(dev);
  96	dev->power.runtime_status = status;
  97}
  98
  99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 100{
 101	u64 time;
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&dev->power.lock, flags);
 105
 106	update_pm_runtime_accounting(dev);
 107	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 108
 109	spin_unlock_irqrestore(&dev->power.lock, flags);
 110
 111	return time;
 112}
 113
 114u64 pm_runtime_active_time(struct device *dev)
 115{
 116	return rpm_get_accounted_time(dev, false);
 117}
 118
 119u64 pm_runtime_suspended_time(struct device *dev)
 120{
 121	return rpm_get_accounted_time(dev, true);
 122}
 123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 124
 125/**
 126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 127 * @dev: Device to handle.
 128 */
 129static void pm_runtime_deactivate_timer(struct device *dev)
 130{
 131	if (dev->power.timer_expires > 0) {
 132		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 133		dev->power.timer_expires = 0;
 134	}
 135}
 136
 137/**
 138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 139 * @dev: Device to handle.
 140 */
 141static void pm_runtime_cancel_pending(struct device *dev)
 142{
 143	pm_runtime_deactivate_timer(dev);
 144	/*
 145	 * In case there's a request pending, make sure its work function will
 146	 * return without doing anything.
 147	 */
 148	dev->power.request = RPM_REQ_NONE;
 149}
 150
 151/*
 152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 153 * @dev: Device to handle.
 154 *
 155 * Compute the autosuspend-delay expiration time based on the device's
 156 * power.last_busy time.  If the delay has already expired or is disabled
 157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 159 *
 160 * This function may be called either with or without dev->power.lock held.
 161 * Either way it can be racy, since power.last_busy may be updated at any time.
 162 */
 163u64 pm_runtime_autosuspend_expiration(struct device *dev)
 164{
 165	int autosuspend_delay;
 166	u64 expires;
 
 
 167
 168	if (!dev->power.use_autosuspend)
 169		return 0;
 170
 171	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 172	if (autosuspend_delay < 0)
 173		return 0;
 174
 175	expires  = READ_ONCE(dev->power.last_busy);
 176	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 177	if (expires > ktime_get_mono_fast_ns())
 178		return expires;	/* Expires in the future */
 179
 180	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 181}
 182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 183
 184static int dev_memalloc_noio(struct device *dev, void *data)
 185{
 186	return dev->power.memalloc_noio;
 187}
 188
 189/*
 190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 191 * @dev: Device to handle.
 192 * @enable: True for setting the flag and False for clearing the flag.
 193 *
 194 * Set the flag for all devices in the path from the device to the
 195 * root device in the device tree if @enable is true, otherwise clear
 196 * the flag for devices in the path whose siblings don't set the flag.
 197 *
 198 * The function should only be called by block device, or network
 199 * device driver for solving the deadlock problem during runtime
 200 * resume/suspend:
 201 *
 202 *     If memory allocation with GFP_KERNEL is called inside runtime
 203 *     resume/suspend callback of any one of its ancestors(or the
 204 *     block device itself), the deadlock may be triggered inside the
 205 *     memory allocation since it might not complete until the block
 206 *     device becomes active and the involed page I/O finishes. The
 207 *     situation is pointed out first by Alan Stern. Network device
 208 *     are involved in iSCSI kind of situation.
 209 *
 210 * The lock of dev_hotplug_mutex is held in the function for handling
 211 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 212 * in async probe().
 213 *
 214 * The function should be called between device_add() and device_del()
 215 * on the affected device(block/network device).
 216 */
 217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 218{
 219	static DEFINE_MUTEX(dev_hotplug_mutex);
 220
 221	mutex_lock(&dev_hotplug_mutex);
 222	for (;;) {
 223		bool enabled;
 224
 225		/* hold power lock since bitfield is not SMP-safe. */
 226		spin_lock_irq(&dev->power.lock);
 227		enabled = dev->power.memalloc_noio;
 228		dev->power.memalloc_noio = enable;
 229		spin_unlock_irq(&dev->power.lock);
 230
 231		/*
 232		 * not need to enable ancestors any more if the device
 233		 * has been enabled.
 234		 */
 235		if (enabled && enable)
 236			break;
 237
 238		dev = dev->parent;
 239
 240		/*
 241		 * clear flag of the parent device only if all the
 242		 * children don't set the flag because ancestor's
 243		 * flag was set by any one of the descendants.
 244		 */
 245		if (!dev || (!enable &&
 246			     device_for_each_child(dev, NULL,
 247						   dev_memalloc_noio)))
 248			break;
 249	}
 250	mutex_unlock(&dev_hotplug_mutex);
 251}
 252EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
 253
 254/**
 255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 256 * @dev: Device to test.
 257 */
 258static int rpm_check_suspend_allowed(struct device *dev)
 259{
 260	int retval = 0;
 261
 262	if (dev->power.runtime_error)
 263		retval = -EINVAL;
 264	else if (dev->power.disable_depth > 0)
 265		retval = -EACCES;
 266	else if (atomic_read(&dev->power.usage_count) > 0)
 267		retval = -EAGAIN;
 268	else if (!dev->power.ignore_children &&
 269			atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume
 274			&& dev->power.runtime_status == RPM_SUSPENDING)
 275	    || (dev->power.request_pending
 276			&& dev->power.request == RPM_REQ_RESUME))
 277		retval = -EAGAIN;
 278	else if (__dev_pm_qos_resume_latency(dev) == 0)
 279		retval = -EPERM;
 280	else if (dev->power.runtime_status == RPM_SUSPENDED)
 281		retval = 1;
 282
 283	return retval;
 284}
 285
 286static int rpm_get_suppliers(struct device *dev)
 287{
 288	struct device_link *link;
 289
 290	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 291				device_links_read_lock_held()) {
 292		int retval;
 293
 294		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
 295		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 
 
 
 296			continue;
 297
 298		retval = pm_runtime_get_sync(link->supplier);
 299		/* Ignore suppliers with disabled runtime PM. */
 300		if (retval < 0 && retval != -EACCES) {
 301			pm_runtime_put_noidle(link->supplier);
 302			return retval;
 303		}
 304		refcount_inc(&link->rpm_active);
 305	}
 306	return 0;
 307}
 308
 309static void rpm_put_suppliers(struct device *dev)
 310{
 311	struct device_link *link;
 312
 313	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 314				device_links_read_lock_held()) {
 315		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 316			continue;
 317
 318		while (refcount_dec_not_one(&link->rpm_active))
 319			pm_runtime_put(link->supplier);
 320	}
 
 321}
 322
 323/**
 324 * __rpm_callback - Run a given runtime PM callback for a given device.
 325 * @cb: Runtime PM callback to run.
 326 * @dev: Device to run the callback for.
 327 */
 328static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 329	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 330{
 331	int retval, idx;
 332	bool use_links = dev->power.links_count > 0;
 333
 334	if (dev->power.irq_safe) {
 335		spin_unlock(&dev->power.lock);
 336	} else {
 337		spin_unlock_irq(&dev->power.lock);
 338
 339		/*
 340		 * Resume suppliers if necessary.
 341		 *
 342		 * The device's runtime PM status cannot change until this
 343		 * routine returns, so it is safe to read the status outside of
 344		 * the lock.
 345		 */
 346		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 347			idx = device_links_read_lock();
 348
 349			retval = rpm_get_suppliers(dev);
 350			if (retval)
 351				goto fail;
 352
 353			device_links_read_unlock(idx);
 354		}
 355	}
 356
 357	retval = cb(dev);
 358
 359	if (dev->power.irq_safe) {
 360		spin_lock(&dev->power.lock);
 361	} else {
 362		/*
 363		 * If the device is suspending and the callback has returned
 364		 * success, drop the usage counters of the suppliers that have
 365		 * been reference counted on its resume.
 366		 *
 367		 * Do that if resume fails too.
 368		 */
 369		if (use_links
 370		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 371		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 372			idx = device_links_read_lock();
 373
 374 fail:
 375			rpm_put_suppliers(dev);
 376
 377			device_links_read_unlock(idx);
 378		}
 379
 380		spin_lock_irq(&dev->power.lock);
 381	}
 382
 383	return retval;
 384}
 385
 386/**
 387 * rpm_idle - Notify device bus type if the device can be suspended.
 388 * @dev: Device to notify the bus type about.
 389 * @rpmflags: Flag bits.
 390 *
 391 * Check if the device's runtime PM status allows it to be suspended.  If
 392 * another idle notification has been started earlier, return immediately.  If
 393 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 394 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 395 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 396 *
 397 * This function must be called under dev->power.lock with interrupts disabled.
 398 */
 399static int rpm_idle(struct device *dev, int rpmflags)
 400{
 401	int (*callback)(struct device *);
 402	int retval;
 403
 404	trace_rpm_idle_rcuidle(dev, rpmflags);
 405	retval = rpm_check_suspend_allowed(dev);
 406	if (retval < 0)
 407		;	/* Conditions are wrong. */
 408
 409	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 410	else if (dev->power.runtime_status != RPM_ACTIVE)
 411		retval = -EAGAIN;
 412
 413	/*
 414	 * Any pending request other than an idle notification takes
 415	 * precedence over us, except that the timer may be running.
 416	 */
 417	else if (dev->power.request_pending &&
 418	    dev->power.request > RPM_REQ_IDLE)
 419		retval = -EAGAIN;
 420
 421	/* Act as though RPM_NOWAIT is always set. */
 422	else if (dev->power.idle_notification)
 423		retval = -EINPROGRESS;
 424	if (retval)
 425		goto out;
 426
 427	/* Pending requests need to be canceled. */
 428	dev->power.request = RPM_REQ_NONE;
 429
 430	if (dev->power.no_callbacks)
 431		goto out;
 432
 433	/* Carry out an asynchronous or a synchronous idle notification. */
 434	if (rpmflags & RPM_ASYNC) {
 435		dev->power.request = RPM_REQ_IDLE;
 436		if (!dev->power.request_pending) {
 437			dev->power.request_pending = true;
 438			queue_work(pm_wq, &dev->power.work);
 439		}
 440		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 441		return 0;
 442	}
 443
 444	dev->power.idle_notification = true;
 445
 446	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 447
 448	if (callback)
 449		retval = __rpm_callback(callback, dev);
 450
 451	dev->power.idle_notification = false;
 452	wake_up_all(&dev->power.wait_queue);
 453
 454 out:
 455	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 456	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 457}
 458
 459/**
 460 * rpm_callback - Run a given runtime PM callback for a given device.
 461 * @cb: Runtime PM callback to run.
 462 * @dev: Device to run the callback for.
 463 */
 464static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 465{
 466	int retval;
 467
 468	if (!cb)
 469		return -ENOSYS;
 470
 471	if (dev->power.memalloc_noio) {
 472		unsigned int noio_flag;
 473
 474		/*
 475		 * Deadlock might be caused if memory allocation with
 476		 * GFP_KERNEL happens inside runtime_suspend and
 477		 * runtime_resume callbacks of one block device's
 478		 * ancestor or the block device itself. Network
 479		 * device might be thought as part of iSCSI block
 480		 * device, so network device and its ancestor should
 481		 * be marked as memalloc_noio too.
 482		 */
 483		noio_flag = memalloc_noio_save();
 484		retval = __rpm_callback(cb, dev);
 485		memalloc_noio_restore(noio_flag);
 486	} else {
 487		retval = __rpm_callback(cb, dev);
 488	}
 489
 490	dev->power.runtime_error = retval;
 491	return retval != -EACCES ? retval : -EIO;
 492}
 493
 494/**
 495 * rpm_suspend - Carry out runtime suspend of given device.
 496 * @dev: Device to suspend.
 497 * @rpmflags: Flag bits.
 498 *
 499 * Check if the device's runtime PM status allows it to be suspended.
 500 * Cancel a pending idle notification, autosuspend or suspend. If
 501 * another suspend has been started earlier, either return immediately
 502 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 503 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 504 * otherwise run the ->runtime_suspend() callback directly. When
 505 * ->runtime_suspend succeeded, if a deferred resume was requested while
 506 * the callback was running then carry it out, otherwise send an idle
 507 * notification for its parent (if the suspend succeeded and both
 508 * ignore_children of parent->power and irq_safe of dev->power are not set).
 509 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 510 * flag is set and the next autosuspend-delay expiration time is in the
 511 * future, schedule another autosuspend attempt.
 512 *
 513 * This function must be called under dev->power.lock with interrupts disabled.
 514 */
 515static int rpm_suspend(struct device *dev, int rpmflags)
 516	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 517{
 518	int (*callback)(struct device *);
 519	struct device *parent = NULL;
 520	int retval;
 521
 522	trace_rpm_suspend_rcuidle(dev, rpmflags);
 523
 524 repeat:
 525	retval = rpm_check_suspend_allowed(dev);
 
 526	if (retval < 0)
 527		goto out;	/* Conditions are wrong. */
 528
 529	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 530	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
 
 531		retval = -EAGAIN;
 532	if (retval)
 533		goto out;
 534
 535	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 536	if ((rpmflags & RPM_AUTO)
 537	    && dev->power.runtime_status != RPM_SUSPENDING) {
 538		u64 expires = pm_runtime_autosuspend_expiration(dev);
 539
 540		if (expires != 0) {
 541			/* Pending requests need to be canceled. */
 542			dev->power.request = RPM_REQ_NONE;
 543
 544			/*
 545			 * Optimization: If the timer is already running and is
 546			 * set to expire at or before the autosuspend delay,
 547			 * avoid the overhead of resetting it.  Just let it
 548			 * expire; pm_suspend_timer_fn() will take care of the
 549			 * rest.
 550			 */
 551			if (!(dev->power.timer_expires &&
 552					dev->power.timer_expires <= expires)) {
 553				/*
 554				 * We add a slack of 25% to gather wakeups
 555				 * without sacrificing the granularity.
 556				 */
 557				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 558						    (NSEC_PER_MSEC >> 2);
 559
 560				dev->power.timer_expires = expires;
 561				hrtimer_start_range_ns(&dev->power.suspend_timer,
 562						ns_to_ktime(expires),
 563						slack,
 564						HRTIMER_MODE_ABS);
 565			}
 566			dev->power.timer_autosuspends = 1;
 567			goto out;
 568		}
 569	}
 570
 571	/* Other scheduled or pending requests need to be canceled. */
 572	pm_runtime_cancel_pending(dev);
 573
 574	if (dev->power.runtime_status == RPM_SUSPENDING) {
 575		DEFINE_WAIT(wait);
 576
 577		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 578			retval = -EINPROGRESS;
 579			goto out;
 580		}
 581
 582		if (dev->power.irq_safe) {
 583			spin_unlock(&dev->power.lock);
 584
 585			cpu_relax();
 586
 587			spin_lock(&dev->power.lock);
 588			goto repeat;
 589		}
 590
 591		/* Wait for the other suspend running in parallel with us. */
 592		for (;;) {
 593			prepare_to_wait(&dev->power.wait_queue, &wait,
 594					TASK_UNINTERRUPTIBLE);
 595			if (dev->power.runtime_status != RPM_SUSPENDING)
 596				break;
 597
 598			spin_unlock_irq(&dev->power.lock);
 599
 600			schedule();
 601
 602			spin_lock_irq(&dev->power.lock);
 603		}
 604		finish_wait(&dev->power.wait_queue, &wait);
 605		goto repeat;
 606	}
 607
 608	if (dev->power.no_callbacks)
 609		goto no_callback;	/* Assume success. */
 610
 611	/* Carry out an asynchronous or a synchronous suspend. */
 612	if (rpmflags & RPM_ASYNC) {
 613		dev->power.request = (rpmflags & RPM_AUTO) ?
 614		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 615		if (!dev->power.request_pending) {
 616			dev->power.request_pending = true;
 617			queue_work(pm_wq, &dev->power.work);
 618		}
 619		goto out;
 620	}
 621
 622	__update_runtime_status(dev, RPM_SUSPENDING);
 623
 624	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 625
 626	dev_pm_enable_wake_irq_check(dev, true);
 627	retval = rpm_callback(callback, dev);
 628	if (retval)
 629		goto fail;
 630
 631 no_callback:
 632	__update_runtime_status(dev, RPM_SUSPENDED);
 633	pm_runtime_deactivate_timer(dev);
 634
 635	if (dev->parent) {
 636		parent = dev->parent;
 637		atomic_add_unless(&parent->power.child_count, -1, 0);
 638	}
 639	wake_up_all(&dev->power.wait_queue);
 640
 641	if (dev->power.deferred_resume) {
 642		dev->power.deferred_resume = false;
 643		rpm_resume(dev, 0);
 644		retval = -EAGAIN;
 645		goto out;
 646	}
 647
 648	/* Maybe the parent is now able to suspend. */
 649	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 650		spin_unlock(&dev->power.lock);
 651
 652		spin_lock(&parent->power.lock);
 653		rpm_idle(parent, RPM_ASYNC);
 654		spin_unlock(&parent->power.lock);
 655
 656		spin_lock(&dev->power.lock);
 657	}
 658
 659 out:
 660	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 661
 662	return retval;
 663
 664 fail:
 665	dev_pm_disable_wake_irq_check(dev);
 666	__update_runtime_status(dev, RPM_ACTIVE);
 667	dev->power.deferred_resume = false;
 668	wake_up_all(&dev->power.wait_queue);
 669
 670	if (retval == -EAGAIN || retval == -EBUSY) {
 671		dev->power.runtime_error = 0;
 672
 673		/*
 674		 * If the callback routine failed an autosuspend, and
 675		 * if the last_busy time has been updated so that there
 676		 * is a new autosuspend expiration time, automatically
 677		 * reschedule another autosuspend.
 678		 */
 679		if ((rpmflags & RPM_AUTO) &&
 680		    pm_runtime_autosuspend_expiration(dev) != 0)
 681			goto repeat;
 682	} else {
 683		pm_runtime_cancel_pending(dev);
 684	}
 685	goto out;
 686}
 687
 688/**
 689 * rpm_resume - Carry out runtime resume of given device.
 690 * @dev: Device to resume.
 691 * @rpmflags: Flag bits.
 692 *
 693 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 694 * any scheduled or pending requests.  If another resume has been started
 695 * earlier, either return immediately or wait for it to finish, depending on the
 696 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 697 * parallel with this function, either tell the other process to resume after
 698 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 699 * flag is set then queue a resume request; otherwise run the
 700 * ->runtime_resume() callback directly.  Queue an idle notification for the
 701 * device if the resume succeeded.
 702 *
 703 * This function must be called under dev->power.lock with interrupts disabled.
 704 */
 705static int rpm_resume(struct device *dev, int rpmflags)
 706	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 707{
 708	int (*callback)(struct device *);
 709	struct device *parent = NULL;
 710	int retval = 0;
 711
 712	trace_rpm_resume_rcuidle(dev, rpmflags);
 713
 714 repeat:
 715	if (dev->power.runtime_error)
 716		retval = -EINVAL;
 717	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 718	    && dev->power.runtime_status == RPM_ACTIVE)
 719		retval = 1;
 720	else if (dev->power.disable_depth > 0)
 721		retval = -EACCES;
 722	if (retval)
 723		goto out;
 724
 725	/*
 726	 * Other scheduled or pending requests need to be canceled.  Small
 727	 * optimization: If an autosuspend timer is running, leave it running
 728	 * rather than cancelling it now only to restart it again in the near
 729	 * future.
 730	 */
 731	dev->power.request = RPM_REQ_NONE;
 732	if (!dev->power.timer_autosuspends)
 733		pm_runtime_deactivate_timer(dev);
 734
 735	if (dev->power.runtime_status == RPM_ACTIVE) {
 736		retval = 1;
 737		goto out;
 738	}
 739
 740	if (dev->power.runtime_status == RPM_RESUMING
 741	    || dev->power.runtime_status == RPM_SUSPENDING) {
 742		DEFINE_WAIT(wait);
 743
 744		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 745			if (dev->power.runtime_status == RPM_SUSPENDING)
 746				dev->power.deferred_resume = true;
 747			else
 748				retval = -EINPROGRESS;
 749			goto out;
 750		}
 751
 752		if (dev->power.irq_safe) {
 753			spin_unlock(&dev->power.lock);
 754
 755			cpu_relax();
 756
 757			spin_lock(&dev->power.lock);
 758			goto repeat;
 759		}
 760
 761		/* Wait for the operation carried out in parallel with us. */
 762		for (;;) {
 763			prepare_to_wait(&dev->power.wait_queue, &wait,
 764					TASK_UNINTERRUPTIBLE);
 765			if (dev->power.runtime_status != RPM_RESUMING
 766			    && dev->power.runtime_status != RPM_SUSPENDING)
 767				break;
 768
 769			spin_unlock_irq(&dev->power.lock);
 770
 771			schedule();
 772
 773			spin_lock_irq(&dev->power.lock);
 774		}
 775		finish_wait(&dev->power.wait_queue, &wait);
 776		goto repeat;
 777	}
 778
 779	/*
 780	 * See if we can skip waking up the parent.  This is safe only if
 781	 * power.no_callbacks is set, because otherwise we don't know whether
 782	 * the resume will actually succeed.
 783	 */
 784	if (dev->power.no_callbacks && !parent && dev->parent) {
 785		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 786		if (dev->parent->power.disable_depth > 0
 787		    || dev->parent->power.ignore_children
 788		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 789			atomic_inc(&dev->parent->power.child_count);
 790			spin_unlock(&dev->parent->power.lock);
 791			retval = 1;
 792			goto no_callback;	/* Assume success. */
 793		}
 794		spin_unlock(&dev->parent->power.lock);
 795	}
 796
 797	/* Carry out an asynchronous or a synchronous resume. */
 798	if (rpmflags & RPM_ASYNC) {
 799		dev->power.request = RPM_REQ_RESUME;
 800		if (!dev->power.request_pending) {
 801			dev->power.request_pending = true;
 802			queue_work(pm_wq, &dev->power.work);
 803		}
 804		retval = 0;
 805		goto out;
 806	}
 807
 808	if (!parent && dev->parent) {
 809		/*
 810		 * Increment the parent's usage counter and resume it if
 811		 * necessary.  Not needed if dev is irq-safe; then the
 812		 * parent is permanently resumed.
 813		 */
 814		parent = dev->parent;
 815		if (dev->power.irq_safe)
 816			goto skip_parent;
 817		spin_unlock(&dev->power.lock);
 818
 819		pm_runtime_get_noresume(parent);
 820
 821		spin_lock(&parent->power.lock);
 822		/*
 823		 * Resume the parent if it has runtime PM enabled and not been
 824		 * set to ignore its children.
 825		 */
 826		if (!parent->power.disable_depth
 827		    && !parent->power.ignore_children) {
 828			rpm_resume(parent, 0);
 829			if (parent->power.runtime_status != RPM_ACTIVE)
 830				retval = -EBUSY;
 831		}
 832		spin_unlock(&parent->power.lock);
 833
 834		spin_lock(&dev->power.lock);
 835		if (retval)
 836			goto out;
 837		goto repeat;
 838	}
 839 skip_parent:
 840
 841	if (dev->power.no_callbacks)
 842		goto no_callback;	/* Assume success. */
 843
 844	__update_runtime_status(dev, RPM_RESUMING);
 845
 846	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 847
 848	dev_pm_disable_wake_irq_check(dev);
 849	retval = rpm_callback(callback, dev);
 850	if (retval) {
 851		__update_runtime_status(dev, RPM_SUSPENDED);
 852		pm_runtime_cancel_pending(dev);
 853		dev_pm_enable_wake_irq_check(dev, false);
 854	} else {
 855 no_callback:
 856		__update_runtime_status(dev, RPM_ACTIVE);
 857		pm_runtime_mark_last_busy(dev);
 858		if (parent)
 859			atomic_inc(&parent->power.child_count);
 860	}
 861	wake_up_all(&dev->power.wait_queue);
 862
 863	if (retval >= 0)
 864		rpm_idle(dev, RPM_ASYNC);
 865
 866 out:
 867	if (parent && !dev->power.irq_safe) {
 868		spin_unlock_irq(&dev->power.lock);
 869
 870		pm_runtime_put(parent);
 871
 872		spin_lock_irq(&dev->power.lock);
 873	}
 874
 875	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 876
 877	return retval;
 878}
 879
 880/**
 881 * pm_runtime_work - Universal runtime PM work function.
 882 * @work: Work structure used for scheduling the execution of this function.
 883 *
 884 * Use @work to get the device object the work is to be done for, determine what
 885 * is to be done and execute the appropriate runtime PM function.
 886 */
 887static void pm_runtime_work(struct work_struct *work)
 888{
 889	struct device *dev = container_of(work, struct device, power.work);
 890	enum rpm_request req;
 891
 892	spin_lock_irq(&dev->power.lock);
 893
 894	if (!dev->power.request_pending)
 895		goto out;
 896
 897	req = dev->power.request;
 898	dev->power.request = RPM_REQ_NONE;
 899	dev->power.request_pending = false;
 900
 901	switch (req) {
 902	case RPM_REQ_NONE:
 903		break;
 904	case RPM_REQ_IDLE:
 905		rpm_idle(dev, RPM_NOWAIT);
 906		break;
 907	case RPM_REQ_SUSPEND:
 908		rpm_suspend(dev, RPM_NOWAIT);
 909		break;
 910	case RPM_REQ_AUTOSUSPEND:
 911		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 912		break;
 913	case RPM_REQ_RESUME:
 914		rpm_resume(dev, RPM_NOWAIT);
 915		break;
 916	}
 917
 918 out:
 919	spin_unlock_irq(&dev->power.lock);
 920}
 921
 922/**
 923 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 924 * @data: Device pointer passed by pm_schedule_suspend().
 925 *
 926 * Check if the time is right and queue a suspend request.
 927 */
 928static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
 929{
 930	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 931	unsigned long flags;
 932	u64 expires;
 933
 934	spin_lock_irqsave(&dev->power.lock, flags);
 935
 936	expires = dev->power.timer_expires;
 937	/*
 938	 * If 'expires' is after the current time, we've been called
 939	 * too early.
 940	 */
 941	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 942		dev->power.timer_expires = 0;
 943		rpm_suspend(dev, dev->power.timer_autosuspends ?
 944		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 945	}
 946
 947	spin_unlock_irqrestore(&dev->power.lock, flags);
 948
 949	return HRTIMER_NORESTART;
 950}
 951
 952/**
 953 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 954 * @dev: Device to suspend.
 955 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 956 */
 957int pm_schedule_suspend(struct device *dev, unsigned int delay)
 958{
 959	unsigned long flags;
 960	u64 expires;
 961	int retval;
 962
 963	spin_lock_irqsave(&dev->power.lock, flags);
 964
 965	if (!delay) {
 966		retval = rpm_suspend(dev, RPM_ASYNC);
 967		goto out;
 968	}
 969
 970	retval = rpm_check_suspend_allowed(dev);
 971	if (retval)
 972		goto out;
 973
 974	/* Other scheduled or pending requests need to be canceled. */
 975	pm_runtime_cancel_pending(dev);
 976
 977	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
 978	dev->power.timer_expires = expires;
 979	dev->power.timer_autosuspends = 0;
 980	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 981
 982 out:
 983	spin_unlock_irqrestore(&dev->power.lock, flags);
 984
 985	return retval;
 986}
 987EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 988
 989/**
 990 * __pm_runtime_idle - Entry point for runtime idle operations.
 991 * @dev: Device to send idle notification for.
 992 * @rpmflags: Flag bits.
 993 *
 994 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 995 * return immediately if it is larger than zero.  Then carry out an idle
 996 * notification, either synchronous or asynchronous.
 997 *
 998 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 999 * or if pm_runtime_irq_safe() has been called.
1000 */
1001int __pm_runtime_idle(struct device *dev, int rpmflags)
1002{
1003	unsigned long flags;
1004	int retval;
1005
1006	if (rpmflags & RPM_GET_PUT) {
1007		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1008			trace_rpm_usage_rcuidle(dev, rpmflags);
1009			return 0;
1010		}
1011	}
1012
1013	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1014
1015	spin_lock_irqsave(&dev->power.lock, flags);
1016	retval = rpm_idle(dev, rpmflags);
1017	spin_unlock_irqrestore(&dev->power.lock, flags);
1018
1019	return retval;
1020}
1021EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1022
1023/**
1024 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1025 * @dev: Device to suspend.
1026 * @rpmflags: Flag bits.
1027 *
1028 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1029 * return immediately if it is larger than zero.  Then carry out a suspend,
1030 * either synchronous or asynchronous.
1031 *
1032 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1033 * or if pm_runtime_irq_safe() has been called.
1034 */
1035int __pm_runtime_suspend(struct device *dev, int rpmflags)
1036{
1037	unsigned long flags;
1038	int retval;
1039
1040	if (rpmflags & RPM_GET_PUT) {
1041		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1042			trace_rpm_usage_rcuidle(dev, rpmflags);
1043			return 0;
1044		}
1045	}
1046
1047	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1048
1049	spin_lock_irqsave(&dev->power.lock, flags);
1050	retval = rpm_suspend(dev, rpmflags);
1051	spin_unlock_irqrestore(&dev->power.lock, flags);
1052
1053	return retval;
1054}
1055EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1056
1057/**
1058 * __pm_runtime_resume - Entry point for runtime resume operations.
1059 * @dev: Device to resume.
1060 * @rpmflags: Flag bits.
1061 *
1062 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1063 * carry out a resume, either synchronous or asynchronous.
1064 *
1065 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1066 * or if pm_runtime_irq_safe() has been called.
1067 */
1068int __pm_runtime_resume(struct device *dev, int rpmflags)
1069{
1070	unsigned long flags;
1071	int retval;
1072
1073	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1074			dev->power.runtime_status != RPM_ACTIVE);
1075
1076	if (rpmflags & RPM_GET_PUT)
1077		atomic_inc(&dev->power.usage_count);
1078
1079	spin_lock_irqsave(&dev->power.lock, flags);
1080	retval = rpm_resume(dev, rpmflags);
1081	spin_unlock_irqrestore(&dev->power.lock, flags);
1082
1083	return retval;
1084}
1085EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1086
1087/**
1088 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1089 * @dev: Device to handle.
1090 * @ign_usage_count: Whether or not to look at the current usage counter value.
1091 *
1092 * Return -EINVAL if runtime PM is disabled for @dev.
1093 *
1094 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1095 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1096 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1097 * without changing the usage counter.
1098 *
1099 * If @ign_usage_count is %true, this function can be used to prevent suspending
1100 * the device when its runtime PM status is %RPM_ACTIVE.
1101 *
1102 * If @ign_usage_count is %false, this function can be used to prevent
1103 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1104 * runtime PM usage counter is not zero.
1105 *
1106 * The caller is resposible for decrementing the runtime PM usage counter of
1107 * @dev after this function has returned a positive value for it.
1108 */
1109int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1110{
1111	unsigned long flags;
1112	int retval;
1113
1114	spin_lock_irqsave(&dev->power.lock, flags);
1115	if (dev->power.disable_depth > 0) {
1116		retval = -EINVAL;
1117	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1118		retval = 0;
1119	} else if (ign_usage_count) {
1120		retval = 1;
1121		atomic_inc(&dev->power.usage_count);
1122	} else {
1123		retval = atomic_inc_not_zero(&dev->power.usage_count);
1124	}
1125	trace_rpm_usage_rcuidle(dev, 0);
1126	spin_unlock_irqrestore(&dev->power.lock, flags);
1127
1128	return retval;
1129}
1130EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1131
1132/**
1133 * __pm_runtime_set_status - Set runtime PM status of a device.
1134 * @dev: Device to handle.
1135 * @status: New runtime PM status of the device.
1136 *
1137 * If runtime PM of the device is disabled or its power.runtime_error field is
1138 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1139 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1140 * However, if the device has a parent and the parent is not active, and the
1141 * parent's power.ignore_children flag is unset, the device's status cannot be
1142 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1143 *
1144 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1145 * and the device parent's counter of unsuspended children is modified to
1146 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1147 * notification request for the parent is submitted.
1148 *
1149 * If @dev has any suppliers (as reflected by device links to them), and @status
1150 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1151 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1152 * of the @status value) and the suppliers will be deacticated on exit.  The
1153 * error returned by the failing supplier activation will be returned in that
1154 * case.
1155 */
1156int __pm_runtime_set_status(struct device *dev, unsigned int status)
1157{
1158	struct device *parent = dev->parent;
 
1159	bool notify_parent = false;
1160	int error = 0;
1161
1162	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1163		return -EINVAL;
1164
1165	spin_lock_irq(&dev->power.lock);
1166
1167	/*
1168	 * Prevent PM-runtime from being enabled for the device or return an
1169	 * error if it is enabled already and working.
1170	 */
1171	if (dev->power.runtime_error || dev->power.disable_depth)
1172		dev->power.disable_depth++;
1173	else
1174		error = -EAGAIN;
1175
1176	spin_unlock_irq(&dev->power.lock);
1177
1178	if (error)
1179		return error;
1180
1181	/*
1182	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1183	 * upfront regardless of the current status, because next time
1184	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1185	 * involved will be dropped down to one anyway.
1186	 */
1187	if (status == RPM_ACTIVE) {
1188		int idx = device_links_read_lock();
1189
1190		error = rpm_get_suppliers(dev);
1191		if (error)
1192			status = RPM_SUSPENDED;
1193
1194		device_links_read_unlock(idx);
1195	}
1196
1197	spin_lock_irq(&dev->power.lock);
1198
1199	if (dev->power.runtime_status == status || !parent)
1200		goto out_set;
1201
1202	if (status == RPM_SUSPENDED) {
1203		atomic_add_unless(&parent->power.child_count, -1, 0);
1204		notify_parent = !parent->power.ignore_children;
1205	} else {
1206		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1207
1208		/*
1209		 * It is invalid to put an active child under a parent that is
1210		 * not active, has runtime PM enabled and the
1211		 * 'power.ignore_children' flag unset.
1212		 */
1213		if (!parent->power.disable_depth
1214		    && !parent->power.ignore_children
1215		    && parent->power.runtime_status != RPM_ACTIVE) {
1216			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1217				dev_name(dev),
1218				dev_name(parent));
1219			error = -EBUSY;
1220		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1221			atomic_inc(&parent->power.child_count);
1222		}
1223
1224		spin_unlock(&parent->power.lock);
1225
1226		if (error) {
1227			status = RPM_SUSPENDED;
1228			goto out;
1229		}
1230	}
1231
1232 out_set:
1233	__update_runtime_status(dev, status);
1234	if (!error)
1235		dev->power.runtime_error = 0;
1236
1237 out:
1238	spin_unlock_irq(&dev->power.lock);
1239
1240	if (notify_parent)
1241		pm_request_idle(parent);
1242
1243	if (status == RPM_SUSPENDED) {
1244		int idx = device_links_read_lock();
1245
1246		rpm_put_suppliers(dev);
1247
1248		device_links_read_unlock(idx);
1249	}
1250
1251	pm_runtime_enable(dev);
1252
1253	return error;
1254}
1255EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1256
1257/**
1258 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1259 * @dev: Device to handle.
1260 *
1261 * Flush all pending requests for the device from pm_wq and wait for all
1262 * runtime PM operations involving the device in progress to complete.
1263 *
1264 * Should be called under dev->power.lock with interrupts disabled.
1265 */
1266static void __pm_runtime_barrier(struct device *dev)
1267{
1268	pm_runtime_deactivate_timer(dev);
1269
1270	if (dev->power.request_pending) {
1271		dev->power.request = RPM_REQ_NONE;
1272		spin_unlock_irq(&dev->power.lock);
1273
1274		cancel_work_sync(&dev->power.work);
1275
1276		spin_lock_irq(&dev->power.lock);
1277		dev->power.request_pending = false;
1278	}
1279
1280	if (dev->power.runtime_status == RPM_SUSPENDING
1281	    || dev->power.runtime_status == RPM_RESUMING
1282	    || dev->power.idle_notification) {
1283		DEFINE_WAIT(wait);
1284
1285		/* Suspend, wake-up or idle notification in progress. */
1286		for (;;) {
1287			prepare_to_wait(&dev->power.wait_queue, &wait,
1288					TASK_UNINTERRUPTIBLE);
1289			if (dev->power.runtime_status != RPM_SUSPENDING
1290			    && dev->power.runtime_status != RPM_RESUMING
1291			    && !dev->power.idle_notification)
1292				break;
1293			spin_unlock_irq(&dev->power.lock);
1294
1295			schedule();
1296
1297			spin_lock_irq(&dev->power.lock);
1298		}
1299		finish_wait(&dev->power.wait_queue, &wait);
1300	}
1301}
1302
1303/**
1304 * pm_runtime_barrier - Flush pending requests and wait for completions.
1305 * @dev: Device to handle.
1306 *
1307 * Prevent the device from being suspended by incrementing its usage counter and
1308 * if there's a pending resume request for the device, wake the device up.
1309 * Next, make sure that all pending requests for the device have been flushed
1310 * from pm_wq and wait for all runtime PM operations involving the device in
1311 * progress to complete.
1312 *
1313 * Return value:
1314 * 1, if there was a resume request pending and the device had to be woken up,
1315 * 0, otherwise
1316 */
1317int pm_runtime_barrier(struct device *dev)
1318{
1319	int retval = 0;
1320
1321	pm_runtime_get_noresume(dev);
1322	spin_lock_irq(&dev->power.lock);
1323
1324	if (dev->power.request_pending
1325	    && dev->power.request == RPM_REQ_RESUME) {
1326		rpm_resume(dev, 0);
1327		retval = 1;
1328	}
1329
1330	__pm_runtime_barrier(dev);
1331
1332	spin_unlock_irq(&dev->power.lock);
1333	pm_runtime_put_noidle(dev);
1334
1335	return retval;
1336}
1337EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1338
1339/**
1340 * __pm_runtime_disable - Disable runtime PM of a device.
1341 * @dev: Device to handle.
1342 * @check_resume: If set, check if there's a resume request for the device.
1343 *
1344 * Increment power.disable_depth for the device and if it was zero previously,
1345 * cancel all pending runtime PM requests for the device and wait for all
1346 * operations in progress to complete.  The device can be either active or
1347 * suspended after its runtime PM has been disabled.
1348 *
1349 * If @check_resume is set and there's a resume request pending when
1350 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1351 * function will wake up the device before disabling its runtime PM.
1352 */
1353void __pm_runtime_disable(struct device *dev, bool check_resume)
1354{
1355	spin_lock_irq(&dev->power.lock);
1356
1357	if (dev->power.disable_depth > 0) {
1358		dev->power.disable_depth++;
1359		goto out;
1360	}
1361
1362	/*
1363	 * Wake up the device if there's a resume request pending, because that
1364	 * means there probably is some I/O to process and disabling runtime PM
1365	 * shouldn't prevent the device from processing the I/O.
1366	 */
1367	if (check_resume && dev->power.request_pending
1368	    && dev->power.request == RPM_REQ_RESUME) {
1369		/*
1370		 * Prevent suspends and idle notifications from being carried
1371		 * out after we have woken up the device.
1372		 */
1373		pm_runtime_get_noresume(dev);
1374
1375		rpm_resume(dev, 0);
1376
1377		pm_runtime_put_noidle(dev);
1378	}
1379
1380	/* Update time accounting before disabling PM-runtime. */
1381	update_pm_runtime_accounting(dev);
1382
1383	if (!dev->power.disable_depth++)
1384		__pm_runtime_barrier(dev);
1385
1386 out:
1387	spin_unlock_irq(&dev->power.lock);
1388}
1389EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1390
1391/**
1392 * pm_runtime_enable - Enable runtime PM of a device.
1393 * @dev: Device to handle.
1394 */
1395void pm_runtime_enable(struct device *dev)
1396{
1397	unsigned long flags;
1398
1399	spin_lock_irqsave(&dev->power.lock, flags);
1400
1401	if (dev->power.disable_depth > 0) {
1402		dev->power.disable_depth--;
1403
1404		/* About to enable runtime pm, set accounting_timestamp to now */
1405		if (!dev->power.disable_depth)
1406			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1407	} else {
1408		dev_warn(dev, "Unbalanced %s!\n", __func__);
1409	}
1410
1411	WARN(!dev->power.disable_depth &&
1412	     dev->power.runtime_status == RPM_SUSPENDED &&
1413	     !dev->power.ignore_children &&
1414	     atomic_read(&dev->power.child_count) > 0,
1415	     "Enabling runtime PM for inactive device (%s) with active children\n",
1416	     dev_name(dev));
1417
1418	spin_unlock_irqrestore(&dev->power.lock, flags);
1419}
1420EXPORT_SYMBOL_GPL(pm_runtime_enable);
1421
1422/**
1423 * pm_runtime_forbid - Block runtime PM of a device.
1424 * @dev: Device to handle.
1425 *
1426 * Increase the device's usage count and clear its power.runtime_auto flag,
1427 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1428 * for it.
1429 */
1430void pm_runtime_forbid(struct device *dev)
1431{
1432	spin_lock_irq(&dev->power.lock);
1433	if (!dev->power.runtime_auto)
1434		goto out;
1435
1436	dev->power.runtime_auto = false;
1437	atomic_inc(&dev->power.usage_count);
1438	rpm_resume(dev, 0);
1439
1440 out:
1441	spin_unlock_irq(&dev->power.lock);
1442}
1443EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1444
1445/**
1446 * pm_runtime_allow - Unblock runtime PM of a device.
1447 * @dev: Device to handle.
1448 *
1449 * Decrease the device's usage count and set its power.runtime_auto flag.
1450 */
1451void pm_runtime_allow(struct device *dev)
1452{
1453	spin_lock_irq(&dev->power.lock);
1454	if (dev->power.runtime_auto)
1455		goto out;
1456
1457	dev->power.runtime_auto = true;
1458	if (atomic_dec_and_test(&dev->power.usage_count))
1459		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1460	else
1461		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1462
1463 out:
1464	spin_unlock_irq(&dev->power.lock);
1465}
1466EXPORT_SYMBOL_GPL(pm_runtime_allow);
1467
1468/**
1469 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1470 * @dev: Device to handle.
1471 *
1472 * Set the power.no_callbacks flag, which tells the PM core that this
1473 * device is power-managed through its parent and has no runtime PM
1474 * callbacks of its own.  The runtime sysfs attributes will be removed.
1475 */
1476void pm_runtime_no_callbacks(struct device *dev)
1477{
1478	spin_lock_irq(&dev->power.lock);
1479	dev->power.no_callbacks = 1;
1480	spin_unlock_irq(&dev->power.lock);
1481	if (device_is_registered(dev))
1482		rpm_sysfs_remove(dev);
1483}
1484EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1485
1486/**
1487 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1488 * @dev: Device to handle
1489 *
1490 * Set the power.irq_safe flag, which tells the PM core that the
1491 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1492 * always be invoked with the spinlock held and interrupts disabled.  It also
1493 * causes the parent's usage counter to be permanently incremented, preventing
1494 * the parent from runtime suspending -- otherwise an irq-safe child might have
1495 * to wait for a non-irq-safe parent.
1496 */
1497void pm_runtime_irq_safe(struct device *dev)
1498{
1499	if (dev->parent)
1500		pm_runtime_get_sync(dev->parent);
1501	spin_lock_irq(&dev->power.lock);
1502	dev->power.irq_safe = 1;
1503	spin_unlock_irq(&dev->power.lock);
1504}
1505EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1506
1507/**
1508 * update_autosuspend - Handle a change to a device's autosuspend settings.
1509 * @dev: Device to handle.
1510 * @old_delay: The former autosuspend_delay value.
1511 * @old_use: The former use_autosuspend value.
1512 *
1513 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1514 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1515 *
1516 * This function must be called under dev->power.lock with interrupts disabled.
1517 */
1518static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1519{
1520	int delay = dev->power.autosuspend_delay;
1521
1522	/* Should runtime suspend be prevented now? */
1523	if (dev->power.use_autosuspend && delay < 0) {
1524
1525		/* If it used to be allowed then prevent it. */
1526		if (!old_use || old_delay >= 0) {
1527			atomic_inc(&dev->power.usage_count);
1528			rpm_resume(dev, 0);
1529		} else {
1530			trace_rpm_usage_rcuidle(dev, 0);
1531		}
1532	}
1533
1534	/* Runtime suspend should be allowed now. */
1535	else {
1536
1537		/* If it used to be prevented then allow it. */
1538		if (old_use && old_delay < 0)
1539			atomic_dec(&dev->power.usage_count);
1540
1541		/* Maybe we can autosuspend now. */
1542		rpm_idle(dev, RPM_AUTO);
1543	}
1544}
1545
1546/**
1547 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1548 * @dev: Device to handle.
1549 * @delay: Value of the new delay in milliseconds.
1550 *
1551 * Set the device's power.autosuspend_delay value.  If it changes to negative
1552 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1553 * changes the other way, allow runtime suspends.
1554 */
1555void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1556{
1557	int old_delay, old_use;
1558
1559	spin_lock_irq(&dev->power.lock);
1560	old_delay = dev->power.autosuspend_delay;
1561	old_use = dev->power.use_autosuspend;
1562	dev->power.autosuspend_delay = delay;
1563	update_autosuspend(dev, old_delay, old_use);
1564	spin_unlock_irq(&dev->power.lock);
1565}
1566EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1567
1568/**
1569 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1570 * @dev: Device to handle.
1571 * @use: New value for use_autosuspend.
1572 *
1573 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1574 * suspends as needed.
1575 */
1576void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1577{
1578	int old_delay, old_use;
1579
1580	spin_lock_irq(&dev->power.lock);
1581	old_delay = dev->power.autosuspend_delay;
1582	old_use = dev->power.use_autosuspend;
1583	dev->power.use_autosuspend = use;
1584	update_autosuspend(dev, old_delay, old_use);
1585	spin_unlock_irq(&dev->power.lock);
1586}
1587EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1588
1589/**
1590 * pm_runtime_init - Initialize runtime PM fields in given device object.
1591 * @dev: Device object to initialize.
1592 */
1593void pm_runtime_init(struct device *dev)
1594{
1595	dev->power.runtime_status = RPM_SUSPENDED;
1596	dev->power.idle_notification = false;
1597
1598	dev->power.disable_depth = 1;
1599	atomic_set(&dev->power.usage_count, 0);
1600
1601	dev->power.runtime_error = 0;
1602
1603	atomic_set(&dev->power.child_count, 0);
1604	pm_suspend_ignore_children(dev, false);
1605	dev->power.runtime_auto = true;
1606
1607	dev->power.request_pending = false;
1608	dev->power.request = RPM_REQ_NONE;
1609	dev->power.deferred_resume = false;
 
1610	INIT_WORK(&dev->power.work, pm_runtime_work);
1611
1612	dev->power.timer_expires = 0;
1613	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1614	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1615
1616	init_waitqueue_head(&dev->power.wait_queue);
1617}
1618
1619/**
1620 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1621 * @dev: Device object to re-initialize.
1622 */
1623void pm_runtime_reinit(struct device *dev)
1624{
1625	if (!pm_runtime_enabled(dev)) {
1626		if (dev->power.runtime_status == RPM_ACTIVE)
1627			pm_runtime_set_suspended(dev);
1628		if (dev->power.irq_safe) {
1629			spin_lock_irq(&dev->power.lock);
1630			dev->power.irq_safe = 0;
1631			spin_unlock_irq(&dev->power.lock);
1632			if (dev->parent)
1633				pm_runtime_put(dev->parent);
1634		}
1635	}
1636}
1637
1638/**
1639 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1640 * @dev: Device object being removed from device hierarchy.
1641 */
1642void pm_runtime_remove(struct device *dev)
1643{
1644	__pm_runtime_disable(dev, false);
1645	pm_runtime_reinit(dev);
1646}
1647
1648/**
1649 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1650 * @dev: Device whose driver is going to be removed.
1651 *
1652 * Check links from this device to any consumers and if any of them have active
1653 * runtime PM references to the device, drop the usage counter of the device
1654 * (as many times as needed).
1655 *
1656 * Links with the DL_FLAG_MANAGED flag unset are ignored.
1657 *
1658 * Since the device is guaranteed to be runtime-active at the point this is
1659 * called, nothing else needs to be done here.
1660 *
1661 * Moreover, this is called after device_links_busy() has returned 'false', so
1662 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1663 * therefore rpm_active can't be manipulated concurrently.
1664 */
1665void pm_runtime_clean_up_links(struct device *dev)
1666{
1667	struct device_link *link;
1668	int idx;
1669
1670	idx = device_links_read_lock();
1671
1672	list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
1673				device_links_read_lock_held()) {
1674		if (!(link->flags & DL_FLAG_MANAGED))
1675			continue;
1676
1677		while (refcount_dec_not_one(&link->rpm_active))
1678			pm_runtime_put_noidle(dev);
 
 
1679	}
1680
1681	device_links_read_unlock(idx);
1682}
1683
1684/**
1685 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1686 * @dev: Consumer device.
1687 */
1688void pm_runtime_get_suppliers(struct device *dev)
1689{
1690	struct device_link *link;
1691	int idx;
1692
1693	idx = device_links_read_lock();
1694
1695	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1696				device_links_read_lock_held())
1697		if (link->flags & DL_FLAG_PM_RUNTIME) {
1698			link->supplier_preactivated = true;
1699			refcount_inc(&link->rpm_active);
1700			pm_runtime_get_sync(link->supplier);
1701		}
1702
1703	device_links_read_unlock(idx);
1704}
1705
1706/**
1707 * pm_runtime_put_suppliers - Drop references to supplier devices.
1708 * @dev: Consumer device.
1709 */
1710void pm_runtime_put_suppliers(struct device *dev)
1711{
1712	struct device_link *link;
1713	int idx;
1714
1715	idx = device_links_read_lock();
1716
1717	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1718				device_links_read_lock_held())
1719		if (link->supplier_preactivated) {
1720			link->supplier_preactivated = false;
1721			if (refcount_dec_not_one(&link->rpm_active))
1722				pm_runtime_put(link->supplier);
1723		}
1724
1725	device_links_read_unlock(idx);
1726}
1727
1728void pm_runtime_new_link(struct device *dev)
1729{
1730	spin_lock_irq(&dev->power.lock);
1731	dev->power.links_count++;
1732	spin_unlock_irq(&dev->power.lock);
1733}
1734
1735void pm_runtime_drop_link(struct device *dev)
1736{
1737	spin_lock_irq(&dev->power.lock);
1738	WARN_ON(dev->power.links_count == 0);
1739	dev->power.links_count--;
1740	spin_unlock_irq(&dev->power.lock);
1741}
1742
1743static bool pm_runtime_need_not_resume(struct device *dev)
1744{
1745	return atomic_read(&dev->power.usage_count) <= 1 &&
1746		(atomic_read(&dev->power.child_count) == 0 ||
1747		 dev->power.ignore_children);
1748}
1749
1750/**
1751 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1752 * @dev: Device to suspend.
1753 *
1754 * Disable runtime PM so we safely can check the device's runtime PM status and
1755 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1756 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1757 * usage and children counters don't indicate that the device was in use before
1758 * the system-wide transition under way, decrement its parent's children counter
1759 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1760 * unless we encounter errors.
1761 *
1762 * Typically this function may be invoked from a system suspend callback to make
1763 * sure the device is put into low power state and it should only be used during
1764 * system-wide PM transitions to sleep states.  It assumes that the analogous
1765 * pm_runtime_force_resume() will be used to resume the device.
1766 */
1767int pm_runtime_force_suspend(struct device *dev)
1768{
1769	int (*callback)(struct device *);
1770	int ret;
1771
1772	pm_runtime_disable(dev);
1773	if (pm_runtime_status_suspended(dev))
1774		return 0;
1775
1776	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1777
1778	ret = callback ? callback(dev) : 0;
1779	if (ret)
1780		goto err;
1781
1782	/*
1783	 * If the device can stay in suspend after the system-wide transition
1784	 * to the working state that will follow, drop the children counter of
1785	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1786	 * function will be called again for it in the meantime.
1787	 */
1788	if (pm_runtime_need_not_resume(dev))
1789		pm_runtime_set_suspended(dev);
1790	else
1791		__update_runtime_status(dev, RPM_SUSPENDED);
1792
1793	return 0;
1794
1795err:
1796	pm_runtime_enable(dev);
1797	return ret;
1798}
1799EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1800
1801/**
1802 * pm_runtime_force_resume - Force a device into resume state if needed.
1803 * @dev: Device to resume.
1804 *
1805 * Prior invoking this function we expect the user to have brought the device
1806 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1807 * those actions and bring the device into full power, if it is expected to be
1808 * used on system resume.  In the other case, we defer the resume to be managed
1809 * via runtime PM.
1810 *
1811 * Typically this function may be invoked from a system resume callback.
1812 */
1813int pm_runtime_force_resume(struct device *dev)
1814{
1815	int (*callback)(struct device *);
1816	int ret = 0;
1817
1818	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1819		goto out;
1820
1821	/*
1822	 * The value of the parent's children counter is correct already, so
1823	 * just update the status of the device.
1824	 */
1825	__update_runtime_status(dev, RPM_ACTIVE);
1826
1827	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1828
1829	ret = callback ? callback(dev) : 0;
1830	if (ret) {
1831		pm_runtime_set_suspended(dev);
1832		goto out;
1833	}
1834
1835	pm_runtime_mark_last_busy(dev);
1836out:
1837	pm_runtime_enable(dev);
1838	return ret;
1839}
1840EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
v4.17
 
   1/*
   2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   3 *
   4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   6 *
   7 * This file is released under the GPLv2.
   8 */
   9
  10#include <linux/sched/mm.h>
 
 
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  50
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65void update_pm_runtime_accounting(struct device *dev)
  66{
  67	unsigned long now = jiffies;
  68	unsigned long delta;
  69
  70	delta = now - dev->power.accounting_timestamp;
 
  71
 
 
 
  72	dev->power.accounting_timestamp = now;
  73
  74	if (dev->power.disable_depth > 0)
 
 
 
 
 
  75		return;
  76
 
 
  77	if (dev->power.runtime_status == RPM_SUSPENDED)
  78		dev->power.suspended_jiffies += delta;
  79	else
  80		dev->power.active_jiffies += delta;
  81}
  82
  83static void __update_runtime_status(struct device *dev, enum rpm_status status)
  84{
  85	update_pm_runtime_accounting(dev);
  86	dev->power.runtime_status = status;
  87}
  88
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  89/**
  90 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  91 * @dev: Device to handle.
  92 */
  93static void pm_runtime_deactivate_timer(struct device *dev)
  94{
  95	if (dev->power.timer_expires > 0) {
  96		del_timer(&dev->power.suspend_timer);
  97		dev->power.timer_expires = 0;
  98	}
  99}
 100
 101/**
 102 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 103 * @dev: Device to handle.
 104 */
 105static void pm_runtime_cancel_pending(struct device *dev)
 106{
 107	pm_runtime_deactivate_timer(dev);
 108	/*
 109	 * In case there's a request pending, make sure its work function will
 110	 * return without doing anything.
 111	 */
 112	dev->power.request = RPM_REQ_NONE;
 113}
 114
 115/*
 116 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 117 * @dev: Device to handle.
 118 *
 119 * Compute the autosuspend-delay expiration time based on the device's
 120 * power.last_busy time.  If the delay has already expired or is disabled
 121 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 122 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 123 *
 124 * This function may be called either with or without dev->power.lock held.
 125 * Either way it can be racy, since power.last_busy may be updated at any time.
 126 */
 127unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
 128{
 129	int autosuspend_delay;
 130	long elapsed;
 131	unsigned long last_busy;
 132	unsigned long expires = 0;
 133
 134	if (!dev->power.use_autosuspend)
 135		goto out;
 136
 137	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 138	if (autosuspend_delay < 0)
 139		goto out;
 140
 141	last_busy = READ_ONCE(dev->power.last_busy);
 142	elapsed = jiffies - last_busy;
 143	if (elapsed < 0)
 144		goto out;	/* jiffies has wrapped around. */
 145
 146	/*
 147	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
 148	 * up to the nearest second.
 149	 */
 150	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
 151	if (autosuspend_delay >= 1000)
 152		expires = round_jiffies(expires);
 153	expires += !expires;
 154	if (elapsed >= expires - last_busy)
 155		expires = 0;	/* Already expired. */
 156
 157 out:
 158	return expires;
 159}
 160EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 161
 162static int dev_memalloc_noio(struct device *dev, void *data)
 163{
 164	return dev->power.memalloc_noio;
 165}
 166
 167/*
 168 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 169 * @dev: Device to handle.
 170 * @enable: True for setting the flag and False for clearing the flag.
 171 *
 172 * Set the flag for all devices in the path from the device to the
 173 * root device in the device tree if @enable is true, otherwise clear
 174 * the flag for devices in the path whose siblings don't set the flag.
 175 *
 176 * The function should only be called by block device, or network
 177 * device driver for solving the deadlock problem during runtime
 178 * resume/suspend:
 179 *
 180 *     If memory allocation with GFP_KERNEL is called inside runtime
 181 *     resume/suspend callback of any one of its ancestors(or the
 182 *     block device itself), the deadlock may be triggered inside the
 183 *     memory allocation since it might not complete until the block
 184 *     device becomes active and the involed page I/O finishes. The
 185 *     situation is pointed out first by Alan Stern. Network device
 186 *     are involved in iSCSI kind of situation.
 187 *
 188 * The lock of dev_hotplug_mutex is held in the function for handling
 189 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 190 * in async probe().
 191 *
 192 * The function should be called between device_add() and device_del()
 193 * on the affected device(block/network device).
 194 */
 195void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 196{
 197	static DEFINE_MUTEX(dev_hotplug_mutex);
 198
 199	mutex_lock(&dev_hotplug_mutex);
 200	for (;;) {
 201		bool enabled;
 202
 203		/* hold power lock since bitfield is not SMP-safe. */
 204		spin_lock_irq(&dev->power.lock);
 205		enabled = dev->power.memalloc_noio;
 206		dev->power.memalloc_noio = enable;
 207		spin_unlock_irq(&dev->power.lock);
 208
 209		/*
 210		 * not need to enable ancestors any more if the device
 211		 * has been enabled.
 212		 */
 213		if (enabled && enable)
 214			break;
 215
 216		dev = dev->parent;
 217
 218		/*
 219		 * clear flag of the parent device only if all the
 220		 * children don't set the flag because ancestor's
 221		 * flag was set by any one of the descendants.
 222		 */
 223		if (!dev || (!enable &&
 224			     device_for_each_child(dev, NULL,
 225						   dev_memalloc_noio)))
 226			break;
 227	}
 228	mutex_unlock(&dev_hotplug_mutex);
 229}
 230EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
 231
 232/**
 233 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 234 * @dev: Device to test.
 235 */
 236static int rpm_check_suspend_allowed(struct device *dev)
 237{
 238	int retval = 0;
 239
 240	if (dev->power.runtime_error)
 241		retval = -EINVAL;
 242	else if (dev->power.disable_depth > 0)
 243		retval = -EACCES;
 244	else if (atomic_read(&dev->power.usage_count) > 0)
 245		retval = -EAGAIN;
 246	else if (!dev->power.ignore_children &&
 247			atomic_read(&dev->power.child_count))
 248		retval = -EBUSY;
 249
 250	/* Pending resume requests take precedence over suspends. */
 251	else if ((dev->power.deferred_resume
 252			&& dev->power.runtime_status == RPM_SUSPENDING)
 253	    || (dev->power.request_pending
 254			&& dev->power.request == RPM_REQ_RESUME))
 255		retval = -EAGAIN;
 256	else if (__dev_pm_qos_read_value(dev) == 0)
 257		retval = -EPERM;
 258	else if (dev->power.runtime_status == RPM_SUSPENDED)
 259		retval = 1;
 260
 261	return retval;
 262}
 263
 264static int rpm_get_suppliers(struct device *dev)
 265{
 266	struct device_link *link;
 267
 268	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
 
 269		int retval;
 270
 271		if (!(link->flags & DL_FLAG_PM_RUNTIME))
 272			continue;
 273
 274		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
 275		    link->rpm_active)
 276			continue;
 277
 278		retval = pm_runtime_get_sync(link->supplier);
 279		/* Ignore suppliers with disabled runtime PM. */
 280		if (retval < 0 && retval != -EACCES) {
 281			pm_runtime_put_noidle(link->supplier);
 282			return retval;
 283		}
 284		link->rpm_active = true;
 285	}
 286	return 0;
 287}
 288
 289static void rpm_put_suppliers(struct device *dev)
 290{
 291	struct device_link *link;
 292
 293	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
 294		if (link->rpm_active &&
 295		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
 
 
 
 296			pm_runtime_put(link->supplier);
 297			link->rpm_active = false;
 298		}
 299}
 300
 301/**
 302 * __rpm_callback - Run a given runtime PM callback for a given device.
 303 * @cb: Runtime PM callback to run.
 304 * @dev: Device to run the callback for.
 305 */
 306static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 307	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 308{
 309	int retval, idx;
 310	bool use_links = dev->power.links_count > 0;
 311
 312	if (dev->power.irq_safe) {
 313		spin_unlock(&dev->power.lock);
 314	} else {
 315		spin_unlock_irq(&dev->power.lock);
 316
 317		/*
 318		 * Resume suppliers if necessary.
 319		 *
 320		 * The device's runtime PM status cannot change until this
 321		 * routine returns, so it is safe to read the status outside of
 322		 * the lock.
 323		 */
 324		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 325			idx = device_links_read_lock();
 326
 327			retval = rpm_get_suppliers(dev);
 328			if (retval)
 329				goto fail;
 330
 331			device_links_read_unlock(idx);
 332		}
 333	}
 334
 335	retval = cb(dev);
 336
 337	if (dev->power.irq_safe) {
 338		spin_lock(&dev->power.lock);
 339	} else {
 340		/*
 341		 * If the device is suspending and the callback has returned
 342		 * success, drop the usage counters of the suppliers that have
 343		 * been reference counted on its resume.
 344		 *
 345		 * Do that if resume fails too.
 346		 */
 347		if (use_links
 348		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 349		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 350			idx = device_links_read_lock();
 351
 352 fail:
 353			rpm_put_suppliers(dev);
 354
 355			device_links_read_unlock(idx);
 356		}
 357
 358		spin_lock_irq(&dev->power.lock);
 359	}
 360
 361	return retval;
 362}
 363
 364/**
 365 * rpm_idle - Notify device bus type if the device can be suspended.
 366 * @dev: Device to notify the bus type about.
 367 * @rpmflags: Flag bits.
 368 *
 369 * Check if the device's runtime PM status allows it to be suspended.  If
 370 * another idle notification has been started earlier, return immediately.  If
 371 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 372 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 373 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 374 *
 375 * This function must be called under dev->power.lock with interrupts disabled.
 376 */
 377static int rpm_idle(struct device *dev, int rpmflags)
 378{
 379	int (*callback)(struct device *);
 380	int retval;
 381
 382	trace_rpm_idle_rcuidle(dev, rpmflags);
 383	retval = rpm_check_suspend_allowed(dev);
 384	if (retval < 0)
 385		;	/* Conditions are wrong. */
 386
 387	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 388	else if (dev->power.runtime_status != RPM_ACTIVE)
 389		retval = -EAGAIN;
 390
 391	/*
 392	 * Any pending request other than an idle notification takes
 393	 * precedence over us, except that the timer may be running.
 394	 */
 395	else if (dev->power.request_pending &&
 396	    dev->power.request > RPM_REQ_IDLE)
 397		retval = -EAGAIN;
 398
 399	/* Act as though RPM_NOWAIT is always set. */
 400	else if (dev->power.idle_notification)
 401		retval = -EINPROGRESS;
 402	if (retval)
 403		goto out;
 404
 405	/* Pending requests need to be canceled. */
 406	dev->power.request = RPM_REQ_NONE;
 407
 408	if (dev->power.no_callbacks)
 409		goto out;
 410
 411	/* Carry out an asynchronous or a synchronous idle notification. */
 412	if (rpmflags & RPM_ASYNC) {
 413		dev->power.request = RPM_REQ_IDLE;
 414		if (!dev->power.request_pending) {
 415			dev->power.request_pending = true;
 416			queue_work(pm_wq, &dev->power.work);
 417		}
 418		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 419		return 0;
 420	}
 421
 422	dev->power.idle_notification = true;
 423
 424	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 425
 426	if (callback)
 427		retval = __rpm_callback(callback, dev);
 428
 429	dev->power.idle_notification = false;
 430	wake_up_all(&dev->power.wait_queue);
 431
 432 out:
 433	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 434	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 435}
 436
 437/**
 438 * rpm_callback - Run a given runtime PM callback for a given device.
 439 * @cb: Runtime PM callback to run.
 440 * @dev: Device to run the callback for.
 441 */
 442static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 443{
 444	int retval;
 445
 446	if (!cb)
 447		return -ENOSYS;
 448
 449	if (dev->power.memalloc_noio) {
 450		unsigned int noio_flag;
 451
 452		/*
 453		 * Deadlock might be caused if memory allocation with
 454		 * GFP_KERNEL happens inside runtime_suspend and
 455		 * runtime_resume callbacks of one block device's
 456		 * ancestor or the block device itself. Network
 457		 * device might be thought as part of iSCSI block
 458		 * device, so network device and its ancestor should
 459		 * be marked as memalloc_noio too.
 460		 */
 461		noio_flag = memalloc_noio_save();
 462		retval = __rpm_callback(cb, dev);
 463		memalloc_noio_restore(noio_flag);
 464	} else {
 465		retval = __rpm_callback(cb, dev);
 466	}
 467
 468	dev->power.runtime_error = retval;
 469	return retval != -EACCES ? retval : -EIO;
 470}
 471
 472/**
 473 * rpm_suspend - Carry out runtime suspend of given device.
 474 * @dev: Device to suspend.
 475 * @rpmflags: Flag bits.
 476 *
 477 * Check if the device's runtime PM status allows it to be suspended.
 478 * Cancel a pending idle notification, autosuspend or suspend. If
 479 * another suspend has been started earlier, either return immediately
 480 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 481 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 482 * otherwise run the ->runtime_suspend() callback directly. When
 483 * ->runtime_suspend succeeded, if a deferred resume was requested while
 484 * the callback was running then carry it out, otherwise send an idle
 485 * notification for its parent (if the suspend succeeded and both
 486 * ignore_children of parent->power and irq_safe of dev->power are not set).
 487 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 488 * flag is set and the next autosuspend-delay expiration time is in the
 489 * future, schedule another autosuspend attempt.
 490 *
 491 * This function must be called under dev->power.lock with interrupts disabled.
 492 */
 493static int rpm_suspend(struct device *dev, int rpmflags)
 494	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 495{
 496	int (*callback)(struct device *);
 497	struct device *parent = NULL;
 498	int retval;
 499
 500	trace_rpm_suspend_rcuidle(dev, rpmflags);
 501
 502 repeat:
 503	retval = rpm_check_suspend_allowed(dev);
 504
 505	if (retval < 0)
 506		;	/* Conditions are wrong. */
 507
 508	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 509	else if (dev->power.runtime_status == RPM_RESUMING &&
 510	    !(rpmflags & RPM_ASYNC))
 511		retval = -EAGAIN;
 512	if (retval)
 513		goto out;
 514
 515	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 516	if ((rpmflags & RPM_AUTO)
 517	    && dev->power.runtime_status != RPM_SUSPENDING) {
 518		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
 519
 520		if (expires != 0) {
 521			/* Pending requests need to be canceled. */
 522			dev->power.request = RPM_REQ_NONE;
 523
 524			/*
 525			 * Optimization: If the timer is already running and is
 526			 * set to expire at or before the autosuspend delay,
 527			 * avoid the overhead of resetting it.  Just let it
 528			 * expire; pm_suspend_timer_fn() will take care of the
 529			 * rest.
 530			 */
 531			if (!(dev->power.timer_expires && time_before_eq(
 532			    dev->power.timer_expires, expires))) {
 
 
 
 
 
 
 
 533				dev->power.timer_expires = expires;
 534				mod_timer(&dev->power.suspend_timer, expires);
 
 
 
 535			}
 536			dev->power.timer_autosuspends = 1;
 537			goto out;
 538		}
 539	}
 540
 541	/* Other scheduled or pending requests need to be canceled. */
 542	pm_runtime_cancel_pending(dev);
 543
 544	if (dev->power.runtime_status == RPM_SUSPENDING) {
 545		DEFINE_WAIT(wait);
 546
 547		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 548			retval = -EINPROGRESS;
 549			goto out;
 550		}
 551
 552		if (dev->power.irq_safe) {
 553			spin_unlock(&dev->power.lock);
 554
 555			cpu_relax();
 556
 557			spin_lock(&dev->power.lock);
 558			goto repeat;
 559		}
 560
 561		/* Wait for the other suspend running in parallel with us. */
 562		for (;;) {
 563			prepare_to_wait(&dev->power.wait_queue, &wait,
 564					TASK_UNINTERRUPTIBLE);
 565			if (dev->power.runtime_status != RPM_SUSPENDING)
 566				break;
 567
 568			spin_unlock_irq(&dev->power.lock);
 569
 570			schedule();
 571
 572			spin_lock_irq(&dev->power.lock);
 573		}
 574		finish_wait(&dev->power.wait_queue, &wait);
 575		goto repeat;
 576	}
 577
 578	if (dev->power.no_callbacks)
 579		goto no_callback;	/* Assume success. */
 580
 581	/* Carry out an asynchronous or a synchronous suspend. */
 582	if (rpmflags & RPM_ASYNC) {
 583		dev->power.request = (rpmflags & RPM_AUTO) ?
 584		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 585		if (!dev->power.request_pending) {
 586			dev->power.request_pending = true;
 587			queue_work(pm_wq, &dev->power.work);
 588		}
 589		goto out;
 590	}
 591
 592	__update_runtime_status(dev, RPM_SUSPENDING);
 593
 594	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 595
 596	dev_pm_enable_wake_irq_check(dev, true);
 597	retval = rpm_callback(callback, dev);
 598	if (retval)
 599		goto fail;
 600
 601 no_callback:
 602	__update_runtime_status(dev, RPM_SUSPENDED);
 603	pm_runtime_deactivate_timer(dev);
 604
 605	if (dev->parent) {
 606		parent = dev->parent;
 607		atomic_add_unless(&parent->power.child_count, -1, 0);
 608	}
 609	wake_up_all(&dev->power.wait_queue);
 610
 611	if (dev->power.deferred_resume) {
 612		dev->power.deferred_resume = false;
 613		rpm_resume(dev, 0);
 614		retval = -EAGAIN;
 615		goto out;
 616	}
 617
 618	/* Maybe the parent is now able to suspend. */
 619	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 620		spin_unlock(&dev->power.lock);
 621
 622		spin_lock(&parent->power.lock);
 623		rpm_idle(parent, RPM_ASYNC);
 624		spin_unlock(&parent->power.lock);
 625
 626		spin_lock(&dev->power.lock);
 627	}
 628
 629 out:
 630	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 631
 632	return retval;
 633
 634 fail:
 635	dev_pm_disable_wake_irq_check(dev);
 636	__update_runtime_status(dev, RPM_ACTIVE);
 637	dev->power.deferred_resume = false;
 638	wake_up_all(&dev->power.wait_queue);
 639
 640	if (retval == -EAGAIN || retval == -EBUSY) {
 641		dev->power.runtime_error = 0;
 642
 643		/*
 644		 * If the callback routine failed an autosuspend, and
 645		 * if the last_busy time has been updated so that there
 646		 * is a new autosuspend expiration time, automatically
 647		 * reschedule another autosuspend.
 648		 */
 649		if ((rpmflags & RPM_AUTO) &&
 650		    pm_runtime_autosuspend_expiration(dev) != 0)
 651			goto repeat;
 652	} else {
 653		pm_runtime_cancel_pending(dev);
 654	}
 655	goto out;
 656}
 657
 658/**
 659 * rpm_resume - Carry out runtime resume of given device.
 660 * @dev: Device to resume.
 661 * @rpmflags: Flag bits.
 662 *
 663 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 664 * any scheduled or pending requests.  If another resume has been started
 665 * earlier, either return immediately or wait for it to finish, depending on the
 666 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 667 * parallel with this function, either tell the other process to resume after
 668 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 669 * flag is set then queue a resume request; otherwise run the
 670 * ->runtime_resume() callback directly.  Queue an idle notification for the
 671 * device if the resume succeeded.
 672 *
 673 * This function must be called under dev->power.lock with interrupts disabled.
 674 */
 675static int rpm_resume(struct device *dev, int rpmflags)
 676	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 677{
 678	int (*callback)(struct device *);
 679	struct device *parent = NULL;
 680	int retval = 0;
 681
 682	trace_rpm_resume_rcuidle(dev, rpmflags);
 683
 684 repeat:
 685	if (dev->power.runtime_error)
 686		retval = -EINVAL;
 687	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 688	    && dev->power.runtime_status == RPM_ACTIVE)
 689		retval = 1;
 690	else if (dev->power.disable_depth > 0)
 691		retval = -EACCES;
 692	if (retval)
 693		goto out;
 694
 695	/*
 696	 * Other scheduled or pending requests need to be canceled.  Small
 697	 * optimization: If an autosuspend timer is running, leave it running
 698	 * rather than cancelling it now only to restart it again in the near
 699	 * future.
 700	 */
 701	dev->power.request = RPM_REQ_NONE;
 702	if (!dev->power.timer_autosuspends)
 703		pm_runtime_deactivate_timer(dev);
 704
 705	if (dev->power.runtime_status == RPM_ACTIVE) {
 706		retval = 1;
 707		goto out;
 708	}
 709
 710	if (dev->power.runtime_status == RPM_RESUMING
 711	    || dev->power.runtime_status == RPM_SUSPENDING) {
 712		DEFINE_WAIT(wait);
 713
 714		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 715			if (dev->power.runtime_status == RPM_SUSPENDING)
 716				dev->power.deferred_resume = true;
 717			else
 718				retval = -EINPROGRESS;
 719			goto out;
 720		}
 721
 722		if (dev->power.irq_safe) {
 723			spin_unlock(&dev->power.lock);
 724
 725			cpu_relax();
 726
 727			spin_lock(&dev->power.lock);
 728			goto repeat;
 729		}
 730
 731		/* Wait for the operation carried out in parallel with us. */
 732		for (;;) {
 733			prepare_to_wait(&dev->power.wait_queue, &wait,
 734					TASK_UNINTERRUPTIBLE);
 735			if (dev->power.runtime_status != RPM_RESUMING
 736			    && dev->power.runtime_status != RPM_SUSPENDING)
 737				break;
 738
 739			spin_unlock_irq(&dev->power.lock);
 740
 741			schedule();
 742
 743			spin_lock_irq(&dev->power.lock);
 744		}
 745		finish_wait(&dev->power.wait_queue, &wait);
 746		goto repeat;
 747	}
 748
 749	/*
 750	 * See if we can skip waking up the parent.  This is safe only if
 751	 * power.no_callbacks is set, because otherwise we don't know whether
 752	 * the resume will actually succeed.
 753	 */
 754	if (dev->power.no_callbacks && !parent && dev->parent) {
 755		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 756		if (dev->parent->power.disable_depth > 0
 757		    || dev->parent->power.ignore_children
 758		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 759			atomic_inc(&dev->parent->power.child_count);
 760			spin_unlock(&dev->parent->power.lock);
 761			retval = 1;
 762			goto no_callback;	/* Assume success. */
 763		}
 764		spin_unlock(&dev->parent->power.lock);
 765	}
 766
 767	/* Carry out an asynchronous or a synchronous resume. */
 768	if (rpmflags & RPM_ASYNC) {
 769		dev->power.request = RPM_REQ_RESUME;
 770		if (!dev->power.request_pending) {
 771			dev->power.request_pending = true;
 772			queue_work(pm_wq, &dev->power.work);
 773		}
 774		retval = 0;
 775		goto out;
 776	}
 777
 778	if (!parent && dev->parent) {
 779		/*
 780		 * Increment the parent's usage counter and resume it if
 781		 * necessary.  Not needed if dev is irq-safe; then the
 782		 * parent is permanently resumed.
 783		 */
 784		parent = dev->parent;
 785		if (dev->power.irq_safe)
 786			goto skip_parent;
 787		spin_unlock(&dev->power.lock);
 788
 789		pm_runtime_get_noresume(parent);
 790
 791		spin_lock(&parent->power.lock);
 792		/*
 793		 * Resume the parent if it has runtime PM enabled and not been
 794		 * set to ignore its children.
 795		 */
 796		if (!parent->power.disable_depth
 797		    && !parent->power.ignore_children) {
 798			rpm_resume(parent, 0);
 799			if (parent->power.runtime_status != RPM_ACTIVE)
 800				retval = -EBUSY;
 801		}
 802		spin_unlock(&parent->power.lock);
 803
 804		spin_lock(&dev->power.lock);
 805		if (retval)
 806			goto out;
 807		goto repeat;
 808	}
 809 skip_parent:
 810
 811	if (dev->power.no_callbacks)
 812		goto no_callback;	/* Assume success. */
 813
 814	__update_runtime_status(dev, RPM_RESUMING);
 815
 816	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 817
 818	dev_pm_disable_wake_irq_check(dev);
 819	retval = rpm_callback(callback, dev);
 820	if (retval) {
 821		__update_runtime_status(dev, RPM_SUSPENDED);
 822		pm_runtime_cancel_pending(dev);
 823		dev_pm_enable_wake_irq_check(dev, false);
 824	} else {
 825 no_callback:
 826		__update_runtime_status(dev, RPM_ACTIVE);
 827		pm_runtime_mark_last_busy(dev);
 828		if (parent)
 829			atomic_inc(&parent->power.child_count);
 830	}
 831	wake_up_all(&dev->power.wait_queue);
 832
 833	if (retval >= 0)
 834		rpm_idle(dev, RPM_ASYNC);
 835
 836 out:
 837	if (parent && !dev->power.irq_safe) {
 838		spin_unlock_irq(&dev->power.lock);
 839
 840		pm_runtime_put(parent);
 841
 842		spin_lock_irq(&dev->power.lock);
 843	}
 844
 845	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 846
 847	return retval;
 848}
 849
 850/**
 851 * pm_runtime_work - Universal runtime PM work function.
 852 * @work: Work structure used for scheduling the execution of this function.
 853 *
 854 * Use @work to get the device object the work is to be done for, determine what
 855 * is to be done and execute the appropriate runtime PM function.
 856 */
 857static void pm_runtime_work(struct work_struct *work)
 858{
 859	struct device *dev = container_of(work, struct device, power.work);
 860	enum rpm_request req;
 861
 862	spin_lock_irq(&dev->power.lock);
 863
 864	if (!dev->power.request_pending)
 865		goto out;
 866
 867	req = dev->power.request;
 868	dev->power.request = RPM_REQ_NONE;
 869	dev->power.request_pending = false;
 870
 871	switch (req) {
 872	case RPM_REQ_NONE:
 873		break;
 874	case RPM_REQ_IDLE:
 875		rpm_idle(dev, RPM_NOWAIT);
 876		break;
 877	case RPM_REQ_SUSPEND:
 878		rpm_suspend(dev, RPM_NOWAIT);
 879		break;
 880	case RPM_REQ_AUTOSUSPEND:
 881		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 882		break;
 883	case RPM_REQ_RESUME:
 884		rpm_resume(dev, RPM_NOWAIT);
 885		break;
 886	}
 887
 888 out:
 889	spin_unlock_irq(&dev->power.lock);
 890}
 891
 892/**
 893 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 894 * @data: Device pointer passed by pm_schedule_suspend().
 895 *
 896 * Check if the time is right and queue a suspend request.
 897 */
 898static void pm_suspend_timer_fn(struct timer_list *t)
 899{
 900	struct device *dev = from_timer(dev, t, power.suspend_timer);
 901	unsigned long flags;
 902	unsigned long expires;
 903
 904	spin_lock_irqsave(&dev->power.lock, flags);
 905
 906	expires = dev->power.timer_expires;
 907	/* If 'expire' is after 'jiffies' we've been called too early. */
 908	if (expires > 0 && !time_after(expires, jiffies)) {
 
 
 
 909		dev->power.timer_expires = 0;
 910		rpm_suspend(dev, dev->power.timer_autosuspends ?
 911		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 912	}
 913
 914	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 
 915}
 916
 917/**
 918 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 919 * @dev: Device to suspend.
 920 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 921 */
 922int pm_schedule_suspend(struct device *dev, unsigned int delay)
 923{
 924	unsigned long flags;
 
 925	int retval;
 926
 927	spin_lock_irqsave(&dev->power.lock, flags);
 928
 929	if (!delay) {
 930		retval = rpm_suspend(dev, RPM_ASYNC);
 931		goto out;
 932	}
 933
 934	retval = rpm_check_suspend_allowed(dev);
 935	if (retval)
 936		goto out;
 937
 938	/* Other scheduled or pending requests need to be canceled. */
 939	pm_runtime_cancel_pending(dev);
 940
 941	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
 942	dev->power.timer_expires += !dev->power.timer_expires;
 943	dev->power.timer_autosuspends = 0;
 944	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 945
 946 out:
 947	spin_unlock_irqrestore(&dev->power.lock, flags);
 948
 949	return retval;
 950}
 951EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 952
 953/**
 954 * __pm_runtime_idle - Entry point for runtime idle operations.
 955 * @dev: Device to send idle notification for.
 956 * @rpmflags: Flag bits.
 957 *
 958 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 959 * return immediately if it is larger than zero.  Then carry out an idle
 960 * notification, either synchronous or asynchronous.
 961 *
 962 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 963 * or if pm_runtime_irq_safe() has been called.
 964 */
 965int __pm_runtime_idle(struct device *dev, int rpmflags)
 966{
 967	unsigned long flags;
 968	int retval;
 969
 970	if (rpmflags & RPM_GET_PUT) {
 971		if (!atomic_dec_and_test(&dev->power.usage_count))
 
 972			return 0;
 
 973	}
 974
 975	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
 976
 977	spin_lock_irqsave(&dev->power.lock, flags);
 978	retval = rpm_idle(dev, rpmflags);
 979	spin_unlock_irqrestore(&dev->power.lock, flags);
 980
 981	return retval;
 982}
 983EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 984
 985/**
 986 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 987 * @dev: Device to suspend.
 988 * @rpmflags: Flag bits.
 989 *
 990 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 991 * return immediately if it is larger than zero.  Then carry out a suspend,
 992 * either synchronous or asynchronous.
 993 *
 994 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 995 * or if pm_runtime_irq_safe() has been called.
 996 */
 997int __pm_runtime_suspend(struct device *dev, int rpmflags)
 998{
 999	unsigned long flags;
1000	int retval;
1001
1002	if (rpmflags & RPM_GET_PUT) {
1003		if (!atomic_dec_and_test(&dev->power.usage_count))
 
1004			return 0;
 
1005	}
1006
1007	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1008
1009	spin_lock_irqsave(&dev->power.lock, flags);
1010	retval = rpm_suspend(dev, rpmflags);
1011	spin_unlock_irqrestore(&dev->power.lock, flags);
1012
1013	return retval;
1014}
1015EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1016
1017/**
1018 * __pm_runtime_resume - Entry point for runtime resume operations.
1019 * @dev: Device to resume.
1020 * @rpmflags: Flag bits.
1021 *
1022 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1023 * carry out a resume, either synchronous or asynchronous.
1024 *
1025 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1026 * or if pm_runtime_irq_safe() has been called.
1027 */
1028int __pm_runtime_resume(struct device *dev, int rpmflags)
1029{
1030	unsigned long flags;
1031	int retval;
1032
1033	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1034			dev->power.runtime_status != RPM_ACTIVE);
1035
1036	if (rpmflags & RPM_GET_PUT)
1037		atomic_inc(&dev->power.usage_count);
1038
1039	spin_lock_irqsave(&dev->power.lock, flags);
1040	retval = rpm_resume(dev, rpmflags);
1041	spin_unlock_irqrestore(&dev->power.lock, flags);
1042
1043	return retval;
1044}
1045EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1046
1047/**
1048 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1049 * @dev: Device to handle.
 
 
 
1050 *
1051 * Return -EINVAL if runtime PM is disabled for the device.
 
 
 
1052 *
1053 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1054 * and the runtime PM usage counter is nonzero, increment the counter and
1055 * return 1.  Otherwise return 0 without changing the counter.
 
 
 
 
 
 
1056 */
1057int pm_runtime_get_if_in_use(struct device *dev)
1058{
1059	unsigned long flags;
1060	int retval;
1061
1062	spin_lock_irqsave(&dev->power.lock, flags);
1063	retval = dev->power.disable_depth > 0 ? -EINVAL :
1064		dev->power.runtime_status == RPM_ACTIVE
1065			&& atomic_inc_not_zero(&dev->power.usage_count);
 
 
 
 
 
 
 
 
1066	spin_unlock_irqrestore(&dev->power.lock, flags);
 
1067	return retval;
1068}
1069EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1070
1071/**
1072 * __pm_runtime_set_status - Set runtime PM status of a device.
1073 * @dev: Device to handle.
1074 * @status: New runtime PM status of the device.
1075 *
1076 * If runtime PM of the device is disabled or its power.runtime_error field is
1077 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1078 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1079 * However, if the device has a parent and the parent is not active, and the
1080 * parent's power.ignore_children flag is unset, the device's status cannot be
1081 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1082 *
1083 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1084 * and the device parent's counter of unsuspended children is modified to
1085 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1086 * notification request for the parent is submitted.
 
 
 
 
 
 
 
1087 */
1088int __pm_runtime_set_status(struct device *dev, unsigned int status)
1089{
1090	struct device *parent = dev->parent;
1091	unsigned long flags;
1092	bool notify_parent = false;
1093	int error = 0;
1094
1095	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1096		return -EINVAL;
1097
1098	spin_lock_irqsave(&dev->power.lock, flags);
1099
1100	if (!dev->power.runtime_error && !dev->power.disable_depth) {
 
 
 
 
 
 
1101		error = -EAGAIN;
1102		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1103	}
1104
 
 
1105	if (dev->power.runtime_status == status || !parent)
1106		goto out_set;
1107
1108	if (status == RPM_SUSPENDED) {
1109		atomic_add_unless(&parent->power.child_count, -1, 0);
1110		notify_parent = !parent->power.ignore_children;
1111	} else {
1112		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1113
1114		/*
1115		 * It is invalid to put an active child under a parent that is
1116		 * not active, has runtime PM enabled and the
1117		 * 'power.ignore_children' flag unset.
1118		 */
1119		if (!parent->power.disable_depth
1120		    && !parent->power.ignore_children
1121		    && parent->power.runtime_status != RPM_ACTIVE) {
1122			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1123				dev_name(dev),
1124				dev_name(parent));
1125			error = -EBUSY;
1126		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1127			atomic_inc(&parent->power.child_count);
1128		}
1129
1130		spin_unlock(&parent->power.lock);
1131
1132		if (error)
 
1133			goto out;
 
1134	}
1135
1136 out_set:
1137	__update_runtime_status(dev, status);
1138	dev->power.runtime_error = 0;
 
 
1139 out:
1140	spin_unlock_irqrestore(&dev->power.lock, flags);
1141
1142	if (notify_parent)
1143		pm_request_idle(parent);
1144
 
 
 
 
 
 
 
 
 
 
1145	return error;
1146}
1147EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1148
1149/**
1150 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1151 * @dev: Device to handle.
1152 *
1153 * Flush all pending requests for the device from pm_wq and wait for all
1154 * runtime PM operations involving the device in progress to complete.
1155 *
1156 * Should be called under dev->power.lock with interrupts disabled.
1157 */
1158static void __pm_runtime_barrier(struct device *dev)
1159{
1160	pm_runtime_deactivate_timer(dev);
1161
1162	if (dev->power.request_pending) {
1163		dev->power.request = RPM_REQ_NONE;
1164		spin_unlock_irq(&dev->power.lock);
1165
1166		cancel_work_sync(&dev->power.work);
1167
1168		spin_lock_irq(&dev->power.lock);
1169		dev->power.request_pending = false;
1170	}
1171
1172	if (dev->power.runtime_status == RPM_SUSPENDING
1173	    || dev->power.runtime_status == RPM_RESUMING
1174	    || dev->power.idle_notification) {
1175		DEFINE_WAIT(wait);
1176
1177		/* Suspend, wake-up or idle notification in progress. */
1178		for (;;) {
1179			prepare_to_wait(&dev->power.wait_queue, &wait,
1180					TASK_UNINTERRUPTIBLE);
1181			if (dev->power.runtime_status != RPM_SUSPENDING
1182			    && dev->power.runtime_status != RPM_RESUMING
1183			    && !dev->power.idle_notification)
1184				break;
1185			spin_unlock_irq(&dev->power.lock);
1186
1187			schedule();
1188
1189			spin_lock_irq(&dev->power.lock);
1190		}
1191		finish_wait(&dev->power.wait_queue, &wait);
1192	}
1193}
1194
1195/**
1196 * pm_runtime_barrier - Flush pending requests and wait for completions.
1197 * @dev: Device to handle.
1198 *
1199 * Prevent the device from being suspended by incrementing its usage counter and
1200 * if there's a pending resume request for the device, wake the device up.
1201 * Next, make sure that all pending requests for the device have been flushed
1202 * from pm_wq and wait for all runtime PM operations involving the device in
1203 * progress to complete.
1204 *
1205 * Return value:
1206 * 1, if there was a resume request pending and the device had to be woken up,
1207 * 0, otherwise
1208 */
1209int pm_runtime_barrier(struct device *dev)
1210{
1211	int retval = 0;
1212
1213	pm_runtime_get_noresume(dev);
1214	spin_lock_irq(&dev->power.lock);
1215
1216	if (dev->power.request_pending
1217	    && dev->power.request == RPM_REQ_RESUME) {
1218		rpm_resume(dev, 0);
1219		retval = 1;
1220	}
1221
1222	__pm_runtime_barrier(dev);
1223
1224	spin_unlock_irq(&dev->power.lock);
1225	pm_runtime_put_noidle(dev);
1226
1227	return retval;
1228}
1229EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1230
1231/**
1232 * __pm_runtime_disable - Disable runtime PM of a device.
1233 * @dev: Device to handle.
1234 * @check_resume: If set, check if there's a resume request for the device.
1235 *
1236 * Increment power.disable_depth for the device and if it was zero previously,
1237 * cancel all pending runtime PM requests for the device and wait for all
1238 * operations in progress to complete.  The device can be either active or
1239 * suspended after its runtime PM has been disabled.
1240 *
1241 * If @check_resume is set and there's a resume request pending when
1242 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1243 * function will wake up the device before disabling its runtime PM.
1244 */
1245void __pm_runtime_disable(struct device *dev, bool check_resume)
1246{
1247	spin_lock_irq(&dev->power.lock);
1248
1249	if (dev->power.disable_depth > 0) {
1250		dev->power.disable_depth++;
1251		goto out;
1252	}
1253
1254	/*
1255	 * Wake up the device if there's a resume request pending, because that
1256	 * means there probably is some I/O to process and disabling runtime PM
1257	 * shouldn't prevent the device from processing the I/O.
1258	 */
1259	if (check_resume && dev->power.request_pending
1260	    && dev->power.request == RPM_REQ_RESUME) {
1261		/*
1262		 * Prevent suspends and idle notifications from being carried
1263		 * out after we have woken up the device.
1264		 */
1265		pm_runtime_get_noresume(dev);
1266
1267		rpm_resume(dev, 0);
1268
1269		pm_runtime_put_noidle(dev);
1270	}
1271
 
 
 
1272	if (!dev->power.disable_depth++)
1273		__pm_runtime_barrier(dev);
1274
1275 out:
1276	spin_unlock_irq(&dev->power.lock);
1277}
1278EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1279
1280/**
1281 * pm_runtime_enable - Enable runtime PM of a device.
1282 * @dev: Device to handle.
1283 */
1284void pm_runtime_enable(struct device *dev)
1285{
1286	unsigned long flags;
1287
1288	spin_lock_irqsave(&dev->power.lock, flags);
1289
1290	if (dev->power.disable_depth > 0)
1291		dev->power.disable_depth--;
1292	else
 
 
 
 
1293		dev_warn(dev, "Unbalanced %s!\n", __func__);
 
1294
1295	WARN(!dev->power.disable_depth &&
1296	     dev->power.runtime_status == RPM_SUSPENDED &&
1297	     !dev->power.ignore_children &&
1298	     atomic_read(&dev->power.child_count) > 0,
1299	     "Enabling runtime PM for inactive device (%s) with active children\n",
1300	     dev_name(dev));
1301
1302	spin_unlock_irqrestore(&dev->power.lock, flags);
1303}
1304EXPORT_SYMBOL_GPL(pm_runtime_enable);
1305
1306/**
1307 * pm_runtime_forbid - Block runtime PM of a device.
1308 * @dev: Device to handle.
1309 *
1310 * Increase the device's usage count and clear its power.runtime_auto flag,
1311 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1312 * for it.
1313 */
1314void pm_runtime_forbid(struct device *dev)
1315{
1316	spin_lock_irq(&dev->power.lock);
1317	if (!dev->power.runtime_auto)
1318		goto out;
1319
1320	dev->power.runtime_auto = false;
1321	atomic_inc(&dev->power.usage_count);
1322	rpm_resume(dev, 0);
1323
1324 out:
1325	spin_unlock_irq(&dev->power.lock);
1326}
1327EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1328
1329/**
1330 * pm_runtime_allow - Unblock runtime PM of a device.
1331 * @dev: Device to handle.
1332 *
1333 * Decrease the device's usage count and set its power.runtime_auto flag.
1334 */
1335void pm_runtime_allow(struct device *dev)
1336{
1337	spin_lock_irq(&dev->power.lock);
1338	if (dev->power.runtime_auto)
1339		goto out;
1340
1341	dev->power.runtime_auto = true;
1342	if (atomic_dec_and_test(&dev->power.usage_count))
1343		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
 
 
1344
1345 out:
1346	spin_unlock_irq(&dev->power.lock);
1347}
1348EXPORT_SYMBOL_GPL(pm_runtime_allow);
1349
1350/**
1351 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1352 * @dev: Device to handle.
1353 *
1354 * Set the power.no_callbacks flag, which tells the PM core that this
1355 * device is power-managed through its parent and has no runtime PM
1356 * callbacks of its own.  The runtime sysfs attributes will be removed.
1357 */
1358void pm_runtime_no_callbacks(struct device *dev)
1359{
1360	spin_lock_irq(&dev->power.lock);
1361	dev->power.no_callbacks = 1;
1362	spin_unlock_irq(&dev->power.lock);
1363	if (device_is_registered(dev))
1364		rpm_sysfs_remove(dev);
1365}
1366EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1367
1368/**
1369 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1370 * @dev: Device to handle
1371 *
1372 * Set the power.irq_safe flag, which tells the PM core that the
1373 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1374 * always be invoked with the spinlock held and interrupts disabled.  It also
1375 * causes the parent's usage counter to be permanently incremented, preventing
1376 * the parent from runtime suspending -- otherwise an irq-safe child might have
1377 * to wait for a non-irq-safe parent.
1378 */
1379void pm_runtime_irq_safe(struct device *dev)
1380{
1381	if (dev->parent)
1382		pm_runtime_get_sync(dev->parent);
1383	spin_lock_irq(&dev->power.lock);
1384	dev->power.irq_safe = 1;
1385	spin_unlock_irq(&dev->power.lock);
1386}
1387EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1388
1389/**
1390 * update_autosuspend - Handle a change to a device's autosuspend settings.
1391 * @dev: Device to handle.
1392 * @old_delay: The former autosuspend_delay value.
1393 * @old_use: The former use_autosuspend value.
1394 *
1395 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1396 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1397 *
1398 * This function must be called under dev->power.lock with interrupts disabled.
1399 */
1400static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1401{
1402	int delay = dev->power.autosuspend_delay;
1403
1404	/* Should runtime suspend be prevented now? */
1405	if (dev->power.use_autosuspend && delay < 0) {
1406
1407		/* If it used to be allowed then prevent it. */
1408		if (!old_use || old_delay >= 0) {
1409			atomic_inc(&dev->power.usage_count);
1410			rpm_resume(dev, 0);
 
 
1411		}
1412	}
1413
1414	/* Runtime suspend should be allowed now. */
1415	else {
1416
1417		/* If it used to be prevented then allow it. */
1418		if (old_use && old_delay < 0)
1419			atomic_dec(&dev->power.usage_count);
1420
1421		/* Maybe we can autosuspend now. */
1422		rpm_idle(dev, RPM_AUTO);
1423	}
1424}
1425
1426/**
1427 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1428 * @dev: Device to handle.
1429 * @delay: Value of the new delay in milliseconds.
1430 *
1431 * Set the device's power.autosuspend_delay value.  If it changes to negative
1432 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1433 * changes the other way, allow runtime suspends.
1434 */
1435void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1436{
1437	int old_delay, old_use;
1438
1439	spin_lock_irq(&dev->power.lock);
1440	old_delay = dev->power.autosuspend_delay;
1441	old_use = dev->power.use_autosuspend;
1442	dev->power.autosuspend_delay = delay;
1443	update_autosuspend(dev, old_delay, old_use);
1444	spin_unlock_irq(&dev->power.lock);
1445}
1446EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1447
1448/**
1449 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1450 * @dev: Device to handle.
1451 * @use: New value for use_autosuspend.
1452 *
1453 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1454 * suspends as needed.
1455 */
1456void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1457{
1458	int old_delay, old_use;
1459
1460	spin_lock_irq(&dev->power.lock);
1461	old_delay = dev->power.autosuspend_delay;
1462	old_use = dev->power.use_autosuspend;
1463	dev->power.use_autosuspend = use;
1464	update_autosuspend(dev, old_delay, old_use);
1465	spin_unlock_irq(&dev->power.lock);
1466}
1467EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1468
1469/**
1470 * pm_runtime_init - Initialize runtime PM fields in given device object.
1471 * @dev: Device object to initialize.
1472 */
1473void pm_runtime_init(struct device *dev)
1474{
1475	dev->power.runtime_status = RPM_SUSPENDED;
1476	dev->power.idle_notification = false;
1477
1478	dev->power.disable_depth = 1;
1479	atomic_set(&dev->power.usage_count, 0);
1480
1481	dev->power.runtime_error = 0;
1482
1483	atomic_set(&dev->power.child_count, 0);
1484	pm_suspend_ignore_children(dev, false);
1485	dev->power.runtime_auto = true;
1486
1487	dev->power.request_pending = false;
1488	dev->power.request = RPM_REQ_NONE;
1489	dev->power.deferred_resume = false;
1490	dev->power.accounting_timestamp = jiffies;
1491	INIT_WORK(&dev->power.work, pm_runtime_work);
1492
1493	dev->power.timer_expires = 0;
1494	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
 
1495
1496	init_waitqueue_head(&dev->power.wait_queue);
1497}
1498
1499/**
1500 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1501 * @dev: Device object to re-initialize.
1502 */
1503void pm_runtime_reinit(struct device *dev)
1504{
1505	if (!pm_runtime_enabled(dev)) {
1506		if (dev->power.runtime_status == RPM_ACTIVE)
1507			pm_runtime_set_suspended(dev);
1508		if (dev->power.irq_safe) {
1509			spin_lock_irq(&dev->power.lock);
1510			dev->power.irq_safe = 0;
1511			spin_unlock_irq(&dev->power.lock);
1512			if (dev->parent)
1513				pm_runtime_put(dev->parent);
1514		}
1515	}
1516}
1517
1518/**
1519 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1520 * @dev: Device object being removed from device hierarchy.
1521 */
1522void pm_runtime_remove(struct device *dev)
1523{
1524	__pm_runtime_disable(dev, false);
1525	pm_runtime_reinit(dev);
1526}
1527
1528/**
1529 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1530 * @dev: Device whose driver is going to be removed.
1531 *
1532 * Check links from this device to any consumers and if any of them have active
1533 * runtime PM references to the device, drop the usage counter of the device
1534 * (once per link).
1535 *
1536 * Links with the DL_FLAG_STATELESS flag set are ignored.
1537 *
1538 * Since the device is guaranteed to be runtime-active at the point this is
1539 * called, nothing else needs to be done here.
1540 *
1541 * Moreover, this is called after device_links_busy() has returned 'false', so
1542 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1543 * therefore rpm_active can't be manipulated concurrently.
1544 */
1545void pm_runtime_clean_up_links(struct device *dev)
1546{
1547	struct device_link *link;
1548	int idx;
1549
1550	idx = device_links_read_lock();
1551
1552	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1553		if (link->flags & DL_FLAG_STATELESS)
 
1554			continue;
1555
1556		if (link->rpm_active) {
1557			pm_runtime_put_noidle(dev);
1558			link->rpm_active = false;
1559		}
1560	}
1561
1562	device_links_read_unlock(idx);
1563}
1564
1565/**
1566 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1567 * @dev: Consumer device.
1568 */
1569void pm_runtime_get_suppliers(struct device *dev)
1570{
1571	struct device_link *link;
1572	int idx;
1573
1574	idx = device_links_read_lock();
1575
1576	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1577		if (link->flags & DL_FLAG_PM_RUNTIME)
 
 
 
1578			pm_runtime_get_sync(link->supplier);
 
1579
1580	device_links_read_unlock(idx);
1581}
1582
1583/**
1584 * pm_runtime_put_suppliers - Drop references to supplier devices.
1585 * @dev: Consumer device.
1586 */
1587void pm_runtime_put_suppliers(struct device *dev)
1588{
1589	struct device_link *link;
1590	int idx;
1591
1592	idx = device_links_read_lock();
1593
1594	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1595		if (link->flags & DL_FLAG_PM_RUNTIME)
1596			pm_runtime_put(link->supplier);
 
 
 
 
1597
1598	device_links_read_unlock(idx);
1599}
1600
1601void pm_runtime_new_link(struct device *dev)
1602{
1603	spin_lock_irq(&dev->power.lock);
1604	dev->power.links_count++;
1605	spin_unlock_irq(&dev->power.lock);
1606}
1607
1608void pm_runtime_drop_link(struct device *dev)
1609{
1610	spin_lock_irq(&dev->power.lock);
1611	WARN_ON(dev->power.links_count == 0);
1612	dev->power.links_count--;
1613	spin_unlock_irq(&dev->power.lock);
1614}
1615
1616static bool pm_runtime_need_not_resume(struct device *dev)
1617{
1618	return atomic_read(&dev->power.usage_count) <= 1 &&
1619		(atomic_read(&dev->power.child_count) == 0 ||
1620		 dev->power.ignore_children);
1621}
1622
1623/**
1624 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1625 * @dev: Device to suspend.
1626 *
1627 * Disable runtime PM so we safely can check the device's runtime PM status and
1628 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1629 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1630 * usage and children counters don't indicate that the device was in use before
1631 * the system-wide transition under way, decrement its parent's children counter
1632 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1633 * unless we encounter errors.
1634 *
1635 * Typically this function may be invoked from a system suspend callback to make
1636 * sure the device is put into low power state and it should only be used during
1637 * system-wide PM transitions to sleep states.  It assumes that the analogous
1638 * pm_runtime_force_resume() will be used to resume the device.
1639 */
1640int pm_runtime_force_suspend(struct device *dev)
1641{
1642	int (*callback)(struct device *);
1643	int ret;
1644
1645	pm_runtime_disable(dev);
1646	if (pm_runtime_status_suspended(dev))
1647		return 0;
1648
1649	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1650
1651	ret = callback ? callback(dev) : 0;
1652	if (ret)
1653		goto err;
1654
1655	/*
1656	 * If the device can stay in suspend after the system-wide transition
1657	 * to the working state that will follow, drop the children counter of
1658	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1659	 * function will be called again for it in the meantime.
1660	 */
1661	if (pm_runtime_need_not_resume(dev))
1662		pm_runtime_set_suspended(dev);
1663	else
1664		__update_runtime_status(dev, RPM_SUSPENDED);
1665
1666	return 0;
1667
1668err:
1669	pm_runtime_enable(dev);
1670	return ret;
1671}
1672EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1673
1674/**
1675 * pm_runtime_force_resume - Force a device into resume state if needed.
1676 * @dev: Device to resume.
1677 *
1678 * Prior invoking this function we expect the user to have brought the device
1679 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1680 * those actions and bring the device into full power, if it is expected to be
1681 * used on system resume.  In the other case, we defer the resume to be managed
1682 * via runtime PM.
1683 *
1684 * Typically this function may be invoked from a system resume callback.
1685 */
1686int pm_runtime_force_resume(struct device *dev)
1687{
1688	int (*callback)(struct device *);
1689	int ret = 0;
1690
1691	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1692		goto out;
1693
1694	/*
1695	 * The value of the parent's children counter is correct already, so
1696	 * just update the status of the device.
1697	 */
1698	__update_runtime_status(dev, RPM_ACTIVE);
1699
1700	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1701
1702	ret = callback ? callback(dev) : 0;
1703	if (ret) {
1704		pm_runtime_set_suspended(dev);
1705		goto out;
1706	}
1707
1708	pm_runtime_mark_last_busy(dev);
1709out:
1710	pm_runtime_enable(dev);
1711	return ret;
1712}
1713EXPORT_SYMBOL_GPL(pm_runtime_force_resume);