v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   7 */
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  50
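/*
 * Illustrative sketch (not part of runtime.c): RPM_GET_CALLBACK() looks for
 * a callback in the PM domain, device type, class and bus, in that order,
 * and only falls back to the driver's own dev_pm_ops when none of them
 * provides one.  A hypothetical driver supplying that fallback could look
 * like this (all foo_* names are made up):
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	/* Put the hardware into a low-power state. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
#endif
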
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65static void update_pm_runtime_accounting(struct device *dev)
  66{
  67	u64 now, last, delta;
  68
  69	if (dev->power.disable_depth > 0)
  70		return;
  71
  72	last = dev->power.accounting_timestamp;
  73
  74	now = ktime_get_mono_fast_ns();
  75	dev->power.accounting_timestamp = now;
  76
  77	/*
  78	 * Because ktime_get_mono_fast_ns() is not monotonic during
  79	 * timekeeping updates, ensure that 'now' is after the last saved
   80	 * timestamp.
  81	 */
  82	if (now < last)
  83		return;
  84
  85	delta = now - last;
  86
  87	if (dev->power.runtime_status == RPM_SUSPENDED)
  88		dev->power.suspended_time += delta;
  89	else
  90		dev->power.active_time += delta;
  91}
  92
  93static void __update_runtime_status(struct device *dev, enum rpm_status status)
  94{
  95	update_pm_runtime_accounting(dev);
  96	dev->power.runtime_status = status;
  97}
  98
  99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 100{
 101	u64 time;
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&dev->power.lock, flags);
 105
 106	update_pm_runtime_accounting(dev);
 107	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 108
 109	spin_unlock_irqrestore(&dev->power.lock, flags);
 110
 111	return time;
 112}
 113
 114u64 pm_runtime_active_time(struct device *dev)
 115{
 116	return rpm_get_accounted_time(dev, false);
 117}
 118
 119u64 pm_runtime_suspended_time(struct device *dev)
 120{
 121	return rpm_get_accounted_time(dev, true);
 122}
 123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 124
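/*
 * Illustrative sketch (assumptions: a built-in caller, since only
 * pm_runtime_suspended_time() is exported): both accessors return the
 * accumulated time, in nanoseconds, spent in the respective runtime PM
 * state.  A hypothetical debugfs helper could report them:
 */
#if 0
static int foo_pm_stats_show(struct seq_file *s, void *unused)
{
	struct device *dev = s->private;

	seq_printf(s, "active:    %llu ns\n", pm_runtime_active_time(dev));
	seq_printf(s, "suspended: %llu ns\n", pm_runtime_suspended_time(dev));
	return 0;
}
#endif
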
 125/**
 126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 127 * @dev: Device to handle.
 128 */
 129static void pm_runtime_deactivate_timer(struct device *dev)
 130{
 131	if (dev->power.timer_expires > 0) {
 132		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 133		dev->power.timer_expires = 0;
 134	}
 135}
 136
 137/**
 138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 139 * @dev: Device to handle.
 140 */
 141static void pm_runtime_cancel_pending(struct device *dev)
 142{
 143	pm_runtime_deactivate_timer(dev);
 144	/*
 145	 * In case there's a request pending, make sure its work function will
 146	 * return without doing anything.
 147	 */
 148	dev->power.request = RPM_REQ_NONE;
 149}
 150
  151/**
 152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 153 * @dev: Device to handle.
 154 *
 155 * Compute the autosuspend-delay expiration time based on the device's
 156 * power.last_busy time.  If the delay has already expired or is disabled
 157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 159 *
 160 * This function may be called either with or without dev->power.lock held.
 161 * Either way it can be racy, since power.last_busy may be updated at any time.
 162 */
 163u64 pm_runtime_autosuspend_expiration(struct device *dev)
 164{
 165	int autosuspend_delay;
 166	u64 expires;
 167
 168	if (!dev->power.use_autosuspend)
 169		return 0;
 170
 171	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 172	if (autosuspend_delay < 0)
 173		return 0;
 174
 175	expires  = READ_ONCE(dev->power.last_busy);
 176	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 177	if (expires > ktime_get_mono_fast_ns())
 178		return expires;	/* Expires in the future */
 179
 180	return 0;
 181}
 182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 183
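/*
 * Illustrative sketch: the expiration time computed above is
 * power.last_busy plus the autosuspend delay converted to nanoseconds.
 * A hypothetical driver refreshes last_busy after each I/O burst so the
 * autosuspend deadline keeps being pushed back:
 */
#if 0
static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the delay window */
	pm_runtime_put_autosuspend(dev);	/* may schedule the suspend */
}
#endif
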
 184static int dev_memalloc_noio(struct device *dev, void *data)
 185{
 186	return dev->power.memalloc_noio;
 187}
 188
  189/**
 190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 191 * @dev: Device to handle.
 192 * @enable: True for setting the flag and False for clearing the flag.
 193 *
 194 * Set the flag for all devices in the path from the device to the
 195 * root device in the device tree if @enable is true, otherwise clear
 196 * the flag for devices in the path whose siblings don't set the flag.
 197 *
  198 * The function should only be called by a block device or network
  199 * device driver to solve the deadlock problem that can occur during
  200 * runtime resume/suspend:
  201 *
  202 *     If memory allocation with GFP_KERNEL is called inside the runtime
  203 *     resume/suspend callback of any one of its ancestors (or the
  204 *     block device itself), a deadlock may be triggered inside the
  205 *     memory allocation, since it might not complete until the block
  206 *     device becomes active and the involved page I/O finishes. This
  207 *     situation was first pointed out by Alan Stern. Network devices
  208 *     are involved in iSCSI-type situations.
 209 *
  210 * The dev_hotplug_mutex lock is held in this function to handle the
  211 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
  212 * during an async probe().
  213 *
  214 * The function should be called between device_add() and device_del()
  215 * on the affected device (block/network device).
 216 */
 217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 218{
 219	static DEFINE_MUTEX(dev_hotplug_mutex);
 220
 221	mutex_lock(&dev_hotplug_mutex);
 222	for (;;) {
 223		bool enabled;
 224
 225		/* hold power lock since bitfield is not SMP-safe. */
 226		spin_lock_irq(&dev->power.lock);
 227		enabled = dev->power.memalloc_noio;
 228		dev->power.memalloc_noio = enable;
 229		spin_unlock_irq(&dev->power.lock);
 230
 231		/*
  232		 * No need to enable the ancestors any more if the device
  233		 * has already been enabled.
 234		 */
 235		if (enabled && enable)
 236			break;
 237
 238		dev = dev->parent;
 239
 240		/*
  241		 * Clear the flag of the parent device only if none of
  242		 * its children has the flag set, because an ancestor's
  243		 * flag may have been set by any one of its descendants.
 244		 */
 245		if (!dev || (!enable &&
 246			     device_for_each_child(dev, NULL,
 247						   dev_memalloc_noio)))
 248			break;
 249	}
 250	mutex_unlock(&dev_hotplug_mutex);
 251}
 252EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
 253
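/*
 * Illustrative sketch: per the comment above, a hypothetical block
 * driver sets the flag right after device_add() and clears it before
 * device_del() (foo_* names are made up):
 */
#if 0
/* After device_add() in the attach path: */
static void foo_attach_pm(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, true);
}

/* Before device_del() in the detach path: */
static void foo_detach_pm(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
}
#endif
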
 254/**
 255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 256 * @dev: Device to test.
 257 */
 258static int rpm_check_suspend_allowed(struct device *dev)
 259{
 260	int retval = 0;
 261
 262	if (dev->power.runtime_error)
 263		retval = -EINVAL;
 264	else if (dev->power.disable_depth > 0)
 265		retval = -EACCES;
 266	else if (atomic_read(&dev->power.usage_count) > 0)
 267		retval = -EAGAIN;
 268	else if (!dev->power.ignore_children &&
 269			atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume
 274			&& dev->power.runtime_status == RPM_SUSPENDING)
 275	    || (dev->power.request_pending
 276			&& dev->power.request == RPM_REQ_RESUME))
 277		retval = -EAGAIN;
 278	else if (__dev_pm_qos_resume_latency(dev) == 0)
 279		retval = -EPERM;
 280	else if (dev->power.runtime_status == RPM_SUSPENDED)
 281		retval = 1;
 282
 283	return retval;
 284}
 285
 286static int rpm_get_suppliers(struct device *dev)
 287{
 288	struct device_link *link;
 289
 290	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 291				device_links_read_lock_held()) {
 292		int retval;
 293
  294		if (!(link->flags & DL_FLAG_PM_RUNTIME))
  295			continue;
 296
 297		retval = pm_runtime_get_sync(link->supplier);
 298		/* Ignore suppliers with disabled runtime PM. */
 299		if (retval < 0 && retval != -EACCES) {
 300			pm_runtime_put_noidle(link->supplier);
 301			return retval;
 302		}
 303		refcount_inc(&link->rpm_active);
 304	}
 305	return 0;
 306}
 307
 308static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 309{
 310	struct device_link *link;
 311
 312	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
  313				device_links_read_lock_held()) {
  314
 315		while (refcount_dec_not_one(&link->rpm_active))
 316			pm_runtime_put_noidle(link->supplier);
 317
 318		if (try_to_suspend)
 319			pm_request_idle(link->supplier);
 320	}
 321}
 322
 323static void rpm_put_suppliers(struct device *dev)
 324{
 325	__rpm_put_suppliers(dev, true);
 326}
 327
 328static void rpm_suspend_suppliers(struct device *dev)
 329{
 330	struct device_link *link;
 331	int idx = device_links_read_lock();
 332
 333	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 334				device_links_read_lock_held())
 335		pm_request_idle(link->supplier);
 336
 337	device_links_read_unlock(idx);
 338}
 339
 340/**
 341 * __rpm_callback - Run a given runtime PM callback for a given device.
 342 * @cb: Runtime PM callback to run.
 343 * @dev: Device to run the callback for.
 344 */
 345static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 346	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 347{
 348	int retval = 0, idx;
 349	bool use_links = dev->power.links_count > 0;
 350
 351	if (dev->power.irq_safe) {
 352		spin_unlock(&dev->power.lock);
 353	} else {
 354		spin_unlock_irq(&dev->power.lock);
 355
 356		/*
 357		 * Resume suppliers if necessary.
 358		 *
 359		 * The device's runtime PM status cannot change until this
 360		 * routine returns, so it is safe to read the status outside of
 361		 * the lock.
 362		 */
 363		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 364			idx = device_links_read_lock();
 365
 366			retval = rpm_get_suppliers(dev);
 367			if (retval) {
 368				rpm_put_suppliers(dev);
 369				goto fail;
 370			}
 371
 372			device_links_read_unlock(idx);
 373		}
 374	}
 375
 376	if (cb)
 377		retval = cb(dev);
 378
 379	if (dev->power.irq_safe) {
 380		spin_lock(&dev->power.lock);
 381	} else {
 382		/*
 383		 * If the device is suspending and the callback has returned
 384		 * success, drop the usage counters of the suppliers that have
 385		 * been reference counted on its resume.
 386		 *
 387		 * Do that if resume fails too.
 388		 */
 389		if (use_links
 390		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 391		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 392			idx = device_links_read_lock();
 393
  394			__rpm_put_suppliers(dev, false);
  395
 396fail:
 397			device_links_read_unlock(idx);
 398		}
 399
 400		spin_lock_irq(&dev->power.lock);
 401	}
 402
 403	return retval;
 404}
 405
 406/**
 407 * rpm_idle - Notify device bus type if the device can be suspended.
 408 * @dev: Device to notify the bus type about.
 409 * @rpmflags: Flag bits.
 410 *
 411 * Check if the device's runtime PM status allows it to be suspended.  If
 412 * another idle notification has been started earlier, return immediately.  If
 413 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 414 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 415 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 416 *
 417 * This function must be called under dev->power.lock with interrupts disabled.
 418 */
 419static int rpm_idle(struct device *dev, int rpmflags)
 420{
 421	int (*callback)(struct device *);
 422	int retval;
 423
 424	trace_rpm_idle_rcuidle(dev, rpmflags);
 425	retval = rpm_check_suspend_allowed(dev);
 426	if (retval < 0)
 427		;	/* Conditions are wrong. */
 428
 429	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 430	else if (dev->power.runtime_status != RPM_ACTIVE)
 431		retval = -EAGAIN;
 432
 433	/*
 434	 * Any pending request other than an idle notification takes
 435	 * precedence over us, except that the timer may be running.
 436	 */
 437	else if (dev->power.request_pending &&
 438	    dev->power.request > RPM_REQ_IDLE)
 439		retval = -EAGAIN;
 440
 441	/* Act as though RPM_NOWAIT is always set. */
 442	else if (dev->power.idle_notification)
 443		retval = -EINPROGRESS;
 444	if (retval)
 445		goto out;
 446
 447	/* Pending requests need to be canceled. */
 448	dev->power.request = RPM_REQ_NONE;
 449
 450	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 451
 452	/* If no callback assume success. */
 453	if (!callback || dev->power.no_callbacks)
 454		goto out;
 455
 456	/* Carry out an asynchronous or a synchronous idle notification. */
 457	if (rpmflags & RPM_ASYNC) {
 458		dev->power.request = RPM_REQ_IDLE;
 459		if (!dev->power.request_pending) {
 460			dev->power.request_pending = true;
 461			queue_work(pm_wq, &dev->power.work);
 462		}
 463		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 464		return 0;
 465	}
 466
 467	dev->power.idle_notification = true;
 468
  469	retval = __rpm_callback(callback, dev);
  470
 471	dev->power.idle_notification = false;
 472	wake_up_all(&dev->power.wait_queue);
 473
 474 out:
 475	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 476	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 477}
 478
 479/**
 480 * rpm_callback - Run a given runtime PM callback for a given device.
 481 * @cb: Runtime PM callback to run.
 482 * @dev: Device to run the callback for.
 483 */
 484static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 485{
 486	int retval;
  487
  488	if (dev->power.memalloc_noio) {
 489		unsigned int noio_flag;
 490
 491		/*
 492		 * Deadlock might be caused if memory allocation with
 493		 * GFP_KERNEL happens inside runtime_suspend and
 494		 * runtime_resume callbacks of one block device's
 495		 * ancestor or the block device itself. Network
 496		 * device might be thought as part of iSCSI block
 497		 * device, so network device and its ancestor should
 498		 * be marked as memalloc_noio too.
 499		 */
 500		noio_flag = memalloc_noio_save();
 501		retval = __rpm_callback(cb, dev);
 502		memalloc_noio_restore(noio_flag);
 503	} else {
 504		retval = __rpm_callback(cb, dev);
 505	}
 506
 507	dev->power.runtime_error = retval;
 508	return retval != -EACCES ? retval : -EIO;
 509}
 510
 511/**
 512 * rpm_suspend - Carry out runtime suspend of given device.
 513 * @dev: Device to suspend.
 514 * @rpmflags: Flag bits.
 515 *
 516 * Check if the device's runtime PM status allows it to be suspended.
 517 * Cancel a pending idle notification, autosuspend or suspend. If
 518 * another suspend has been started earlier, either return immediately
 519 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 520 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 521 * otherwise run the ->runtime_suspend() callback directly. When
 522 * ->runtime_suspend succeeded, if a deferred resume was requested while
 523 * the callback was running then carry it out, otherwise send an idle
 524 * notification for its parent (if the suspend succeeded and both
 525 * ignore_children of parent->power and irq_safe of dev->power are not set).
 526 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 527 * flag is set and the next autosuspend-delay expiration time is in the
 528 * future, schedule another autosuspend attempt.
 529 *
 530 * This function must be called under dev->power.lock with interrupts disabled.
 531 */
 532static int rpm_suspend(struct device *dev, int rpmflags)
 533	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 534{
 535	int (*callback)(struct device *);
 536	struct device *parent = NULL;
 537	int retval;
 538
 539	trace_rpm_suspend_rcuidle(dev, rpmflags);
 540
 541 repeat:
  542	retval = rpm_check_suspend_allowed(dev);
  543	if (retval < 0)
 544		goto out;	/* Conditions are wrong. */
 545
 546	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
  547	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
  548		retval = -EAGAIN;
 549	if (retval)
 550		goto out;
 551
 552	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 553	if ((rpmflags & RPM_AUTO)
 554	    && dev->power.runtime_status != RPM_SUSPENDING) {
 555		u64 expires = pm_runtime_autosuspend_expiration(dev);
 556
 557		if (expires != 0) {
 558			/* Pending requests need to be canceled. */
 559			dev->power.request = RPM_REQ_NONE;
 560
 561			/*
 562			 * Optimization: If the timer is already running and is
 563			 * set to expire at or before the autosuspend delay,
 564			 * avoid the overhead of resetting it.  Just let it
 565			 * expire; pm_suspend_timer_fn() will take care of the
 566			 * rest.
 567			 */
 568			if (!(dev->power.timer_expires &&
 569					dev->power.timer_expires <= expires)) {
 570				/*
 571				 * We add a slack of 25% to gather wakeups
 572				 * without sacrificing the granularity.
 573				 */
 574				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 575						    (NSEC_PER_MSEC >> 2);
 576
 577				dev->power.timer_expires = expires;
 578				hrtimer_start_range_ns(&dev->power.suspend_timer,
 579						ns_to_ktime(expires),
 580						slack,
 581						HRTIMER_MODE_ABS);
 582			}
 583			dev->power.timer_autosuspends = 1;
 584			goto out;
 585		}
 586	}
 587
 588	/* Other scheduled or pending requests need to be canceled. */
 589	pm_runtime_cancel_pending(dev);
 590
 591	if (dev->power.runtime_status == RPM_SUSPENDING) {
 592		DEFINE_WAIT(wait);
 593
 594		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 595			retval = -EINPROGRESS;
 596			goto out;
 597		}
 598
 599		if (dev->power.irq_safe) {
 600			spin_unlock(&dev->power.lock);
 601
 602			cpu_relax();
 603
 604			spin_lock(&dev->power.lock);
 605			goto repeat;
 606		}
 607
 608		/* Wait for the other suspend running in parallel with us. */
 609		for (;;) {
 610			prepare_to_wait(&dev->power.wait_queue, &wait,
 611					TASK_UNINTERRUPTIBLE);
 612			if (dev->power.runtime_status != RPM_SUSPENDING)
 613				break;
 614
 615			spin_unlock_irq(&dev->power.lock);
 616
 617			schedule();
 618
 619			spin_lock_irq(&dev->power.lock);
 620		}
 621		finish_wait(&dev->power.wait_queue, &wait);
 622		goto repeat;
 623	}
 624
 625	if (dev->power.no_callbacks)
 626		goto no_callback;	/* Assume success. */
 627
 628	/* Carry out an asynchronous or a synchronous suspend. */
 629	if (rpmflags & RPM_ASYNC) {
 630		dev->power.request = (rpmflags & RPM_AUTO) ?
 631		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 632		if (!dev->power.request_pending) {
 633			dev->power.request_pending = true;
 634			queue_work(pm_wq, &dev->power.work);
 635		}
 636		goto out;
 637	}
 638
 639	__update_runtime_status(dev, RPM_SUSPENDING);
 640
 641	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 642
 643	dev_pm_enable_wake_irq_check(dev, true);
 644	retval = rpm_callback(callback, dev);
 645	if (retval)
 646		goto fail;
 647
 648 no_callback:
 649	__update_runtime_status(dev, RPM_SUSPENDED);
 650	pm_runtime_deactivate_timer(dev);
 651
 652	if (dev->parent) {
 653		parent = dev->parent;
 654		atomic_add_unless(&parent->power.child_count, -1, 0);
 655	}
 656	wake_up_all(&dev->power.wait_queue);
 657
 658	if (dev->power.deferred_resume) {
 659		dev->power.deferred_resume = false;
 660		rpm_resume(dev, 0);
 661		retval = -EAGAIN;
 662		goto out;
 663	}
 664
 665	if (dev->power.irq_safe)
 666		goto out;
 667
 668	/* Maybe the parent is now able to suspend. */
 669	if (parent && !parent->power.ignore_children) {
 670		spin_unlock(&dev->power.lock);
 671
 672		spin_lock(&parent->power.lock);
 673		rpm_idle(parent, RPM_ASYNC);
 674		spin_unlock(&parent->power.lock);
 675
 676		spin_lock(&dev->power.lock);
 677	}
 678	/* Maybe the suppliers are now able to suspend. */
 679	if (dev->power.links_count > 0) {
 680		spin_unlock_irq(&dev->power.lock);
 681
 682		rpm_suspend_suppliers(dev);
 683
 684		spin_lock_irq(&dev->power.lock);
 685	}
 686
 687 out:
 688	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 689
 690	return retval;
 691
 692 fail:
 693	dev_pm_disable_wake_irq_check(dev);
 694	__update_runtime_status(dev, RPM_ACTIVE);
 695	dev->power.deferred_resume = false;
 696	wake_up_all(&dev->power.wait_queue);
 697
 698	if (retval == -EAGAIN || retval == -EBUSY) {
 699		dev->power.runtime_error = 0;
 700
 701		/*
 702		 * If the callback routine failed an autosuspend, and
 703		 * if the last_busy time has been updated so that there
 704		 * is a new autosuspend expiration time, automatically
 705		 * reschedule another autosuspend.
 706		 */
 707		if ((rpmflags & RPM_AUTO) &&
 708		    pm_runtime_autosuspend_expiration(dev) != 0)
 709			goto repeat;
 710	} else {
 711		pm_runtime_cancel_pending(dev);
 712	}
 713	goto out;
 714}
 715
 716/**
 717 * rpm_resume - Carry out runtime resume of given device.
 718 * @dev: Device to resume.
 719 * @rpmflags: Flag bits.
 720 *
 721 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 722 * any scheduled or pending requests.  If another resume has been started
 723 * earlier, either return immediately or wait for it to finish, depending on the
 724 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 725 * parallel with this function, either tell the other process to resume after
 726 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 727 * flag is set then queue a resume request; otherwise run the
 728 * ->runtime_resume() callback directly.  Queue an idle notification for the
 729 * device if the resume succeeded.
 730 *
 731 * This function must be called under dev->power.lock with interrupts disabled.
 732 */
 733static int rpm_resume(struct device *dev, int rpmflags)
 734	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 735{
 736	int (*callback)(struct device *);
 737	struct device *parent = NULL;
 738	int retval = 0;
 739
 740	trace_rpm_resume_rcuidle(dev, rpmflags);
 741
 742 repeat:
 743	if (dev->power.runtime_error)
 744		retval = -EINVAL;
 745	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 746	    && dev->power.runtime_status == RPM_ACTIVE)
 747		retval = 1;
 748	else if (dev->power.disable_depth > 0)
 749		retval = -EACCES;
 750	if (retval)
 751		goto out;
 752
 753	/*
 754	 * Other scheduled or pending requests need to be canceled.  Small
 755	 * optimization: If an autosuspend timer is running, leave it running
 756	 * rather than cancelling it now only to restart it again in the near
 757	 * future.
 758	 */
 759	dev->power.request = RPM_REQ_NONE;
 760	if (!dev->power.timer_autosuspends)
 761		pm_runtime_deactivate_timer(dev);
 762
 763	if (dev->power.runtime_status == RPM_ACTIVE) {
 764		retval = 1;
 765		goto out;
 766	}
 767
 768	if (dev->power.runtime_status == RPM_RESUMING
 769	    || dev->power.runtime_status == RPM_SUSPENDING) {
 770		DEFINE_WAIT(wait);
 771
 772		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 773			if (dev->power.runtime_status == RPM_SUSPENDING)
 774				dev->power.deferred_resume = true;
 775			else
 776				retval = -EINPROGRESS;
 777			goto out;
 778		}
 779
 780		if (dev->power.irq_safe) {
 781			spin_unlock(&dev->power.lock);
 782
 783			cpu_relax();
 784
 785			spin_lock(&dev->power.lock);
 786			goto repeat;
 787		}
 788
 789		/* Wait for the operation carried out in parallel with us. */
 790		for (;;) {
 791			prepare_to_wait(&dev->power.wait_queue, &wait,
 792					TASK_UNINTERRUPTIBLE);
 793			if (dev->power.runtime_status != RPM_RESUMING
 794			    && dev->power.runtime_status != RPM_SUSPENDING)
 795				break;
 796
 797			spin_unlock_irq(&dev->power.lock);
 798
 799			schedule();
 800
 801			spin_lock_irq(&dev->power.lock);
 802		}
 803		finish_wait(&dev->power.wait_queue, &wait);
 804		goto repeat;
 805	}
 806
 807	/*
 808	 * See if we can skip waking up the parent.  This is safe only if
 809	 * power.no_callbacks is set, because otherwise we don't know whether
 810	 * the resume will actually succeed.
 811	 */
 812	if (dev->power.no_callbacks && !parent && dev->parent) {
 813		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 814		if (dev->parent->power.disable_depth > 0
 815		    || dev->parent->power.ignore_children
 816		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 817			atomic_inc(&dev->parent->power.child_count);
 818			spin_unlock(&dev->parent->power.lock);
 819			retval = 1;
 820			goto no_callback;	/* Assume success. */
 821		}
 822		spin_unlock(&dev->parent->power.lock);
 823	}
 824
 825	/* Carry out an asynchronous or a synchronous resume. */
 826	if (rpmflags & RPM_ASYNC) {
 827		dev->power.request = RPM_REQ_RESUME;
 828		if (!dev->power.request_pending) {
 829			dev->power.request_pending = true;
 830			queue_work(pm_wq, &dev->power.work);
 831		}
 832		retval = 0;
 833		goto out;
 834	}
 835
 836	if (!parent && dev->parent) {
 837		/*
 838		 * Increment the parent's usage counter and resume it if
 839		 * necessary.  Not needed if dev is irq-safe; then the
 840		 * parent is permanently resumed.
 841		 */
 842		parent = dev->parent;
 843		if (dev->power.irq_safe)
 844			goto skip_parent;
 845		spin_unlock(&dev->power.lock);
 846
 847		pm_runtime_get_noresume(parent);
 848
 849		spin_lock(&parent->power.lock);
 850		/*
  851		 * Resume the parent if it has runtime PM enabled and has not
  852		 * been set to ignore its children.
 853		 */
 854		if (!parent->power.disable_depth
 855		    && !parent->power.ignore_children) {
 856			rpm_resume(parent, 0);
 857			if (parent->power.runtime_status != RPM_ACTIVE)
 858				retval = -EBUSY;
 859		}
 860		spin_unlock(&parent->power.lock);
 861
 862		spin_lock(&dev->power.lock);
 863		if (retval)
 864			goto out;
 865		goto repeat;
 866	}
 867 skip_parent:
 868
 869	if (dev->power.no_callbacks)
 870		goto no_callback;	/* Assume success. */
 871
 872	__update_runtime_status(dev, RPM_RESUMING);
 873
 874	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 875
 876	dev_pm_disable_wake_irq_check(dev);
 877	retval = rpm_callback(callback, dev);
 878	if (retval) {
 879		__update_runtime_status(dev, RPM_SUSPENDED);
 880		pm_runtime_cancel_pending(dev);
 881		dev_pm_enable_wake_irq_check(dev, false);
 882	} else {
 883 no_callback:
 884		__update_runtime_status(dev, RPM_ACTIVE);
 885		pm_runtime_mark_last_busy(dev);
 886		if (parent)
 887			atomic_inc(&parent->power.child_count);
 888	}
 889	wake_up_all(&dev->power.wait_queue);
 890
 891	if (retval >= 0)
 892		rpm_idle(dev, RPM_ASYNC);
 893
 894 out:
 895	if (parent && !dev->power.irq_safe) {
 896		spin_unlock_irq(&dev->power.lock);
 897
 898		pm_runtime_put(parent);
 899
 900		spin_lock_irq(&dev->power.lock);
 901	}
 902
 903	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 904
 905	return retval;
 906}
 907
 908/**
 909 * pm_runtime_work - Universal runtime PM work function.
 910 * @work: Work structure used for scheduling the execution of this function.
 911 *
 912 * Use @work to get the device object the work is to be done for, determine what
 913 * is to be done and execute the appropriate runtime PM function.
 914 */
 915static void pm_runtime_work(struct work_struct *work)
 916{
 917	struct device *dev = container_of(work, struct device, power.work);
 918	enum rpm_request req;
 919
 920	spin_lock_irq(&dev->power.lock);
 921
 922	if (!dev->power.request_pending)
 923		goto out;
 924
 925	req = dev->power.request;
 926	dev->power.request = RPM_REQ_NONE;
 927	dev->power.request_pending = false;
 928
 929	switch (req) {
 930	case RPM_REQ_NONE:
 931		break;
 932	case RPM_REQ_IDLE:
 933		rpm_idle(dev, RPM_NOWAIT);
 934		break;
 935	case RPM_REQ_SUSPEND:
 936		rpm_suspend(dev, RPM_NOWAIT);
 937		break;
 938	case RPM_REQ_AUTOSUSPEND:
 939		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 940		break;
 941	case RPM_REQ_RESUME:
 942		rpm_resume(dev, RPM_NOWAIT);
 943		break;
 944	}
 945
 946 out:
 947	spin_unlock_irq(&dev->power.lock);
 948}
 949
 950/**
 951 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 952 * @timer: hrtimer used by pm_schedule_suspend().
 953 *
 954 * Check if the time is right and queue a suspend request.
 955 */
  956static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 957{
 958	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 959	unsigned long flags;
 960	u64 expires;
 961
 962	spin_lock_irqsave(&dev->power.lock, flags);
 963
 964	expires = dev->power.timer_expires;
 965	/*
  966	 * If 'expires' is after the current time, the timer has fired too
  967	 * early; only proceed with the suspend once it has actually expired.
 968	 */
 969	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 970		dev->power.timer_expires = 0;
 971		rpm_suspend(dev, dev->power.timer_autosuspends ?
 972		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 973	}
 974
 975	spin_unlock_irqrestore(&dev->power.lock, flags);
 976
 977	return HRTIMER_NORESTART;
 978}
 979
 980/**
 981 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 982 * @dev: Device to suspend.
 983 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 984 */
 985int pm_schedule_suspend(struct device *dev, unsigned int delay)
 986{
 987	unsigned long flags;
 988	u64 expires;
 989	int retval;
 990
 991	spin_lock_irqsave(&dev->power.lock, flags);
 992
 993	if (!delay) {
 994		retval = rpm_suspend(dev, RPM_ASYNC);
 995		goto out;
 996	}
 997
 998	retval = rpm_check_suspend_allowed(dev);
 999	if (retval)
1000		goto out;
1001
1002	/* Other scheduled or pending requests need to be canceled. */
1003	pm_runtime_cancel_pending(dev);
1004
1005	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1006	dev->power.timer_expires = expires;
1007	dev->power.timer_autosuspends = 0;
1008	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1009
1010 out:
1011	spin_unlock_irqrestore(&dev->power.lock, flags);
1012
1013	return retval;
1014}
1015EXPORT_SYMBOL_GPL(pm_schedule_suspend);
1016
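/*
 * Illustrative sketch: a hypothetical driver that knows its hardware will
 * stay idle for a while can ask for a suspend request to be submitted later
 * instead of suspending synchronously:
 */
#if 0
static void foo_mark_idle(struct device *dev)
{
	int ret = pm_schedule_suspend(dev, 100);	/* submit in ~100 ms */

	if (ret < 0)
		dev_warn(dev, "cannot schedule suspend: %d\n", ret);
}
#endif
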
1017/**
1018 * __pm_runtime_idle - Entry point for runtime idle operations.
1019 * @dev: Device to send idle notification for.
1020 * @rpmflags: Flag bits.
1021 *
1022 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1023 * return immediately if it is larger than zero.  Then carry out an idle
1024 * notification, either synchronous or asynchronous.
1025 *
1026 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1027 * or if pm_runtime_irq_safe() has been called.
1028 */
1029int __pm_runtime_idle(struct device *dev, int rpmflags)
1030{
1031	unsigned long flags;
1032	int retval;
1033
1034	if (rpmflags & RPM_GET_PUT) {
1035		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1036			trace_rpm_usage_rcuidle(dev, rpmflags);
1037			return 0;
1038		}
1039	}
1040
1041	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1042
1043	spin_lock_irqsave(&dev->power.lock, flags);
1044	retval = rpm_idle(dev, rpmflags);
1045	spin_unlock_irqrestore(&dev->power.lock, flags);
1046
1047	return retval;
1048}
1049EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1050
1051/**
1052 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1053 * @dev: Device to suspend.
1054 * @rpmflags: Flag bits.
1055 *
1056 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1057 * return immediately if it is larger than zero.  Then carry out a suspend,
1058 * either synchronous or asynchronous.
1059 *
1060 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1061 * or if pm_runtime_irq_safe() has been called.
1062 */
1063int __pm_runtime_suspend(struct device *dev, int rpmflags)
1064{
1065	unsigned long flags;
1066	int retval;
1067
1068	if (rpmflags & RPM_GET_PUT) {
1069		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1070			trace_rpm_usage_rcuidle(dev, rpmflags);
1071			return 0;
1072		}
1073	}
1074
1075	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1076
1077	spin_lock_irqsave(&dev->power.lock, flags);
1078	retval = rpm_suspend(dev, rpmflags);
1079	spin_unlock_irqrestore(&dev->power.lock, flags);
1080
1081	return retval;
1082}
1083EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1084
1085/**
1086 * __pm_runtime_resume - Entry point for runtime resume operations.
1087 * @dev: Device to resume.
1088 * @rpmflags: Flag bits.
1089 *
1090 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1091 * carry out a resume, either synchronous or asynchronous.
1092 *
1093 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1094 * or if pm_runtime_irq_safe() has been called.
1095 */
1096int __pm_runtime_resume(struct device *dev, int rpmflags)
1097{
1098	unsigned long flags;
1099	int retval;
1100
1101	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1102			dev->power.runtime_status != RPM_ACTIVE);
1103
1104	if (rpmflags & RPM_GET_PUT)
1105		atomic_inc(&dev->power.usage_count);
1106
1107	spin_lock_irqsave(&dev->power.lock, flags);
1108	retval = rpm_resume(dev, rpmflags);
1109	spin_unlock_irqrestore(&dev->power.lock, flags);
1110
1111	return retval;
1112}
1113EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1114
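/*
 * Illustrative sketch: the pm_runtime_get_*()/pm_runtime_put*() helpers in
 * <linux/pm_runtime.h> funnel into the three entry points above.  The
 * canonical pattern around a hardware access in a hypothetical driver is:
 */
#if 0
static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* usage_count++, resume if needed */
	if (ret < 0)
		return ret;

	/* ... carry out the hardware access ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* usage_count--, suspend later */
	return 0;
}
#endif
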
1115/**
1116 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1117 * @dev: Device to handle.
1118 * @ign_usage_count: Whether or not to look at the current usage counter value.
1119 *
1120 * Return -EINVAL if runtime PM is disabled for @dev.
1121 *
1122 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1123 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1124 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1125 * without changing the usage counter.
1126 *
1127 * If @ign_usage_count is %true, this function can be used to prevent suspending
1128 * the device when its runtime PM status is %RPM_ACTIVE.
1129 *
1130 * If @ign_usage_count is %false, this function can be used to prevent
1131 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1132 * runtime PM usage counter is not zero.
1133 *
1134 * The caller is responsible for decrementing the runtime PM usage counter of
1135 * @dev after this function has returned a positive value for it.
1136 */
1137int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1138{
1139	unsigned long flags;
1140	int retval;
1141
1142	spin_lock_irqsave(&dev->power.lock, flags);
1143	if (dev->power.disable_depth > 0) {
1144		retval = -EINVAL;
1145	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1146		retval = 0;
1147	} else if (ign_usage_count) {
1148		retval = 1;
1149		atomic_inc(&dev->power.usage_count);
1150	} else {
1151		retval = atomic_inc_not_zero(&dev->power.usage_count);
1152	}
1153	trace_rpm_usage_rcuidle(dev, 0);
1154	spin_unlock_irqrestore(&dev->power.lock, flags);
1155
1156	return retval;
1157}
1158EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1159
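/*
 * Illustrative sketch: pm_runtime_get_if_active() lets a hypothetical driver
 * touch the hardware only when it is already powered, e.g. to flush state
 * opportunistically, without ever resuming it (foo_flush_hw() is made up):
 */
#if 0
static void foo_flush_if_powered(struct device *dev)
{
	/* Returns 1 only if the device was RPM_ACTIVE; usage_count was bumped. */
	if (pm_runtime_get_if_active(dev, true) > 0) {
		foo_flush_hw(dev);	/* hypothetical hardware access */
		pm_runtime_put(dev);
	}
}
#endif
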
1160/**
1161 * __pm_runtime_set_status - Set runtime PM status of a device.
1162 * @dev: Device to handle.
1163 * @status: New runtime PM status of the device.
1164 *
1165 * If runtime PM of the device is disabled or its power.runtime_error field is
1166 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1167 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1168 * However, if the device has a parent and the parent is not active, and the
1169 * parent's power.ignore_children flag is unset, the device's status cannot be
1170 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1171 *
1172 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1173 * and the device parent's counter of unsuspended children is modified to
1174 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1175 * notification request for the parent is submitted.
1176 *
1177 * If @dev has any suppliers (as reflected by device links to them), and @status
1178 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1179 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 1180 * of the @status value) and the suppliers will be deactivated on exit.  The
1181 * error returned by the failing supplier activation will be returned in that
1182 * case.
1183 */
1184int __pm_runtime_set_status(struct device *dev, unsigned int status)
1185{
1186	struct device *parent = dev->parent;
1187	bool notify_parent = false;
1188	int error = 0;
1189
1190	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1191		return -EINVAL;
1192
1193	spin_lock_irq(&dev->power.lock);
1194
1195	/*
1196	 * Prevent PM-runtime from being enabled for the device or return an
1197	 * error if it is enabled already and working.
1198	 */
1199	if (dev->power.runtime_error || dev->power.disable_depth)
1200		dev->power.disable_depth++;
1201	else
1202		error = -EAGAIN;
1203
1204	spin_unlock_irq(&dev->power.lock);
1205
1206	if (error)
1207		return error;
1208
1209	/*
1210	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1211	 * upfront regardless of the current status, because next time
1212	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1213	 * involved will be dropped down to one anyway.
1214	 */
1215	if (status == RPM_ACTIVE) {
1216		int idx = device_links_read_lock();
1217
1218		error = rpm_get_suppliers(dev);
1219		if (error)
1220			status = RPM_SUSPENDED;
1221
1222		device_links_read_unlock(idx);
1223	}
1224
1225	spin_lock_irq(&dev->power.lock);
1226
1227	if (dev->power.runtime_status == status || !parent)
1228		goto out_set;
1229
1230	if (status == RPM_SUSPENDED) {
1231		atomic_add_unless(&parent->power.child_count, -1, 0);
1232		notify_parent = !parent->power.ignore_children;
1233	} else {
1234		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1235
1236		/*
 1237		 * It is invalid to put an active child under a parent that is
 1238		 * not active, has runtime PM enabled, and has the
 1239		 * 'power.ignore_children' flag unset.
1240		 */
1241		if (!parent->power.disable_depth
1242		    && !parent->power.ignore_children
1243		    && parent->power.runtime_status != RPM_ACTIVE) {
1244			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1245				dev_name(dev),
1246				dev_name(parent));
1247			error = -EBUSY;
1248		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1249			atomic_inc(&parent->power.child_count);
1250		}
1251
1252		spin_unlock(&parent->power.lock);
1253
1254		if (error) {
1255			status = RPM_SUSPENDED;
1256			goto out;
1257		}
1258	}
1259
1260 out_set:
1261	__update_runtime_status(dev, status);
1262	if (!error)
1263		dev->power.runtime_error = 0;
1264
1265 out:
1266	spin_unlock_irq(&dev->power.lock);
1267
1268	if (notify_parent)
1269		pm_request_idle(parent);
1270
1271	if (status == RPM_SUSPENDED) {
1272		int idx = device_links_read_lock();
1273
1274		rpm_put_suppliers(dev);
1275
1276		device_links_read_unlock(idx);
1277	}
1278
1279	pm_runtime_enable(dev);
1280
1281	return error;
1282}
1283EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1284
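/*
 * Illustrative sketch: a hypothetical probe() that finds the hardware
 * already powered up tells the core so before enabling runtime PM, via the
 * pm_runtime_set_active() wrapper around the function above:
 */
#if 0
static int foo_probe_pm_setup(struct device *dev)
{
	int ret;

	/* pm_runtime_set_active() wraps __pm_runtime_set_status(dev, RPM_ACTIVE). */
	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;

	pm_runtime_enable(dev);
	return 0;
}
#endif
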
1285/**
1286 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1287 * @dev: Device to handle.
1288 *
1289 * Flush all pending requests for the device from pm_wq and wait for all
1290 * runtime PM operations involving the device in progress to complete.
1291 *
1292 * Should be called under dev->power.lock with interrupts disabled.
1293 */
1294static void __pm_runtime_barrier(struct device *dev)
1295{
1296	pm_runtime_deactivate_timer(dev);
1297
1298	if (dev->power.request_pending) {
1299		dev->power.request = RPM_REQ_NONE;
1300		spin_unlock_irq(&dev->power.lock);
1301
1302		cancel_work_sync(&dev->power.work);
1303
1304		spin_lock_irq(&dev->power.lock);
1305		dev->power.request_pending = false;
1306	}
1307
1308	if (dev->power.runtime_status == RPM_SUSPENDING
1309	    || dev->power.runtime_status == RPM_RESUMING
1310	    || dev->power.idle_notification) {
1311		DEFINE_WAIT(wait);
1312
1313		/* Suspend, wake-up or idle notification in progress. */
1314		for (;;) {
1315			prepare_to_wait(&dev->power.wait_queue, &wait,
1316					TASK_UNINTERRUPTIBLE);
1317			if (dev->power.runtime_status != RPM_SUSPENDING
1318			    && dev->power.runtime_status != RPM_RESUMING
1319			    && !dev->power.idle_notification)
1320				break;
1321			spin_unlock_irq(&dev->power.lock);
1322
1323			schedule();
1324
1325			spin_lock_irq(&dev->power.lock);
1326		}
1327		finish_wait(&dev->power.wait_queue, &wait);
1328	}
1329}
1330
1331/**
1332 * pm_runtime_barrier - Flush pending requests and wait for completions.
1333 * @dev: Device to handle.
1334 *
1335 * Prevent the device from being suspended by incrementing its usage counter and
1336 * if there's a pending resume request for the device, wake the device up.
1337 * Next, make sure that all pending requests for the device have been flushed
1338 * from pm_wq and wait for all runtime PM operations involving the device in
1339 * progress to complete.
1340 *
1341 * Return value:
1342 * 1, if there was a resume request pending and the device had to be woken up,
1343 * 0, otherwise
1344 */
1345int pm_runtime_barrier(struct device *dev)
1346{
1347	int retval = 0;
1348
1349	pm_runtime_get_noresume(dev);
1350	spin_lock_irq(&dev->power.lock);
1351
1352	if (dev->power.request_pending
1353	    && dev->power.request == RPM_REQ_RESUME) {
1354		rpm_resume(dev, 0);
1355		retval = 1;
1356	}
1357
1358	__pm_runtime_barrier(dev);
1359
1360	spin_unlock_irq(&dev->power.lock);
1361	pm_runtime_put_noidle(dev);
1362
1363	return retval;
1364}
1365EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1366
1367/**
1368 * __pm_runtime_disable - Disable runtime PM of a device.
1369 * @dev: Device to handle.
1370 * @check_resume: If set, check if there's a resume request for the device.
1371 *
1372 * Increment power.disable_depth for the device and if it was zero previously,
1373 * cancel all pending runtime PM requests for the device and wait for all
1374 * operations in progress to complete.  The device can be either active or
1375 * suspended after its runtime PM has been disabled.
1376 *
1377 * If @check_resume is set and there's a resume request pending when
1378 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1379 * function will wake up the device before disabling its runtime PM.
1380 */
1381void __pm_runtime_disable(struct device *dev, bool check_resume)
1382{
1383	spin_lock_irq(&dev->power.lock);
1384
1385	if (dev->power.disable_depth > 0) {
1386		dev->power.disable_depth++;
1387		goto out;
1388	}
1389
1390	/*
1391	 * Wake up the device if there's a resume request pending, because that
1392	 * means there probably is some I/O to process and disabling runtime PM
1393	 * shouldn't prevent the device from processing the I/O.
1394	 */
1395	if (check_resume && dev->power.request_pending
1396	    && dev->power.request == RPM_REQ_RESUME) {
1397		/*
1398		 * Prevent suspends and idle notifications from being carried
1399		 * out after we have woken up the device.
1400		 */
1401		pm_runtime_get_noresume(dev);
1402
1403		rpm_resume(dev, 0);
1404
1405		pm_runtime_put_noidle(dev);
1406	}
1407
1408	/* Update time accounting before disabling PM-runtime. */
1409	update_pm_runtime_accounting(dev);
1410
1411	if (!dev->power.disable_depth++)
1412		__pm_runtime_barrier(dev);
1413
1414 out:
1415	spin_unlock_irq(&dev->power.lock);
1416}
1417EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1418
1419/**
1420 * pm_runtime_enable - Enable runtime PM of a device.
1421 * @dev: Device to handle.
1422 */
1423void pm_runtime_enable(struct device *dev)
1424{
1425	unsigned long flags;
1426
1427	spin_lock_irqsave(&dev->power.lock, flags);
1428
1429	if (dev->power.disable_depth > 0) {
1430		dev->power.disable_depth--;
1431
 1432		/* About to enable runtime PM; set accounting_timestamp to now. */
1433		if (!dev->power.disable_depth)
1434			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1435	} else {
1436		dev_warn(dev, "Unbalanced %s!\n", __func__);
1437	}
1438
1439	WARN(!dev->power.disable_depth &&
1440	     dev->power.runtime_status == RPM_SUSPENDED &&
1441	     !dev->power.ignore_children &&
1442	     atomic_read(&dev->power.child_count) > 0,
1443	     "Enabling runtime PM for inactive device (%s) with active children\n",
1444	     dev_name(dev));
1445
1446	spin_unlock_irqrestore(&dev->power.lock, flags);
1447}
1448EXPORT_SYMBOL_GPL(pm_runtime_enable);
1449
1450/**
1451 * pm_runtime_forbid - Block runtime PM of a device.
1452 * @dev: Device to handle.
1453 *
1454 * Increase the device's usage count and clear its power.runtime_auto flag,
1455 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1456 * for it.
1457 */
1458void pm_runtime_forbid(struct device *dev)
1459{
1460	spin_lock_irq(&dev->power.lock);
1461	if (!dev->power.runtime_auto)
1462		goto out;
1463
1464	dev->power.runtime_auto = false;
1465	atomic_inc(&dev->power.usage_count);
1466	rpm_resume(dev, 0);
1467
1468 out:
1469	spin_unlock_irq(&dev->power.lock);
1470}
1471EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1472
1473/**
1474 * pm_runtime_allow - Unblock runtime PM of a device.
1475 * @dev: Device to handle.
1476 *
1477 * Decrease the device's usage count and set its power.runtime_auto flag.
1478 */
1479void pm_runtime_allow(struct device *dev)
1480{
1481	spin_lock_irq(&dev->power.lock);
1482	if (dev->power.runtime_auto)
1483		goto out;
1484
1485	dev->power.runtime_auto = true;
1486	if (atomic_dec_and_test(&dev->power.usage_count))
1487		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1488	else
1489		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1490
1491 out:
1492	spin_unlock_irq(&dev->power.lock);
1493}
1494EXPORT_SYMBOL_GPL(pm_runtime_allow);
1495
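/*
 * Illustrative sketch: pm_runtime_forbid() and pm_runtime_allow() back the
 * power/control sysfs attribute ("on" vs "auto").  A hypothetical driver
 * that wants runtime PM off by default, but user-overridable, calls:
 */
#if 0
static void foo_probe_default_on(struct device *dev)
{
	/* Default to "on"; userspace may later write "auto" to power/control. */
	pm_runtime_forbid(dev);
}
#endif
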
1496/**
1497 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1498 * @dev: Device to handle.
1499 *
1500 * Set the power.no_callbacks flag, which tells the PM core that this
1501 * device is power-managed through its parent and has no runtime PM
1502 * callbacks of its own.  The runtime sysfs attributes will be removed.
1503 */
1504void pm_runtime_no_callbacks(struct device *dev)
1505{
1506	spin_lock_irq(&dev->power.lock);
1507	dev->power.no_callbacks = 1;
1508	spin_unlock_irq(&dev->power.lock);
1509	if (device_is_registered(dev))
1510		rpm_sysfs_remove(dev);
1511}
1512EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1513
1514/**
1515 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1516 * @dev: Device to handle
1517 *
1518 * Set the power.irq_safe flag, which tells the PM core that the
1519 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1520 * always be invoked with the spinlock held and interrupts disabled.  It also
1521 * causes the parent's usage counter to be permanently incremented, preventing
1522 * the parent from runtime suspending -- otherwise an irq-safe child might have
1523 * to wait for a non-irq-safe parent.
1524 */
1525void pm_runtime_irq_safe(struct device *dev)
1526{
1527	if (dev->parent)
1528		pm_runtime_get_sync(dev->parent);
1529	spin_lock_irq(&dev->power.lock);
1530	dev->power.irq_safe = 1;
1531	spin_unlock_irq(&dev->power.lock);
1532}
1533EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1534
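/*
 * Illustrative sketch: after pm_runtime_irq_safe(), a hypothetical driver
 * may make synchronous runtime PM calls from atomic context, at the cost of
 * keeping its parent permanently active (foo_write_reg() is made up):
 */
#if 0
/* In probe(): */
static void foo_probe_irq_safe(struct device *dev)
{
	pm_runtime_irq_safe(dev);
}

/* Later, even from atomic context: */
static void foo_poke_hw(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* busy-waits instead of sleeping */
	foo_write_reg(dev);		/* hypothetical hardware access */
	pm_runtime_put(dev);
}
#endif
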
1535/**
1536 * update_autosuspend - Handle a change to a device's autosuspend settings.
1537 * @dev: Device to handle.
1538 * @old_delay: The former autosuspend_delay value.
1539 * @old_use: The former use_autosuspend value.
1540 *
1541 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1542 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1543 *
1544 * This function must be called under dev->power.lock with interrupts disabled.
1545 */
1546static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1547{
1548	int delay = dev->power.autosuspend_delay;
1549
1550	/* Should runtime suspend be prevented now? */
1551	if (dev->power.use_autosuspend && delay < 0) {
1552
1553		/* If it used to be allowed then prevent it. */
1554		if (!old_use || old_delay >= 0) {
1555			atomic_inc(&dev->power.usage_count);
1556			rpm_resume(dev, 0);
1557		} else {
1558			trace_rpm_usage_rcuidle(dev, 0);
1559		}
1560	}
1561
1562	/* Runtime suspend should be allowed now. */
1563	else {
1564
1565		/* If it used to be prevented then allow it. */
1566		if (old_use && old_delay < 0)
1567			atomic_dec(&dev->power.usage_count);
1568
1569		/* Maybe we can autosuspend now. */
1570		rpm_idle(dev, RPM_AUTO);
1571	}
1572}
1573
1574/**
1575 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1576 * @dev: Device to handle.
1577 * @delay: Value of the new delay in milliseconds.
1578 *
1579 * Set the device's power.autosuspend_delay value.  If it changes to negative
1580 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1581 * changes the other way, allow runtime suspends.
1582 */
1583void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1584{
1585	int old_delay, old_use;
1586
1587	spin_lock_irq(&dev->power.lock);
1588	old_delay = dev->power.autosuspend_delay;
1589	old_use = dev->power.use_autosuspend;
1590	dev->power.autosuspend_delay = delay;
1591	update_autosuspend(dev, old_delay, old_use);
1592	spin_unlock_irq(&dev->power.lock);
1593}
1594EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1595
1596/**
1597 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1598 * @dev: Device to handle.
1599 * @use: New value for use_autosuspend.
1600 *
1601 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1602 * suspends as needed.
1603 */
1604void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1605{
1606	int old_delay, old_use;
1607
1608	spin_lock_irq(&dev->power.lock);
1609	old_delay = dev->power.autosuspend_delay;
1610	old_use = dev->power.use_autosuspend;
1611	dev->power.use_autosuspend = use;
1612	update_autosuspend(dev, old_delay, old_use);
1613	spin_unlock_irq(&dev->power.lock);
1614}
1615EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1616
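/*
 * Illustrative sketch: the usual autosuspend setup in a hypothetical
 * probe(), using the pm_runtime_use_autosuspend() wrapper around the
 * function above:
 */
#if 0
static void foo_probe_autosuspend_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of idleness */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}
#endif
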
1617/**
1618 * pm_runtime_init - Initialize runtime PM fields in given device object.
1619 * @dev: Device object to initialize.
1620 */
1621void pm_runtime_init(struct device *dev)
1622{
1623	dev->power.runtime_status = RPM_SUSPENDED;
1624	dev->power.idle_notification = false;
1625
1626	dev->power.disable_depth = 1;
1627	atomic_set(&dev->power.usage_count, 0);
1628
1629	dev->power.runtime_error = 0;
1630
1631	atomic_set(&dev->power.child_count, 0);
1632	pm_suspend_ignore_children(dev, false);
1633	dev->power.runtime_auto = true;
1634
1635	dev->power.request_pending = false;
1636	dev->power.request = RPM_REQ_NONE;
1637	dev->power.deferred_resume = false;
1638	dev->power.needs_force_resume = 0;
1639	INIT_WORK(&dev->power.work, pm_runtime_work);
1640
1641	dev->power.timer_expires = 0;
1642	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1643	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1644
1645	init_waitqueue_head(&dev->power.wait_queue);
1646}
1647
1648/**
1649 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1650 * @dev: Device object to re-initialize.
1651 */
1652void pm_runtime_reinit(struct device *dev)
1653{
1654	if (!pm_runtime_enabled(dev)) {
1655		if (dev->power.runtime_status == RPM_ACTIVE)
1656			pm_runtime_set_suspended(dev);
1657		if (dev->power.irq_safe) {
1658			spin_lock_irq(&dev->power.lock);
1659			dev->power.irq_safe = 0;
1660			spin_unlock_irq(&dev->power.lock);
1661			if (dev->parent)
1662				pm_runtime_put(dev->parent);
1663		}
1664	}
1665}
1666
1667/**
1668 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1669 * @dev: Device object being removed from device hierarchy.
1670 */
1671void pm_runtime_remove(struct device *dev)
1672{
1673	__pm_runtime_disable(dev, false);
1674	pm_runtime_reinit(dev);
1675}
1676
 1677/**
 1678 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1679 * @dev: Consumer device.
1680 */
1681void pm_runtime_get_suppliers(struct device *dev)
1682{
1683	struct device_link *link;
1684	int idx;
1685
1686	idx = device_links_read_lock();
1687
1688	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1689				device_links_read_lock_held())
1690		if (link->flags & DL_FLAG_PM_RUNTIME) {
1691			link->supplier_preactivated = true;
1692			pm_runtime_get_sync(link->supplier);
 1693			refcount_inc(&link->rpm_active);
 1694		}
1695
1696	device_links_read_unlock(idx);
1697}
1698
1699/**
1700 * pm_runtime_put_suppliers - Drop references to supplier devices.
1701 * @dev: Consumer device.
1702 */
1703void pm_runtime_put_suppliers(struct device *dev)
1704{
1705	struct device_link *link;
1706	unsigned long flags;
1707	bool put;
1708	int idx;
1709
1710	idx = device_links_read_lock();
1711
1712	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1713				device_links_read_lock_held())
1714		if (link->supplier_preactivated) {
1715			link->supplier_preactivated = false;
1716			spin_lock_irqsave(&dev->power.lock, flags);
1717			put = pm_runtime_status_suspended(dev) &&
1718			      refcount_dec_not_one(&link->rpm_active);
1719			spin_unlock_irqrestore(&dev->power.lock, flags);
1720			if (put)
1721				pm_runtime_put(link->supplier);
1722		}
1723
1724	device_links_read_unlock(idx);
1725}
1726
1727void pm_runtime_new_link(struct device *dev)
1728{
1729	spin_lock_irq(&dev->power.lock);
1730	dev->power.links_count++;
1731	spin_unlock_irq(&dev->power.lock);
1732}
1733
1734static void pm_runtime_drop_link_count(struct device *dev)
1735{
1736	spin_lock_irq(&dev->power.lock);
1737	WARN_ON(dev->power.links_count == 0);
1738	dev->power.links_count--;
1739	spin_unlock_irq(&dev->power.lock);
1740}
1741
1742/**
1743 * pm_runtime_drop_link - Prepare for device link removal.
1744 * @link: Device link going away.
1745 *
1746 * Drop the link count of the consumer end of @link and decrement the supplier
 1747 * device's runtime PM usage counter as many times as needed to drop all of the
 1748 * PM-runtime references to it from the consumer.
1749 */
1750void pm_runtime_drop_link(struct device_link *link)
1751{
1752	if (!(link->flags & DL_FLAG_PM_RUNTIME))
1753		return;
1754
1755	pm_runtime_drop_link_count(link->consumer);
1756
1757	while (refcount_dec_not_one(&link->rpm_active))
1758		pm_runtime_put(link->supplier);
1759}
1760
1761static bool pm_runtime_need_not_resume(struct device *dev)
1762{
1763	return atomic_read(&dev->power.usage_count) <= 1 &&
1764		(atomic_read(&dev->power.child_count) == 0 ||
1765		 dev->power.ignore_children);
1766}
1767
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so that we can safely check the device's runtime PM
 * status and, if it is active, invoke its ->runtime_suspend callback to
 * suspend it and change its runtime PM status field to RPM_SUSPENDED.  Also,
 * if the device's usage and children counters don't indicate that the device
 * was in use before the system-wide transition under way, decrement its
 * parent's children counter (if there is a parent).  Keep runtime PM disabled
 * to preserve the state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state.  It should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
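
/*
 * Example: a minimal sketch of the "typical" use described above, assuming a
 * hypothetical driver "foo" that needs a little extra work on top of what its
 * runtime PM callbacks already do:
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	/* Quiesce whatever runtime PM does not already cover here... */

	/* ...then let the force helper do the actual power transition. */
	return pm_runtime_force_suspend(dev);
}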

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend().
 * Here we reverse those actions and bring the device into full power, if it
 * is expected to be used on system resume.  Otherwise, we defer the resume to
 * be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
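
/*
 * Example: when nothing beyond the force helpers is needed, drivers commonly
 * point their system sleep operations straight at them.  A minimal sketch,
 * assuming hypothetical foo_runtime_suspend()/foo_runtime_resume() callbacks:
 */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* stub: put the device into its low-power state here */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* stub: bring the device back to full power here */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};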
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   7 */
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  50
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65static void update_pm_runtime_accounting(struct device *dev)
  66{
  67	u64 now, last, delta;
  68
  69	if (dev->power.disable_depth > 0)
  70		return;
  71
  72	last = dev->power.accounting_timestamp;
  73
  74	now = ktime_get_mono_fast_ns();
  75	dev->power.accounting_timestamp = now;
  76
  77	/*
  78	 * Because ktime_get_mono_fast_ns() is not monotonic during
  79	 * timekeeping updates, ensure that 'now' is after the last saved
  80	 * timesptamp.
  81	 */
  82	if (now < last)
  83		return;
  84
  85	delta = now - last;
  86
  87	if (dev->power.runtime_status == RPM_SUSPENDED)
  88		dev->power.suspended_time += delta;
  89	else
  90		dev->power.active_time += delta;
  91}
  92
  93static void __update_runtime_status(struct device *dev, enum rpm_status status)
  94{
  95	update_pm_runtime_accounting(dev);
  96	dev->power.runtime_status = status;
  97}
  98
  99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 100{
 101	u64 time;
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&dev->power.lock, flags);
 105
 106	update_pm_runtime_accounting(dev);
 107	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 108
 109	spin_unlock_irqrestore(&dev->power.lock, flags);
 110
 111	return time;
 112}
 113
 114u64 pm_runtime_active_time(struct device *dev)
 115{
 116	return rpm_get_accounted_time(dev, false);
 117}
 118
 119u64 pm_runtime_suspended_time(struct device *dev)
 120{
 121	return rpm_get_accounted_time(dev, true);
 122}
 123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 124
 125/**
 126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 127 * @dev: Device to handle.
 128 */
 129static void pm_runtime_deactivate_timer(struct device *dev)
 130{
 131	if (dev->power.timer_expires > 0) {
 132		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 133		dev->power.timer_expires = 0;
 134	}
 135}
 136
 137/**
 138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 139 * @dev: Device to handle.
 140 */
 141static void pm_runtime_cancel_pending(struct device *dev)
 142{
 143	pm_runtime_deactivate_timer(dev);
 144	/*
 145	 * In case there's a request pending, make sure its work function will
 146	 * return without doing anything.
 147	 */
 148	dev->power.request = RPM_REQ_NONE;
 149}
 150
 151/*
 152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 153 * @dev: Device to handle.
 154 *
 155 * Compute the autosuspend-delay expiration time based on the device's
 156 * power.last_busy time.  If the delay has already expired or is disabled
 157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 159 *
 160 * This function may be called either with or without dev->power.lock held.
 161 * Either way it can be racy, since power.last_busy may be updated at any time.
 162 */
 163u64 pm_runtime_autosuspend_expiration(struct device *dev)
 164{
 165	int autosuspend_delay;
 166	u64 expires;
 167
 168	if (!dev->power.use_autosuspend)
 169		return 0;
 170
 171	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 172	if (autosuspend_delay < 0)
 173		return 0;
 174
 175	expires  = READ_ONCE(dev->power.last_busy);
 176	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 177	if (expires > ktime_get_mono_fast_ns())
 178		return expires;	/* Expires in the future */
 179
 180	return 0;
 181}
 182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 183
 184static int dev_memalloc_noio(struct device *dev, void *data)
 185{
 186	return dev->power.memalloc_noio;
 187}
 188
 189/*
 190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 191 * @dev: Device to handle.
 192 * @enable: True for setting the flag and False for clearing the flag.
 193 *
 194 * Set the flag for all devices in the path from the device to the
 195 * root device in the device tree if @enable is true, otherwise clear
 196 * the flag for devices in the path whose siblings don't set the flag.
 197 *
 198 * The function should only be called by block device, or network
 199 * device driver for solving the deadlock problem during runtime
 200 * resume/suspend:
 201 *
 202 *     If memory allocation with GFP_KERNEL is called inside runtime
 203 *     resume/suspend callback of any one of its ancestors(or the
 204 *     block device itself), the deadlock may be triggered inside the
 205 *     memory allocation since it might not complete until the block
 206 *     device becomes active and the involed page I/O finishes. The
 207 *     situation is pointed out first by Alan Stern. Network device
 208 *     are involved in iSCSI kind of situation.
 209 *
 210 * The lock of dev_hotplug_mutex is held in the function for handling
 211 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 212 * in async probe().
 213 *
 214 * The function should be called between device_add() and device_del()
 215 * on the affected device(block/network device).
 216 */
 217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 218{
 219	static DEFINE_MUTEX(dev_hotplug_mutex);
 220
 221	mutex_lock(&dev_hotplug_mutex);
 222	for (;;) {
 223		bool enabled;
 224
 225		/* hold power lock since bitfield is not SMP-safe. */
 226		spin_lock_irq(&dev->power.lock);
 227		enabled = dev->power.memalloc_noio;
 228		dev->power.memalloc_noio = enable;
 229		spin_unlock_irq(&dev->power.lock);
 230
 231		/*
 232		 * not need to enable ancestors any more if the device
 233		 * has been enabled.
 234		 */
 235		if (enabled && enable)
 236			break;
 237
 238		dev = dev->parent;
 239
 240		/*
 241		 * clear flag of the parent device only if all the
 242		 * children don't set the flag because ancestor's
 243		 * flag was set by any one of the descendants.
 244		 */
 245		if (!dev || (!enable &&
 246			     device_for_each_child(dev, NULL,
 247						   dev_memalloc_noio)))
 248			break;
 249	}
 250	mutex_unlock(&dev_hotplug_mutex);
 251}
 252EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
 253
 254/**
 255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 256 * @dev: Device to test.
 257 */
 258static int rpm_check_suspend_allowed(struct device *dev)
 259{
 260	int retval = 0;
 261
 262	if (dev->power.runtime_error)
 263		retval = -EINVAL;
 264	else if (dev->power.disable_depth > 0)
 265		retval = -EACCES;
 266	else if (atomic_read(&dev->power.usage_count) > 0)
 267		retval = -EAGAIN;
 268	else if (!dev->power.ignore_children &&
 269			atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume
 274			&& dev->power.runtime_status == RPM_SUSPENDING)
 275	    || (dev->power.request_pending
 276			&& dev->power.request == RPM_REQ_RESUME))
 277		retval = -EAGAIN;
 278	else if (__dev_pm_qos_resume_latency(dev) == 0)
 279		retval = -EPERM;
 280	else if (dev->power.runtime_status == RPM_SUSPENDED)
 281		retval = 1;
 282
 283	return retval;
 284}
 285
 286static int rpm_get_suppliers(struct device *dev)
 287{
 288	struct device_link *link;
 289
 290	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 291				device_links_read_lock_held()) {
 292		int retval;
 293
 294		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
 295		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 296			continue;
 297
 298		retval = pm_runtime_get_sync(link->supplier);
 299		/* Ignore suppliers with disabled runtime PM. */
 300		if (retval < 0 && retval != -EACCES) {
 301			pm_runtime_put_noidle(link->supplier);
 302			return retval;
 303		}
 304		refcount_inc(&link->rpm_active);
 305	}
 306	return 0;
 307}
 308
 309static void rpm_put_suppliers(struct device *dev)
 310{
 311	struct device_link *link;
 312
 313	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 314				device_links_read_lock_held()) {
 315		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 316			continue;
 317
 318		while (refcount_dec_not_one(&link->rpm_active))
 319			pm_runtime_put(link->supplier);
 
 
 
 320	}
 321}
 322
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 323/**
 324 * __rpm_callback - Run a given runtime PM callback for a given device.
 325 * @cb: Runtime PM callback to run.
 326 * @dev: Device to run the callback for.
 327 */
 328static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 329	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 330{
 331	int retval, idx;
 332	bool use_links = dev->power.links_count > 0;
 333
 334	if (dev->power.irq_safe) {
 335		spin_unlock(&dev->power.lock);
 336	} else {
 337		spin_unlock_irq(&dev->power.lock);
 338
 339		/*
 340		 * Resume suppliers if necessary.
 341		 *
 342		 * The device's runtime PM status cannot change until this
 343		 * routine returns, so it is safe to read the status outside of
 344		 * the lock.
 345		 */
 346		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 347			idx = device_links_read_lock();
 348
 349			retval = rpm_get_suppliers(dev);
 350			if (retval)
 
 351				goto fail;
 
 352
 353			device_links_read_unlock(idx);
 354		}
 355	}
 356
 357	retval = cb(dev);
 
 358
 359	if (dev->power.irq_safe) {
 360		spin_lock(&dev->power.lock);
 361	} else {
 362		/*
 363		 * If the device is suspending and the callback has returned
 364		 * success, drop the usage counters of the suppliers that have
 365		 * been reference counted on its resume.
 366		 *
 367		 * Do that if resume fails too.
 368		 */
 369		if (use_links
 370		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 371		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 372			idx = device_links_read_lock();
 373
 374 fail:
 375			rpm_put_suppliers(dev);
 376
 
 377			device_links_read_unlock(idx);
 378		}
 379
 380		spin_lock_irq(&dev->power.lock);
 381	}
 382
 383	return retval;
 384}
 385
 386/**
 387 * rpm_idle - Notify device bus type if the device can be suspended.
 388 * @dev: Device to notify the bus type about.
 389 * @rpmflags: Flag bits.
 390 *
 391 * Check if the device's runtime PM status allows it to be suspended.  If
 392 * another idle notification has been started earlier, return immediately.  If
 393 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 394 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 395 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 396 *
 397 * This function must be called under dev->power.lock with interrupts disabled.
 398 */
 399static int rpm_idle(struct device *dev, int rpmflags)
 400{
 401	int (*callback)(struct device *);
 402	int retval;
 403
 404	trace_rpm_idle_rcuidle(dev, rpmflags);
 405	retval = rpm_check_suspend_allowed(dev);
 406	if (retval < 0)
 407		;	/* Conditions are wrong. */
 408
 409	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 410	else if (dev->power.runtime_status != RPM_ACTIVE)
 411		retval = -EAGAIN;
 412
 413	/*
 414	 * Any pending request other than an idle notification takes
 415	 * precedence over us, except that the timer may be running.
 416	 */
 417	else if (dev->power.request_pending &&
 418	    dev->power.request > RPM_REQ_IDLE)
 419		retval = -EAGAIN;
 420
 421	/* Act as though RPM_NOWAIT is always set. */
 422	else if (dev->power.idle_notification)
 423		retval = -EINPROGRESS;
 424	if (retval)
 425		goto out;
 426
 427	/* Pending requests need to be canceled. */
 428	dev->power.request = RPM_REQ_NONE;
 429
 430	if (dev->power.no_callbacks)
 
 
 
 431		goto out;
 432
 433	/* Carry out an asynchronous or a synchronous idle notification. */
 434	if (rpmflags & RPM_ASYNC) {
 435		dev->power.request = RPM_REQ_IDLE;
 436		if (!dev->power.request_pending) {
 437			dev->power.request_pending = true;
 438			queue_work(pm_wq, &dev->power.work);
 439		}
 440		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 441		return 0;
 442	}
 443
 444	dev->power.idle_notification = true;
 445
 446	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 447
 448	if (callback)
 449		retval = __rpm_callback(callback, dev);
 450
 451	dev->power.idle_notification = false;
 452	wake_up_all(&dev->power.wait_queue);
 453
 454 out:
 455	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 456	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 457}
 458
 459/**
 460 * rpm_callback - Run a given runtime PM callback for a given device.
 461 * @cb: Runtime PM callback to run.
 462 * @dev: Device to run the callback for.
 463 */
 464static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 465{
 466	int retval;
 467
 468	if (!cb)
 469		return -ENOSYS;
 470
 471	if (dev->power.memalloc_noio) {
 472		unsigned int noio_flag;
 473
 474		/*
 475		 * Deadlock might be caused if memory allocation with
 476		 * GFP_KERNEL happens inside runtime_suspend and
 477		 * runtime_resume callbacks of one block device's
 478		 * ancestor or the block device itself. Network
 479		 * device might be thought as part of iSCSI block
 480		 * device, so network device and its ancestor should
 481		 * be marked as memalloc_noio too.
 482		 */
 483		noio_flag = memalloc_noio_save();
 484		retval = __rpm_callback(cb, dev);
 485		memalloc_noio_restore(noio_flag);
 486	} else {
 487		retval = __rpm_callback(cb, dev);
 488	}
 489
 490	dev->power.runtime_error = retval;
 491	return retval != -EACCES ? retval : -EIO;
 492}
 493
 494/**
 495 * rpm_suspend - Carry out runtime suspend of given device.
 496 * @dev: Device to suspend.
 497 * @rpmflags: Flag bits.
 498 *
 499 * Check if the device's runtime PM status allows it to be suspended.
 500 * Cancel a pending idle notification, autosuspend or suspend. If
 501 * another suspend has been started earlier, either return immediately
 502 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 503 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 504 * otherwise run the ->runtime_suspend() callback directly. When
 505 * ->runtime_suspend succeeded, if a deferred resume was requested while
 506 * the callback was running then carry it out, otherwise send an idle
 507 * notification for its parent (if the suspend succeeded and both
 508 * ignore_children of parent->power and irq_safe of dev->power are not set).
 509 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 510 * flag is set and the next autosuspend-delay expiration time is in the
 511 * future, schedule another autosuspend attempt.
 512 *
 513 * This function must be called under dev->power.lock with interrupts disabled.
 514 */
 515static int rpm_suspend(struct device *dev, int rpmflags)
 516	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 517{
 518	int (*callback)(struct device *);
 519	struct device *parent = NULL;
 520	int retval;
 521
 522	trace_rpm_suspend_rcuidle(dev, rpmflags);
 523
 524 repeat:
 525	retval = rpm_check_suspend_allowed(dev);
 526
 527	if (retval < 0)
 528		;	/* Conditions are wrong. */
 529
 530	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 531	else if (dev->power.runtime_status == RPM_RESUMING &&
 532	    !(rpmflags & RPM_ASYNC))
 533		retval = -EAGAIN;
 534	if (retval)
 535		goto out;
 536
 537	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 538	if ((rpmflags & RPM_AUTO)
 539	    && dev->power.runtime_status != RPM_SUSPENDING) {
 540		u64 expires = pm_runtime_autosuspend_expiration(dev);
 541
 542		if (expires != 0) {
 543			/* Pending requests need to be canceled. */
 544			dev->power.request = RPM_REQ_NONE;
 545
 546			/*
 547			 * Optimization: If the timer is already running and is
 548			 * set to expire at or before the autosuspend delay,
 549			 * avoid the overhead of resetting it.  Just let it
 550			 * expire; pm_suspend_timer_fn() will take care of the
 551			 * rest.
 552			 */
 553			if (!(dev->power.timer_expires &&
 554					dev->power.timer_expires <= expires)) {
 555				/*
 556				 * We add a slack of 25% to gather wakeups
 557				 * without sacrificing the granularity.
 558				 */
 559				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 560						    (NSEC_PER_MSEC >> 2);
 561
 562				dev->power.timer_expires = expires;
 563				hrtimer_start_range_ns(&dev->power.suspend_timer,
 564						ns_to_ktime(expires),
 565						slack,
 566						HRTIMER_MODE_ABS);
 567			}
 568			dev->power.timer_autosuspends = 1;
 569			goto out;
 570		}
 571	}
 572
 573	/* Other scheduled or pending requests need to be canceled. */
 574	pm_runtime_cancel_pending(dev);
 575
 576	if (dev->power.runtime_status == RPM_SUSPENDING) {
 577		DEFINE_WAIT(wait);
 578
 579		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 580			retval = -EINPROGRESS;
 581			goto out;
 582		}
 583
 584		if (dev->power.irq_safe) {
 585			spin_unlock(&dev->power.lock);
 586
 587			cpu_relax();
 588
 589			spin_lock(&dev->power.lock);
 590			goto repeat;
 591		}
 592
 593		/* Wait for the other suspend running in parallel with us. */
 594		for (;;) {
 595			prepare_to_wait(&dev->power.wait_queue, &wait,
 596					TASK_UNINTERRUPTIBLE);
 597			if (dev->power.runtime_status != RPM_SUSPENDING)
 598				break;
 599
 600			spin_unlock_irq(&dev->power.lock);
 601
 602			schedule();
 603
 604			spin_lock_irq(&dev->power.lock);
 605		}
 606		finish_wait(&dev->power.wait_queue, &wait);
 607		goto repeat;
 608	}
 609
 610	if (dev->power.no_callbacks)
 611		goto no_callback;	/* Assume success. */
 612
 613	/* Carry out an asynchronous or a synchronous suspend. */
 614	if (rpmflags & RPM_ASYNC) {
 615		dev->power.request = (rpmflags & RPM_AUTO) ?
 616		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 617		if (!dev->power.request_pending) {
 618			dev->power.request_pending = true;
 619			queue_work(pm_wq, &dev->power.work);
 620		}
 621		goto out;
 622	}
 623
 624	__update_runtime_status(dev, RPM_SUSPENDING);
 625
 626	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 627
 628	dev_pm_enable_wake_irq_check(dev, true);
 629	retval = rpm_callback(callback, dev);
 630	if (retval)
 631		goto fail;
 632
 633 no_callback:
 634	__update_runtime_status(dev, RPM_SUSPENDED);
 635	pm_runtime_deactivate_timer(dev);
 636
 637	if (dev->parent) {
 638		parent = dev->parent;
 639		atomic_add_unless(&parent->power.child_count, -1, 0);
 640	}
 641	wake_up_all(&dev->power.wait_queue);
 642
 643	if (dev->power.deferred_resume) {
 644		dev->power.deferred_resume = false;
 645		rpm_resume(dev, 0);
 646		retval = -EAGAIN;
 647		goto out;
 648	}
 649
 
 
 
 650	/* Maybe the parent is now able to suspend. */
 651	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 652		spin_unlock(&dev->power.lock);
 653
 654		spin_lock(&parent->power.lock);
 655		rpm_idle(parent, RPM_ASYNC);
 656		spin_unlock(&parent->power.lock);
 657
 658		spin_lock(&dev->power.lock);
 659	}
 
 
 
 
 
 
 
 
 660
 661 out:
 662	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 663
 664	return retval;
 665
 666 fail:
 667	dev_pm_disable_wake_irq_check(dev);
 668	__update_runtime_status(dev, RPM_ACTIVE);
 669	dev->power.deferred_resume = false;
 670	wake_up_all(&dev->power.wait_queue);
 671
 672	if (retval == -EAGAIN || retval == -EBUSY) {
 673		dev->power.runtime_error = 0;
 674
 675		/*
 676		 * If the callback routine failed an autosuspend, and
 677		 * if the last_busy time has been updated so that there
 678		 * is a new autosuspend expiration time, automatically
 679		 * reschedule another autosuspend.
 680		 */
 681		if ((rpmflags & RPM_AUTO) &&
 682		    pm_runtime_autosuspend_expiration(dev) != 0)
 683			goto repeat;
 684	} else {
 685		pm_runtime_cancel_pending(dev);
 686	}
 687	goto out;
 688}
 689
 690/**
 691 * rpm_resume - Carry out runtime resume of given device.
 692 * @dev: Device to resume.
 693 * @rpmflags: Flag bits.
 694 *
 695 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 696 * any scheduled or pending requests.  If another resume has been started
 697 * earlier, either return immediately or wait for it to finish, depending on the
 698 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 699 * parallel with this function, either tell the other process to resume after
 700 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 701 * flag is set then queue a resume request; otherwise run the
 702 * ->runtime_resume() callback directly.  Queue an idle notification for the
 703 * device if the resume succeeded.
 704 *
 705 * This function must be called under dev->power.lock with interrupts disabled.
 706 */
 707static int rpm_resume(struct device *dev, int rpmflags)
 708	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 709{
 710	int (*callback)(struct device *);
 711	struct device *parent = NULL;
 712	int retval = 0;
 713
 714	trace_rpm_resume_rcuidle(dev, rpmflags);
 715
 716 repeat:
 717	if (dev->power.runtime_error)
 718		retval = -EINVAL;
 719	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 720	    && dev->power.runtime_status == RPM_ACTIVE)
 721		retval = 1;
 722	else if (dev->power.disable_depth > 0)
 723		retval = -EACCES;
 724	if (retval)
 725		goto out;
 726
 727	/*
 728	 * Other scheduled or pending requests need to be canceled.  Small
 729	 * optimization: If an autosuspend timer is running, leave it running
 730	 * rather than cancelling it now only to restart it again in the near
 731	 * future.
 732	 */
 733	dev->power.request = RPM_REQ_NONE;
 734	if (!dev->power.timer_autosuspends)
 735		pm_runtime_deactivate_timer(dev);
 736
 737	if (dev->power.runtime_status == RPM_ACTIVE) {
 738		retval = 1;
 739		goto out;
 740	}
 741
 742	if (dev->power.runtime_status == RPM_RESUMING
 743	    || dev->power.runtime_status == RPM_SUSPENDING) {
 744		DEFINE_WAIT(wait);
 745
 746		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 747			if (dev->power.runtime_status == RPM_SUSPENDING)
 748				dev->power.deferred_resume = true;
 749			else
 750				retval = -EINPROGRESS;
 751			goto out;
 752		}
 753
 754		if (dev->power.irq_safe) {
 755			spin_unlock(&dev->power.lock);
 756
 757			cpu_relax();
 758
 759			spin_lock(&dev->power.lock);
 760			goto repeat;
 761		}
 762
 763		/* Wait for the operation carried out in parallel with us. */
 764		for (;;) {
 765			prepare_to_wait(&dev->power.wait_queue, &wait,
 766					TASK_UNINTERRUPTIBLE);
 767			if (dev->power.runtime_status != RPM_RESUMING
 768			    && dev->power.runtime_status != RPM_SUSPENDING)
 769				break;
 770
 771			spin_unlock_irq(&dev->power.lock);
 772
 773			schedule();
 774
 775			spin_lock_irq(&dev->power.lock);
 776		}
 777		finish_wait(&dev->power.wait_queue, &wait);
 778		goto repeat;
 779	}
 780
 781	/*
 782	 * See if we can skip waking up the parent.  This is safe only if
 783	 * power.no_callbacks is set, because otherwise we don't know whether
 784	 * the resume will actually succeed.
 785	 */
 786	if (dev->power.no_callbacks && !parent && dev->parent) {
 787		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 788		if (dev->parent->power.disable_depth > 0
 789		    || dev->parent->power.ignore_children
 790		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 791			atomic_inc(&dev->parent->power.child_count);
 792			spin_unlock(&dev->parent->power.lock);
 793			retval = 1;
 794			goto no_callback;	/* Assume success. */
 795		}
 796		spin_unlock(&dev->parent->power.lock);
 797	}
 798
 799	/* Carry out an asynchronous or a synchronous resume. */
 800	if (rpmflags & RPM_ASYNC) {
 801		dev->power.request = RPM_REQ_RESUME;
 802		if (!dev->power.request_pending) {
 803			dev->power.request_pending = true;
 804			queue_work(pm_wq, &dev->power.work);
 805		}
 806		retval = 0;
 807		goto out;
 808	}
 809
 810	if (!parent && dev->parent) {
 811		/*
 812		 * Increment the parent's usage counter and resume it if
 813		 * necessary.  Not needed if dev is irq-safe; then the
 814		 * parent is permanently resumed.
 815		 */
 816		parent = dev->parent;
 817		if (dev->power.irq_safe)
 818			goto skip_parent;
 819		spin_unlock(&dev->power.lock);
 820
 821		pm_runtime_get_noresume(parent);
 822
 823		spin_lock(&parent->power.lock);
 824		/*
 825		 * Resume the parent if it has runtime PM enabled and not been
 826		 * set to ignore its children.
 827		 */
 828		if (!parent->power.disable_depth
 829		    && !parent->power.ignore_children) {
 830			rpm_resume(parent, 0);
 831			if (parent->power.runtime_status != RPM_ACTIVE)
 832				retval = -EBUSY;
 833		}
 834		spin_unlock(&parent->power.lock);
 835
 836		spin_lock(&dev->power.lock);
 837		if (retval)
 838			goto out;
 839		goto repeat;
 840	}
 841 skip_parent:
 842
 843	if (dev->power.no_callbacks)
 844		goto no_callback;	/* Assume success. */
 845
 846	__update_runtime_status(dev, RPM_RESUMING);
 847
 848	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 849
 850	dev_pm_disable_wake_irq_check(dev);
 851	retval = rpm_callback(callback, dev);
 852	if (retval) {
 853		__update_runtime_status(dev, RPM_SUSPENDED);
 854		pm_runtime_cancel_pending(dev);
 855		dev_pm_enable_wake_irq_check(dev, false);
 856	} else {
 857 no_callback:
 858		__update_runtime_status(dev, RPM_ACTIVE);
 859		pm_runtime_mark_last_busy(dev);
 860		if (parent)
 861			atomic_inc(&parent->power.child_count);
 862	}
 863	wake_up_all(&dev->power.wait_queue);
 864
 865	if (retval >= 0)
 866		rpm_idle(dev, RPM_ASYNC);
 867
 868 out:
 869	if (parent && !dev->power.irq_safe) {
 870		spin_unlock_irq(&dev->power.lock);
 871
 872		pm_runtime_put(parent);
 873
 874		spin_lock_irq(&dev->power.lock);
 875	}
 876
 877	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 878
 879	return retval;
 880}
 881
 882/**
 883 * pm_runtime_work - Universal runtime PM work function.
 884 * @work: Work structure used for scheduling the execution of this function.
 885 *
 886 * Use @work to get the device object the work is to be done for, determine what
 887 * is to be done and execute the appropriate runtime PM function.
 888 */
 889static void pm_runtime_work(struct work_struct *work)
 890{
 891	struct device *dev = container_of(work, struct device, power.work);
 892	enum rpm_request req;
 893
 894	spin_lock_irq(&dev->power.lock);
 895
 896	if (!dev->power.request_pending)
 897		goto out;
 898
 899	req = dev->power.request;
 900	dev->power.request = RPM_REQ_NONE;
 901	dev->power.request_pending = false;
 902
 903	switch (req) {
 904	case RPM_REQ_NONE:
 905		break;
 906	case RPM_REQ_IDLE:
 907		rpm_idle(dev, RPM_NOWAIT);
 908		break;
 909	case RPM_REQ_SUSPEND:
 910		rpm_suspend(dev, RPM_NOWAIT);
 911		break;
 912	case RPM_REQ_AUTOSUSPEND:
 913		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 914		break;
 915	case RPM_REQ_RESUME:
 916		rpm_resume(dev, RPM_NOWAIT);
 917		break;
 918	}
 919
 920 out:
 921	spin_unlock_irq(&dev->power.lock);
 922}
 923
 924/**
 925 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 926 * @data: Device pointer passed by pm_schedule_suspend().
 927 *
 928 * Check if the time is right and queue a suspend request.
 929 */
 930static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
 931{
 932	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 933	unsigned long flags;
 934	u64 expires;
 935
 936	spin_lock_irqsave(&dev->power.lock, flags);
 937
 938	expires = dev->power.timer_expires;
 939	/*
 940	 * If 'expires' is after the current time, we've been called
 941	 * too early.
 942	 */
 943	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 944		dev->power.timer_expires = 0;
 945		rpm_suspend(dev, dev->power.timer_autosuspends ?
 946		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 947	}
 948
 949	spin_unlock_irqrestore(&dev->power.lock, flags);
 950
 951	return HRTIMER_NORESTART;
 952}
 953
 954/**
 955 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 956 * @dev: Device to suspend.
 957 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 958 */
 959int pm_schedule_suspend(struct device *dev, unsigned int delay)
 960{
 961	unsigned long flags;
 962	u64 expires;
 963	int retval;
 964
 965	spin_lock_irqsave(&dev->power.lock, flags);
 966
 967	if (!delay) {
 968		retval = rpm_suspend(dev, RPM_ASYNC);
 969		goto out;
 970	}
 971
 972	retval = rpm_check_suspend_allowed(dev);
 973	if (retval)
 974		goto out;
 975
 976	/* Other scheduled or pending requests need to be canceled. */
 977	pm_runtime_cancel_pending(dev);
 978
 979	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
 980	dev->power.timer_expires = expires;
 981	dev->power.timer_autosuspends = 0;
 982	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 983
 984 out:
 985	spin_unlock_irqrestore(&dev->power.lock, flags);
 986
 987	return retval;
 988}
 989EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 990
 991/**
 992 * __pm_runtime_idle - Entry point for runtime idle operations.
 993 * @dev: Device to send idle notification for.
 994 * @rpmflags: Flag bits.
 995 *
 996 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 997 * return immediately if it is larger than zero.  Then carry out an idle
 998 * notification, either synchronous or asynchronous.
 999 *
1000 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1001 * or if pm_runtime_irq_safe() has been called.
1002 */
1003int __pm_runtime_idle(struct device *dev, int rpmflags)
1004{
1005	unsigned long flags;
1006	int retval;
1007
1008	if (rpmflags & RPM_GET_PUT) {
1009		if (!atomic_dec_and_test(&dev->power.usage_count))
 
1010			return 0;
 
1011	}
1012
1013	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1014
1015	spin_lock_irqsave(&dev->power.lock, flags);
1016	retval = rpm_idle(dev, rpmflags);
1017	spin_unlock_irqrestore(&dev->power.lock, flags);
1018
1019	return retval;
1020}
1021EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1022
1023/**
1024 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1025 * @dev: Device to suspend.
1026 * @rpmflags: Flag bits.
1027 *
1028 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1029 * return immediately if it is larger than zero.  Then carry out a suspend,
1030 * either synchronous or asynchronous.
1031 *
1032 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1033 * or if pm_runtime_irq_safe() has been called.
1034 */
1035int __pm_runtime_suspend(struct device *dev, int rpmflags)
1036{
1037	unsigned long flags;
1038	int retval;
1039
1040	if (rpmflags & RPM_GET_PUT) {
1041		if (!atomic_dec_and_test(&dev->power.usage_count))
 
1042			return 0;
 
1043	}
1044
1045	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1046
1047	spin_lock_irqsave(&dev->power.lock, flags);
1048	retval = rpm_suspend(dev, rpmflags);
1049	spin_unlock_irqrestore(&dev->power.lock, flags);
1050
1051	return retval;
1052}
1053EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1054
1055/**
1056 * __pm_runtime_resume - Entry point for runtime resume operations.
1057 * @dev: Device to resume.
1058 * @rpmflags: Flag bits.
1059 *
1060 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1061 * carry out a resume, either synchronous or asynchronous.
1062 *
1063 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1064 * or if pm_runtime_irq_safe() has been called.
1065 */
1066int __pm_runtime_resume(struct device *dev, int rpmflags)
1067{
1068	unsigned long flags;
1069	int retval;
1070
1071	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1072			dev->power.runtime_status != RPM_ACTIVE);
1073
1074	if (rpmflags & RPM_GET_PUT)
1075		atomic_inc(&dev->power.usage_count);
1076
1077	spin_lock_irqsave(&dev->power.lock, flags);
1078	retval = rpm_resume(dev, rpmflags);
1079	spin_unlock_irqrestore(&dev->power.lock, flags);
1080
1081	return retval;
1082}
1083EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1084
1085/**
1086 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1087 * @dev: Device to handle.
 
 
 
1088 *
1089 * Return -EINVAL if runtime PM is disabled for the device.
 
 
 
1090 *
1091 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1092 * and the runtime PM usage counter is nonzero, increment the counter and
1093 * return 1.  Otherwise return 0 without changing the counter.
 
 
 
 
 
 
1094 */
1095int pm_runtime_get_if_in_use(struct device *dev)
1096{
1097	unsigned long flags;
1098	int retval;
1099
1100	spin_lock_irqsave(&dev->power.lock, flags);
1101	retval = dev->power.disable_depth > 0 ? -EINVAL :
1102		dev->power.runtime_status == RPM_ACTIVE
1103			&& atomic_inc_not_zero(&dev->power.usage_count);
 
 
 
 
 
 
 
 
1104	spin_unlock_irqrestore(&dev->power.lock, flags);
 
1105	return retval;
1106}
1107EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1108
1109/**
1110 * __pm_runtime_set_status - Set runtime PM status of a device.
1111 * @dev: Device to handle.
1112 * @status: New runtime PM status of the device.
1113 *
1114 * If runtime PM of the device is disabled or its power.runtime_error field is
1115 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1116 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1117 * However, if the device has a parent and the parent is not active, and the
1118 * parent's power.ignore_children flag is unset, the device's status cannot be
1119 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1120 *
1121 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1122 * and the device parent's counter of unsuspended children is modified to
1123 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1124 * notification request for the parent is submitted.
1125 *
1126 * If @dev has any suppliers (as reflected by device links to them), and @status
1127 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1128 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1129 * of the @status value) and the suppliers will be deacticated on exit.  The
1130 * error returned by the failing supplier activation will be returned in that
1131 * case.
1132 */
1133int __pm_runtime_set_status(struct device *dev, unsigned int status)
1134{
1135	struct device *parent = dev->parent;
1136	bool notify_parent = false;
1137	int error = 0;
1138
1139	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1140		return -EINVAL;
1141
1142	spin_lock_irq(&dev->power.lock);
1143
1144	/*
1145	 * Prevent PM-runtime from being enabled for the device or return an
1146	 * error if it is enabled already and working.
1147	 */
1148	if (dev->power.runtime_error || dev->power.disable_depth)
1149		dev->power.disable_depth++;
1150	else
1151		error = -EAGAIN;
1152
1153	spin_unlock_irq(&dev->power.lock);
1154
1155	if (error)
1156		return error;
1157
1158	/*
1159	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1160	 * upfront regardless of the current status, because next time
1161	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1162	 * involved will be dropped down to one anyway.
1163	 */
1164	if (status == RPM_ACTIVE) {
1165		int idx = device_links_read_lock();
1166
1167		error = rpm_get_suppliers(dev);
1168		if (error)
1169			status = RPM_SUSPENDED;
1170
1171		device_links_read_unlock(idx);
1172	}
1173
1174	spin_lock_irq(&dev->power.lock);
1175
1176	if (dev->power.runtime_status == status || !parent)
1177		goto out_set;
1178
1179	if (status == RPM_SUSPENDED) {
1180		atomic_add_unless(&parent->power.child_count, -1, 0);
1181		notify_parent = !parent->power.ignore_children;
1182	} else {
1183		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1184
1185		/*
1186		 * It is invalid to put an active child under a parent that is
1187		 * not active, has runtime PM enabled and the
1188		 * 'power.ignore_children' flag unset.
1189		 */
1190		if (!parent->power.disable_depth
1191		    && !parent->power.ignore_children
1192		    && parent->power.runtime_status != RPM_ACTIVE) {
1193			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1194				dev_name(dev),
1195				dev_name(parent));
1196			error = -EBUSY;
1197		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1198			atomic_inc(&parent->power.child_count);
1199		}
1200
1201		spin_unlock(&parent->power.lock);
1202
1203		if (error) {
1204			status = RPM_SUSPENDED;
1205			goto out;
1206		}
1207	}
1208
1209 out_set:
1210	__update_runtime_status(dev, status);
1211	if (!error)
1212		dev->power.runtime_error = 0;
1213
1214 out:
1215	spin_unlock_irq(&dev->power.lock);
1216
1217	if (notify_parent)
1218		pm_request_idle(parent);
1219
1220	if (status == RPM_SUSPENDED) {
1221		int idx = device_links_read_lock();
1222
1223		rpm_put_suppliers(dev);
1224
1225		device_links_read_unlock(idx);
1226	}
1227
1228	pm_runtime_enable(dev);
1229
1230	return error;
1231}
1232EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1233
1234/**
1235 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1236 * @dev: Device to handle.
1237 *
1238 * Flush all pending requests for the device from pm_wq and wait for all
1239 * runtime PM operations involving the device in progress to complete.
1240 *
1241 * Should be called under dev->power.lock with interrupts disabled.
1242 */
1243static void __pm_runtime_barrier(struct device *dev)
1244{
1245	pm_runtime_deactivate_timer(dev);
1246
1247	if (dev->power.request_pending) {
1248		dev->power.request = RPM_REQ_NONE;
1249		spin_unlock_irq(&dev->power.lock);
1250
1251		cancel_work_sync(&dev->power.work);
1252
1253		spin_lock_irq(&dev->power.lock);
1254		dev->power.request_pending = false;
1255	}
1256
1257	if (dev->power.runtime_status == RPM_SUSPENDING
1258	    || dev->power.runtime_status == RPM_RESUMING
1259	    || dev->power.idle_notification) {
1260		DEFINE_WAIT(wait);
1261
1262		/* Suspend, wake-up or idle notification in progress. */
1263		for (;;) {
1264			prepare_to_wait(&dev->power.wait_queue, &wait,
1265					TASK_UNINTERRUPTIBLE);
1266			if (dev->power.runtime_status != RPM_SUSPENDING
1267			    && dev->power.runtime_status != RPM_RESUMING
1268			    && !dev->power.idle_notification)
1269				break;
1270			spin_unlock_irq(&dev->power.lock);
1271
1272			schedule();
1273
1274			spin_lock_irq(&dev->power.lock);
1275		}
1276		finish_wait(&dev->power.wait_queue, &wait);
1277	}
1278}
1279
1280/**
1281 * pm_runtime_barrier - Flush pending requests and wait for completions.
1282 * @dev: Device to handle.
1283 *
1284 * Prevent the device from being suspended by incrementing its usage counter and
1285 * if there's a pending resume request for the device, wake the device up.
1286 * Next, make sure that all pending requests for the device have been flushed
1287 * from pm_wq and wait for all runtime PM operations involving the device in
1288 * progress to complete.
1289 *
1290 * Return value:
1291 * 1, if there was a resume request pending and the device had to be woken up,
1292 * 0, otherwise
1293 */
1294int pm_runtime_barrier(struct device *dev)
1295{
1296	int retval = 0;
1297
1298	pm_runtime_get_noresume(dev);
1299	spin_lock_irq(&dev->power.lock);
1300
1301	if (dev->power.request_pending
1302	    && dev->power.request == RPM_REQ_RESUME) {
1303		rpm_resume(dev, 0);
1304		retval = 1;
1305	}
1306
1307	__pm_runtime_barrier(dev);
1308
1309	spin_unlock_irq(&dev->power.lock);
1310	pm_runtime_put_noidle(dev);
1311
1312	return retval;
1313}
1314EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1315
1316/**
1317 * __pm_runtime_disable - Disable runtime PM of a device.
1318 * @dev: Device to handle.
1319 * @check_resume: If set, check if there's a resume request for the device.
1320 *
1321 * Increment power.disable_depth for the device and if it was zero previously,
1322 * cancel all pending runtime PM requests for the device and wait for all
1323 * operations in progress to complete.  The device can be either active or
1324 * suspended after its runtime PM has been disabled.
1325 *
1326 * If @check_resume is set and there's a resume request pending when
1327 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1328 * function will wake up the device before disabling its runtime PM.
1329 */
1330void __pm_runtime_disable(struct device *dev, bool check_resume)
1331{
1332	spin_lock_irq(&dev->power.lock);
1333
1334	if (dev->power.disable_depth > 0) {
1335		dev->power.disable_depth++;
1336		goto out;
1337	}
1338
1339	/*
1340	 * Wake up the device if there's a resume request pending, because that
1341	 * means there probably is some I/O to process and disabling runtime PM
1342	 * shouldn't prevent the device from processing the I/O.
1343	 */
1344	if (check_resume && dev->power.request_pending
1345	    && dev->power.request == RPM_REQ_RESUME) {
1346		/*
1347		 * Prevent suspends and idle notifications from being carried
1348		 * out after we have woken up the device.
1349		 */
1350		pm_runtime_get_noresume(dev);
1351
1352		rpm_resume(dev, 0);
1353
1354		pm_runtime_put_noidle(dev);
1355	}
1356
1357	/* Update time accounting before disabling PM-runtime. */
1358	update_pm_runtime_accounting(dev);
1359
1360	if (!dev->power.disable_depth++)
1361		__pm_runtime_barrier(dev);
1362
1363 out:
1364	spin_unlock_irq(&dev->power.lock);
1365}
1366EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1367
1368/**
1369 * pm_runtime_enable - Enable runtime PM of a device.
1370 * @dev: Device to handle.
1371 */
1372void pm_runtime_enable(struct device *dev)
1373{
1374	unsigned long flags;
1375
1376	spin_lock_irqsave(&dev->power.lock, flags);
1377
1378	if (dev->power.disable_depth > 0) {
1379		dev->power.disable_depth--;
1380
1381		/* About to enable runtime pm, set accounting_timestamp to now */
1382		if (!dev->power.disable_depth)
1383			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1384	} else {
1385		dev_warn(dev, "Unbalanced %s!\n", __func__);
1386	}
1387
1388	WARN(!dev->power.disable_depth &&
1389	     dev->power.runtime_status == RPM_SUSPENDED &&
1390	     !dev->power.ignore_children &&
1391	     atomic_read(&dev->power.child_count) > 0,
1392	     "Enabling runtime PM for inactive device (%s) with active children\n",
1393	     dev_name(dev));
1394
1395	spin_unlock_irqrestore(&dev->power.lock, flags);
1396}
1397EXPORT_SYMBOL_GPL(pm_runtime_enable);
1398
1399/**
1400 * pm_runtime_forbid - Block runtime PM of a device.
1401 * @dev: Device to handle.
1402 *
1403 * Increase the device's usage count and clear its power.runtime_auto flag,
1404 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1405 * for it.
1406 */
1407void pm_runtime_forbid(struct device *dev)
1408{
1409	spin_lock_irq(&dev->power.lock);
1410	if (!dev->power.runtime_auto)
1411		goto out;
1412
1413	dev->power.runtime_auto = false;
1414	atomic_inc(&dev->power.usage_count);
1415	rpm_resume(dev, 0);
1416
1417 out:
1418	spin_unlock_irq(&dev->power.lock);
1419}
1420EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1421
1422/**
1423 * pm_runtime_allow - Unblock runtime PM of a device.
1424 * @dev: Device to handle.
1425 *
1426 * Decrease the device's usage count and set its power.runtime_auto flag.
1427 */
1428void pm_runtime_allow(struct device *dev)
1429{
1430	spin_lock_irq(&dev->power.lock);
1431	if (dev->power.runtime_auto)
1432		goto out;
1433
1434	dev->power.runtime_auto = true;
1435	if (atomic_dec_and_test(&dev->power.usage_count))
1436		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
 
 
1437
1438 out:
1439	spin_unlock_irq(&dev->power.lock);
1440}
1441EXPORT_SYMBOL_GPL(pm_runtime_allow);
1442
1443/**
1444 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1445 * @dev: Device to handle.
1446 *
1447 * Set the power.no_callbacks flag, which tells the PM core that this
1448 * device is power-managed through its parent and has no runtime PM
1449 * callbacks of its own.  The runtime sysfs attributes will be removed.
1450 */
1451void pm_runtime_no_callbacks(struct device *dev)
1452{
1453	spin_lock_irq(&dev->power.lock);
1454	dev->power.no_callbacks = 1;
1455	spin_unlock_irq(&dev->power.lock);
1456	if (device_is_registered(dev))
1457		rpm_sysfs_remove(dev);
1458}
1459EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1460
1461/**
1462 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1463 * @dev: Device to handle
1464 *
1465 * Set the power.irq_safe flag, which tells the PM core that the
1466 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1467 * always be invoked with the spinlock held and interrupts disabled.  It also
1468 * causes the parent's usage counter to be permanently incremented, preventing
1469 * the parent from runtime suspending -- otherwise an irq-safe child might have
1470 * to wait for a non-irq-safe parent.
1471 */
1472void pm_runtime_irq_safe(struct device *dev)
1473{
1474	if (dev->parent)
1475		pm_runtime_get_sync(dev->parent);
1476	spin_lock_irq(&dev->power.lock);
1477	dev->power.irq_safe = 1;
1478	spin_unlock_irq(&dev->power.lock);
1479}
1480EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1481
1482/**
1483 * update_autosuspend - Handle a change to a device's autosuspend settings.
1484 * @dev: Device to handle.
1485 * @old_delay: The former autosuspend_delay value.
1486 * @old_use: The former use_autosuspend value.
1487 *
1488 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1489 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1490 *
1491 * This function must be called under dev->power.lock with interrupts disabled.
1492 */
1493static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1494{
1495	int delay = dev->power.autosuspend_delay;
1496
1497	/* Should runtime suspend be prevented now? */
1498	if (dev->power.use_autosuspend && delay < 0) {
1499
1500		/* If it used to be allowed then prevent it. */
1501		if (!old_use || old_delay >= 0) {
1502			atomic_inc(&dev->power.usage_count);
1503			rpm_resume(dev, 0);
 
 
1504		}
1505	}
1506
1507	/* Runtime suspend should be allowed now. */
1508	else {
1509
1510		/* If it used to be prevented then allow it. */
1511		if (old_use && old_delay < 0)
1512			atomic_dec(&dev->power.usage_count);
1513
1514		/* Maybe we can autosuspend now. */
1515		rpm_idle(dev, RPM_AUTO);
1516	}
1517}
1518
1519/**
1520 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1521 * @dev: Device to handle.
1522 * @delay: Value of the new delay in milliseconds.
1523 *
1524 * Set the device's power.autosuspend_delay value.  If it changes to negative
1525 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1526 * changes the other way, allow runtime suspends.
1527 */
1528void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1529{
1530	int old_delay, old_use;
1531
1532	spin_lock_irq(&dev->power.lock);
1533	old_delay = dev->power.autosuspend_delay;
1534	old_use = dev->power.use_autosuspend;
1535	dev->power.autosuspend_delay = delay;
1536	update_autosuspend(dev, old_delay, old_use);
1537	spin_unlock_irq(&dev->power.lock);
1538}
1539EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1540
1541/**
1542 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1543 * @dev: Device to handle.
1544 * @use: New value for use_autosuspend.
1545 *
1546 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1547 * suspends as needed.
1548 */
1549void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1550{
1551	int old_delay, old_use;
1552
1553	spin_lock_irq(&dev->power.lock);
1554	old_delay = dev->power.autosuspend_delay;
1555	old_use = dev->power.use_autosuspend;
1556	dev->power.use_autosuspend = use;
1557	update_autosuspend(dev, old_delay, old_use);
1558	spin_unlock_irq(&dev->power.lock);
1559}
1560EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1561
1562/**
1563 * pm_runtime_init - Initialize runtime PM fields in given device object.
1564 * @dev: Device object to initialize.
1565 */
1566void pm_runtime_init(struct device *dev)
1567{
1568	dev->power.runtime_status = RPM_SUSPENDED;
1569	dev->power.idle_notification = false;
1570
1571	dev->power.disable_depth = 1;
1572	atomic_set(&dev->power.usage_count, 0);
1573
1574	dev->power.runtime_error = 0;
1575
1576	atomic_set(&dev->power.child_count, 0);
1577	pm_suspend_ignore_children(dev, false);
1578	dev->power.runtime_auto = true;
1579
1580	dev->power.request_pending = false;
1581	dev->power.request = RPM_REQ_NONE;
1582	dev->power.deferred_resume = false;
 
1583	INIT_WORK(&dev->power.work, pm_runtime_work);
1584
1585	dev->power.timer_expires = 0;
1586	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1587	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1588
1589	init_waitqueue_head(&dev->power.wait_queue);
1590}
1591
1592/**
1593 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1594 * @dev: Device object to re-initialize.
1595 */
1596void pm_runtime_reinit(struct device *dev)
1597{
1598	if (!pm_runtime_enabled(dev)) {
1599		if (dev->power.runtime_status == RPM_ACTIVE)
1600			pm_runtime_set_suspended(dev);
1601		if (dev->power.irq_safe) {
1602			spin_lock_irq(&dev->power.lock);
1603			dev->power.irq_safe = 0;
1604			spin_unlock_irq(&dev->power.lock);
1605			if (dev->parent)
1606				pm_runtime_put(dev->parent);
1607		}
1608	}
1609}
1610
1611/**
1612 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1613 * @dev: Device object being removed from device hierarchy.
1614 */
1615void pm_runtime_remove(struct device *dev)
1616{
1617	__pm_runtime_disable(dev, false);
1618	pm_runtime_reinit(dev);
1619}
1620
1621/**
1622 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1623 * @dev: Device whose driver is going to be removed.
1624 *
1625 * Check links from this device to any consumers and if any of them have active
1626 * runtime PM references to the device, drop the usage counter of the device
1627 * (as many times as needed).
1628 *
1629 * Links with the DL_FLAG_MANAGED flag unset are ignored.
1630 *
1631 * Since the device is guaranteed to be runtime-active at the point this is
1632 * called, nothing else needs to be done here.
1633 *
1634 * Moreover, this is called after device_links_busy() has returned 'false', so
1635 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1636 * therefore rpm_active can't be manipulated concurrently.
1637 */
1638void pm_runtime_clean_up_links(struct device *dev)
1639{
1640	struct device_link *link;
1641	int idx;
1642
1643	idx = device_links_read_lock();
1644
1645	list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
1646				device_links_read_lock_held()) {
1647		if (!(link->flags & DL_FLAG_MANAGED))
1648			continue;
1649
1650		while (refcount_dec_not_one(&link->rpm_active))
1651			pm_runtime_put_noidle(dev);
1652	}
1653
1654	device_links_read_unlock(idx);
1655}
1656
1657/**
1658 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1659 * @dev: Consumer device.
1660 */
1661void pm_runtime_get_suppliers(struct device *dev)
1662{
1663	struct device_link *link;
1664	int idx;
1665
1666	idx = device_links_read_lock();
1667
1668	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1669				device_links_read_lock_held())
1670		if (link->flags & DL_FLAG_PM_RUNTIME) {
1671			link->supplier_preactivated = true;
 
1672			refcount_inc(&link->rpm_active);
1673			pm_runtime_get_sync(link->supplier);
1674		}
1675
1676	device_links_read_unlock(idx);
1677}
1678
1679/**
1680 * pm_runtime_put_suppliers - Drop references to supplier devices.
1681 * @dev: Consumer device.
1682 */
1683void pm_runtime_put_suppliers(struct device *dev)
1684{
1685	struct device_link *link;
 
 
1686	int idx;
1687
1688	idx = device_links_read_lock();
1689
1690	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1691				device_links_read_lock_held())
1692		if (link->supplier_preactivated) {
1693			link->supplier_preactivated = false;
1694			if (refcount_dec_not_one(&link->rpm_active))
1695				pm_runtime_put(link->supplier);
1696		}
1697
1698	device_links_read_unlock(idx);
1699}
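/*
 * Illustrative sketch, not part of the original file: the driver core
 * brackets a consumer's probe with the two helpers above, keeping all
 * DL_FLAG_PM_RUNTIME suppliers powered while the driver binds.
 * foo_bind_consumer() and foo_do_probe() are hypothetical.
 */
static int foo_do_probe(struct device *dev)
{
	return 0;	/* hypothetical: bind the driver here */
}

static int foo_bind_consumer(struct device *dev)
{
	int ret;

	pm_runtime_get_suppliers(dev);	/* resume and pin the suppliers */
	ret = foo_do_probe(dev);
	pm_runtime_put_suppliers(dev);	/* drop the pre-activation refs */

	return ret;
}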
1700
1701void pm_runtime_new_link(struct device *dev)
1702{
1703	spin_lock_irq(&dev->power.lock);
1704	dev->power.links_count++;
1705	spin_unlock_irq(&dev->power.lock);
1706}
1707
1708void pm_runtime_drop_link(struct device *dev)
1709{
1710	spin_lock_irq(&dev->power.lock);
1711	WARN_ON(dev->power.links_count == 0);
1712	dev->power.links_count--;
1713	spin_unlock_irq(&dev->power.lock);
1714}
1715
1716static bool pm_runtime_need_not_resume(struct device *dev)
1717{
1718	return atomic_read(&dev->power.usage_count) <= 1 &&
1719		(atomic_read(&dev->power.child_count) == 0 ||
1720		 dev->power.ignore_children);
1721}
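/*
 * The "<= 1" check above allows for the reference that device_prepare()
 * takes on every device with pm_runtime_get_noresume() at the start of a
 * system-wide transition; a usage count of 1 here still means the device
 * has no other users.
 */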
1722
1723/**
1724 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1725 * @dev: Device to suspend.
1726 *
1727 * Disable runtime PM so we can safely check the device's runtime PM status and
1728 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1729 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1730 * usage and children counters don't indicate that the device was in use before
1731 * the system-wide transition under way, decrement its parent's children counter
1732 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1733 * unless we encounter errors.
1734 *
1735 * Typically this function may be invoked from a system suspend callback to
1736 * make sure the device is put into a low-power state.  It should only be used
1737 * during system-wide PM transitions to sleep states.  It assumes that the
1738 * analogous pm_runtime_force_resume() will be used to resume the device.
1739 */
1740int pm_runtime_force_suspend(struct device *dev)
1741{
1742	int (*callback)(struct device *);
1743	int ret;
1744
1745	pm_runtime_disable(dev);
1746	if (pm_runtime_status_suspended(dev))
1747		return 0;
1748
1749	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1750
1751	ret = callback ? callback(dev) : 0;
1752	if (ret)
1753		goto err;
1754
1755	/*
1756	 * If the device can stay in suspend after the system-wide transition
1757	 * to the working state that will follow, drop the children counter of
1758	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1759	 * function will be called again for it in the meantime.
1760	 */
1761	if (pm_runtime_need_not_resume(dev))
1762		pm_runtime_set_suspended(dev);
1763	else
1764		__update_runtime_status(dev, RPM_SUSPENDED);
1765
1766	return 0;
1767
1768err:
1769	pm_runtime_enable(dev);
1770	return ret;
1771}
1772EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1773
1774/**
1775 * pm_runtime_force_resume - Force a device into resume state if needed.
1776 * @dev: Device to resume.
1777 *
1778 * Prior to invoking this function we expect the user to have brought the
1779 * device into a low-power state by a call to pm_runtime_force_suspend(). Here
1780 * we reverse those actions and bring the device back to full power, if it is
1781 * expected to be used on system resume.  Otherwise, we defer the resume so it
1782 * can be managed via runtime PM.
1783 *
1784 * Typically this function may be invoked from a system resume callback.
1785 */
1786int pm_runtime_force_resume(struct device *dev)
1787{
1788	int (*callback)(struct device *);
1789	int ret = 0;
1790
1791	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1792		goto out;
1793
1794	/*
1795	 * The value of the parent's children counter is correct already, so
1796	 * just update the status of the device.
1797	 */
1798	__update_runtime_status(dev, RPM_ACTIVE);
1799
1800	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1801
1802	ret = callback ? callback(dev) : 0;
1803	if (ret) {
1804		pm_runtime_set_suspended(dev);
1805		goto out;
1806	}
1807
1808	pm_runtime_mark_last_busy(dev);
1809out:
1810	pm_runtime_enable(dev);
1811	return ret;
1812}
1813EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
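/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * of the two helpers above is to plug them straight into a driver's system
 * sleep callbacks alongside its regular runtime PM callbacks.  foo_pm_ops
 * and the foo_runtime_*() callbacks are hypothetical.
 */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* hypothetical: put the hardware into low power */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* hypothetical: bring the hardware to full power */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};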