v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   7 */
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
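
/*
 * Illustrative note (not part of the original file): RPM_GET_CALLBACK
 * resolves a callback by its byte offset inside struct dev_pm_ops, so a
 * single helper serves every callback slot.  For example,
 *
 *	cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * expands to
 *
 *	cb = __rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend));
 *
 * which walks pm_domain -> type -> class -> bus and falls back to the
 * driver's dev_pm_ops, returning the first ->runtime_suspend() found.
 */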
  50
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65static void update_pm_runtime_accounting(struct device *dev)
  66{
  67	u64 now, last, delta;
  68
  69	if (dev->power.disable_depth > 0)
  70		return;
  71
  72	last = dev->power.accounting_timestamp;
  73
  74	now = ktime_get_mono_fast_ns();
  75	dev->power.accounting_timestamp = now;
  76
  77	/*
  78	 * Because ktime_get_mono_fast_ns() is not monotonic during
  79	 * timekeeping updates, ensure that 'now' is after the last saved
   80	 * timestamp.
  81	 */
  82	if (now < last)
  83		return;
  84
  85	delta = now - last;
  86
  87	if (dev->power.runtime_status == RPM_SUSPENDED)
  88		dev->power.suspended_time += delta;
  89	else
  90		dev->power.active_time += delta;
  91}
  92
  93static void __update_runtime_status(struct device *dev, enum rpm_status status)
  94{
  95	update_pm_runtime_accounting(dev);
  96	dev->power.runtime_status = status;
  97}
  98
  99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 100{
 101	u64 time;
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&dev->power.lock, flags);
 105
 106	update_pm_runtime_accounting(dev);
 107	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 108
 109	spin_unlock_irqrestore(&dev->power.lock, flags);
 110
 111	return time;
 112}
 113
 114u64 pm_runtime_active_time(struct device *dev)
 115{
 116	return rpm_get_accounted_time(dev, false);
 117}
 118
 119u64 pm_runtime_suspended_time(struct device *dev)
 120{
 121	return rpm_get_accounted_time(dev, true);
 122}
 123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
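
/*
 * Illustrative sketch (not part of the original file): a driver could use
 * the exported helper above to compute how long its device has spent in
 * RPM_SUSPENDED between two sampling points, e.g. for a debugfs residency
 * statistic.  The function and variable names below are hypothetical.
 */
static u64 foo_suspended_ns_since_last_sample(struct device *dev, u64 *last)
{
	u64 total = pm_runtime_suspended_time(dev);	/* cumulative, in ns */
	u64 delta = total - *last;

	*last = total;
	return delta;
}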
 124
 125/**
 126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 127 * @dev: Device to handle.
 128 */
 129static void pm_runtime_deactivate_timer(struct device *dev)
 130{
 131	if (dev->power.timer_expires > 0) {
 132		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 133		dev->power.timer_expires = 0;
 134	}
 135}
 136
 137/**
 138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 139 * @dev: Device to handle.
 140 */
 141static void pm_runtime_cancel_pending(struct device *dev)
 142{
 143	pm_runtime_deactivate_timer(dev);
 144	/*
 145	 * In case there's a request pending, make sure its work function will
 146	 * return without doing anything.
 147	 */
 148	dev->power.request = RPM_REQ_NONE;
 149}
 150
 151/*
 152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 153 * @dev: Device to handle.
 154 *
 155 * Compute the autosuspend-delay expiration time based on the device's
 156 * power.last_busy time.  If the delay has already expired or is disabled
 157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 159 *
 160 * This function may be called either with or without dev->power.lock held.
 161 * Either way it can be racy, since power.last_busy may be updated at any time.
 162 */
 163u64 pm_runtime_autosuspend_expiration(struct device *dev)
 164{
 165	int autosuspend_delay;
 166	u64 expires;
 167
 168	if (!dev->power.use_autosuspend)
 169		return 0;
 170
 171	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 172	if (autosuspend_delay < 0)
 173		return 0;
 174
 175	expires  = READ_ONCE(dev->power.last_busy);
 176	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 177	if (expires > ktime_get_mono_fast_ns())
 178		return expires;	/* Expires in the future */
 179
 180	return 0;
 181}
 182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 183
 184static int dev_memalloc_noio(struct device *dev, void *data)
 185{
 186	return dev->power.memalloc_noio;
 187}
 188
 189/*
 190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 191 * @dev: Device to handle.
 192 * @enable: True for setting the flag and False for clearing the flag.
 193 *
 194 * Set the flag for all devices in the path from the device to the
 195 * root device in the device tree if @enable is true, otherwise clear
 196 * the flag for devices in the path whose siblings don't set the flag.
 197 *
  198 * The function should only be called by block device or network
  199 * device drivers to solve the deadlock problem during runtime
  200 * resume/suspend:
  201 *
  202 *     If memory allocation with GFP_KERNEL is called inside the runtime
  203 *     resume/suspend callback of any one of its ancestors (or the
  204 *     block device itself), a deadlock may be triggered inside the
  205 *     memory allocation, since it might not complete until the block
  206 *     device becomes active and the involved page I/O finishes. This
  207 *     situation was first pointed out by Alan Stern. Network devices
  208 *     are involved in iSCSI-like situations.
 209 *
 210 * The lock of dev_hotplug_mutex is held in the function for handling
 211 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 212 * in async probe().
 213 *
 214 * The function should be called between device_add() and device_del()
  215 * on the affected device (block/network device).
 216 */
 217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 218{
 219	static DEFINE_MUTEX(dev_hotplug_mutex);
 220
 221	mutex_lock(&dev_hotplug_mutex);
 222	for (;;) {
 223		bool enabled;
 224
 225		/* hold power lock since bitfield is not SMP-safe. */
 226		spin_lock_irq(&dev->power.lock);
 227		enabled = dev->power.memalloc_noio;
 228		dev->power.memalloc_noio = enable;
 229		spin_unlock_irq(&dev->power.lock);
 230
 231		/*
  232		 * No need to enable ancestors any more if the device
  233		 * has already been enabled.
 234		 */
 235		if (enabled && enable)
 236			break;
 237
 238		dev = dev->parent;
 239
 240		/*
  241		 * Clear the flag of the parent device only if none of
  242		 * its children have the flag set, because an ancestor's
  243		 * flag may have been set by any one of its descendants.
 244		 */
 245		if (!dev || (!enable &&
 246			     device_for_each_child(dev, NULL,
 247						   dev_memalloc_noio)))
 248			break;
 249	}
 250	mutex_unlock(&dev_hotplug_mutex);
 251}
 252EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
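
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, a block (or network) device driver would set the flag after
 * device_add() and clear it before device_del().  All "foo" names are
 * hypothetical.
 */
static int foo_register(struct foo_dev *foo)
{
	int ret = device_add(&foo->dev);

	if (ret)
		return ret;
	/* Runtime PM callbacks in this path must avoid GFP_KERNEL I/O. */
	pm_runtime_set_memalloc_noio(&foo->dev, true);
	return 0;
}

static void foo_unregister(struct foo_dev *foo)
{
	pm_runtime_set_memalloc_noio(&foo->dev, false);
	device_del(&foo->dev);
}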
 253
 254/**
 255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 256 * @dev: Device to test.
 257 */
 258static int rpm_check_suspend_allowed(struct device *dev)
 259{
 260	int retval = 0;
 261
 262	if (dev->power.runtime_error)
 263		retval = -EINVAL;
 264	else if (dev->power.disable_depth > 0)
 265		retval = -EACCES;
 266	else if (atomic_read(&dev->power.usage_count) > 0)
 267		retval = -EAGAIN;
 268	else if (!dev->power.ignore_children &&
 269			atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume
 274			&& dev->power.runtime_status == RPM_SUSPENDING)
 275	    || (dev->power.request_pending
 276			&& dev->power.request == RPM_REQ_RESUME))
 277		retval = -EAGAIN;
 278	else if (__dev_pm_qos_resume_latency(dev) == 0)
 279		retval = -EPERM;
 280	else if (dev->power.runtime_status == RPM_SUSPENDED)
 281		retval = 1;
 282
 283	return retval;
 284}
 285
 286static int rpm_get_suppliers(struct device *dev)
 287{
 288	struct device_link *link;
 289
 290	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 291				device_links_read_lock_held()) {
 292		int retval;
 293
 294		if (!(link->flags & DL_FLAG_PM_RUNTIME))
 295			continue;
 296
 297		retval = pm_runtime_get_sync(link->supplier);
 298		/* Ignore suppliers with disabled runtime PM. */
 299		if (retval < 0 && retval != -EACCES) {
 300			pm_runtime_put_noidle(link->supplier);
 301			return retval;
 302		}
 303		refcount_inc(&link->rpm_active);
 304	}
 305	return 0;
 306}
 307
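/*
 * Illustrative note (not part of the original file): the supplier handling
 * here only applies to device links created with DL_FLAG_PM_RUNTIME, e.g.
 * in a hypothetical consumer driver's probe:
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * Runtime-resuming the consumer then bumps link->rpm_active along with the
 * supplier's usage count via rpm_get_suppliers() above.
 */
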
 308static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 309{
 310	struct device_link *link;
 311
 312	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 313				device_links_read_lock_held()) {
 314
 315		while (refcount_dec_not_one(&link->rpm_active))
 316			pm_runtime_put_noidle(link->supplier);
 317
 318		if (try_to_suspend)
 319			pm_request_idle(link->supplier);
 320	}
 321}
 322
 323static void rpm_put_suppliers(struct device *dev)
 324{
 325	__rpm_put_suppliers(dev, true);
 326}
 327
 328static void rpm_suspend_suppliers(struct device *dev)
 329{
 330	struct device_link *link;
 331	int idx = device_links_read_lock();
 332
 333	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 334				device_links_read_lock_held())
 335		pm_request_idle(link->supplier);
 336
 337	device_links_read_unlock(idx);
 338}
 339
 340/**
 341 * __rpm_callback - Run a given runtime PM callback for a given device.
 342 * @cb: Runtime PM callback to run.
 343 * @dev: Device to run the callback for.
 344 */
 345static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 346	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 347{
 348	int retval = 0, idx;
 349	bool use_links = dev->power.links_count > 0;
 350
 351	if (dev->power.irq_safe) {
 352		spin_unlock(&dev->power.lock);
 353	} else {
 354		spin_unlock_irq(&dev->power.lock);
 355
 356		/*
 357		 * Resume suppliers if necessary.
 358		 *
 359		 * The device's runtime PM status cannot change until this
 360		 * routine returns, so it is safe to read the status outside of
 361		 * the lock.
 362		 */
 363		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 364			idx = device_links_read_lock();
 365
 366			retval = rpm_get_suppliers(dev);
 367			if (retval) {
 368				rpm_put_suppliers(dev);
 369				goto fail;
 370			}
 371
 372			device_links_read_unlock(idx);
 373		}
 374	}
 375
 376	if (cb)
 377		retval = cb(dev);
 378
 379	if (dev->power.irq_safe) {
 380		spin_lock(&dev->power.lock);
 381	} else {
 382		/*
 383		 * If the device is suspending and the callback has returned
 384		 * success, drop the usage counters of the suppliers that have
 385		 * been reference counted on its resume.
 386		 *
 387		 * Do that if resume fails too.
 388		 */
 389		if (use_links
 390		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 391		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 392			idx = device_links_read_lock();
 393
 394			__rpm_put_suppliers(dev, false);
 395
 396fail:
 397			device_links_read_unlock(idx);
 398		}
 399
 400		spin_lock_irq(&dev->power.lock);
 401	}
 402
 403	return retval;
 404}
 405
 406/**
 407 * rpm_idle - Notify device bus type if the device can be suspended.
 408 * @dev: Device to notify the bus type about.
 409 * @rpmflags: Flag bits.
 410 *
 411 * Check if the device's runtime PM status allows it to be suspended.  If
 412 * another idle notification has been started earlier, return immediately.  If
 413 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 414 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 415 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 416 *
 417 * This function must be called under dev->power.lock with interrupts disabled.
 418 */
 419static int rpm_idle(struct device *dev, int rpmflags)
 420{
 421	int (*callback)(struct device *);
 422	int retval;
 423
 424	trace_rpm_idle_rcuidle(dev, rpmflags);
 425	retval = rpm_check_suspend_allowed(dev);
 426	if (retval < 0)
 427		;	/* Conditions are wrong. */
 428
 429	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 430	else if (dev->power.runtime_status != RPM_ACTIVE)
 431		retval = -EAGAIN;
 432
 433	/*
 434	 * Any pending request other than an idle notification takes
 435	 * precedence over us, except that the timer may be running.
 436	 */
 437	else if (dev->power.request_pending &&
 438	    dev->power.request > RPM_REQ_IDLE)
 439		retval = -EAGAIN;
 440
 441	/* Act as though RPM_NOWAIT is always set. */
 442	else if (dev->power.idle_notification)
 443		retval = -EINPROGRESS;
 444	if (retval)
 445		goto out;
 446
 447	/* Pending requests need to be canceled. */
 448	dev->power.request = RPM_REQ_NONE;
 449
 450	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 451
 452	/* If no callback assume success. */
 453	if (!callback || dev->power.no_callbacks)
 454		goto out;
 455
 456	/* Carry out an asynchronous or a synchronous idle notification. */
 457	if (rpmflags & RPM_ASYNC) {
 458		dev->power.request = RPM_REQ_IDLE;
 459		if (!dev->power.request_pending) {
 460			dev->power.request_pending = true;
 461			queue_work(pm_wq, &dev->power.work);
 462		}
 463		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 464		return 0;
 465	}
 466
 467	dev->power.idle_notification = true;
 468
 469	retval = __rpm_callback(callback, dev);
 470
 471	dev->power.idle_notification = false;
 472	wake_up_all(&dev->power.wait_queue);
 473
 474 out:
 475	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 476	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 477}
 478
 479/**
 480 * rpm_callback - Run a given runtime PM callback for a given device.
 481 * @cb: Runtime PM callback to run.
 482 * @dev: Device to run the callback for.
 483 */
 484static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 485{
 486	int retval;
 487
 488	if (dev->power.memalloc_noio) {
 489		unsigned int noio_flag;
 490
 491		/*
  492		 * A deadlock might occur if a memory allocation with
  493		 * GFP_KERNEL happens inside the runtime_suspend or
  494		 * runtime_resume callback of a block device's
  495		 * ancestor or of the block device itself. A network
  496		 * device might be regarded as part of an iSCSI block
  497		 * device, so network devices and their ancestors should
  498		 * be marked as memalloc_noio too.
 499		 */
 500		noio_flag = memalloc_noio_save();
 501		retval = __rpm_callback(cb, dev);
 502		memalloc_noio_restore(noio_flag);
 503	} else {
 504		retval = __rpm_callback(cb, dev);
 505	}
 506
 507	dev->power.runtime_error = retval;
 508	return retval != -EACCES ? retval : -EIO;
 509}
 510
 511/**
 512 * rpm_suspend - Carry out runtime suspend of given device.
 513 * @dev: Device to suspend.
 514 * @rpmflags: Flag bits.
 515 *
 516 * Check if the device's runtime PM status allows it to be suspended.
 517 * Cancel a pending idle notification, autosuspend or suspend. If
 518 * another suspend has been started earlier, either return immediately
 519 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 520 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
  521 * otherwise run the ->runtime_suspend() callback directly. If
  522 * ->runtime_suspend() succeeds and a deferred resume was requested while
  523 * the callback was running, carry it out; otherwise send an idle
  524 * notification for the parent (if the suspend succeeded and neither
  525 * parent->power.ignore_children nor dev->power.irq_safe is set).
 526 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 527 * flag is set and the next autosuspend-delay expiration time is in the
 528 * future, schedule another autosuspend attempt.
 529 *
 530 * This function must be called under dev->power.lock with interrupts disabled.
 531 */
 532static int rpm_suspend(struct device *dev, int rpmflags)
 533	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 534{
 535	int (*callback)(struct device *);
 536	struct device *parent = NULL;
 537	int retval;
 538
 539	trace_rpm_suspend_rcuidle(dev, rpmflags);
 540
 541 repeat:
 542	retval = rpm_check_suspend_allowed(dev);
 543	if (retval < 0)
 544		goto out;	/* Conditions are wrong. */
 545
 546	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 547	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
 548		retval = -EAGAIN;
 549	if (retval)
 550		goto out;
 551
 552	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 553	if ((rpmflags & RPM_AUTO)
 554	    && dev->power.runtime_status != RPM_SUSPENDING) {
 555		u64 expires = pm_runtime_autosuspend_expiration(dev);
 556
 557		if (expires != 0) {
 558			/* Pending requests need to be canceled. */
 559			dev->power.request = RPM_REQ_NONE;
 560
 561			/*
 562			 * Optimization: If the timer is already running and is
 563			 * set to expire at or before the autosuspend delay,
 564			 * avoid the overhead of resetting it.  Just let it
 565			 * expire; pm_suspend_timer_fn() will take care of the
 566			 * rest.
 567			 */
 568			if (!(dev->power.timer_expires &&
 569					dev->power.timer_expires <= expires)) {
 570				/*
 571				 * We add a slack of 25% to gather wakeups
 572				 * without sacrificing the granularity.
 573				 */
 574				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 575						    (NSEC_PER_MSEC >> 2);
 576
 577				dev->power.timer_expires = expires;
 578				hrtimer_start_range_ns(&dev->power.suspend_timer,
 579						ns_to_ktime(expires),
 580						slack,
 581						HRTIMER_MODE_ABS);
 582			}
 583			dev->power.timer_autosuspends = 1;
 584			goto out;
 585		}
 586	}
 587
 588	/* Other scheduled or pending requests need to be canceled. */
 589	pm_runtime_cancel_pending(dev);
 590
 591	if (dev->power.runtime_status == RPM_SUSPENDING) {
 592		DEFINE_WAIT(wait);
 593
 594		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 595			retval = -EINPROGRESS;
 596			goto out;
 597		}
 598
 599		if (dev->power.irq_safe) {
 600			spin_unlock(&dev->power.lock);
 601
 602			cpu_relax();
 603
 604			spin_lock(&dev->power.lock);
 605			goto repeat;
 606		}
 607
 608		/* Wait for the other suspend running in parallel with us. */
 609		for (;;) {
 610			prepare_to_wait(&dev->power.wait_queue, &wait,
 611					TASK_UNINTERRUPTIBLE);
 612			if (dev->power.runtime_status != RPM_SUSPENDING)
 613				break;
 614
 615			spin_unlock_irq(&dev->power.lock);
 616
 617			schedule();
 618
 619			spin_lock_irq(&dev->power.lock);
 620		}
 621		finish_wait(&dev->power.wait_queue, &wait);
 622		goto repeat;
 623	}
 624
 625	if (dev->power.no_callbacks)
 626		goto no_callback;	/* Assume success. */
 627
 628	/* Carry out an asynchronous or a synchronous suspend. */
 629	if (rpmflags & RPM_ASYNC) {
 630		dev->power.request = (rpmflags & RPM_AUTO) ?
 631		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 632		if (!dev->power.request_pending) {
 633			dev->power.request_pending = true;
 634			queue_work(pm_wq, &dev->power.work);
 635		}
 636		goto out;
 637	}
 638
 639	__update_runtime_status(dev, RPM_SUSPENDING);
 640
 641	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 642
 643	dev_pm_enable_wake_irq_check(dev, true);
 644	retval = rpm_callback(callback, dev);
 645	if (retval)
 646		goto fail;
 647
 648 no_callback:
 649	__update_runtime_status(dev, RPM_SUSPENDED);
 650	pm_runtime_deactivate_timer(dev);
 651
 652	if (dev->parent) {
 653		parent = dev->parent;
 654		atomic_add_unless(&parent->power.child_count, -1, 0);
 655	}
 656	wake_up_all(&dev->power.wait_queue);
 657
 658	if (dev->power.deferred_resume) {
 659		dev->power.deferred_resume = false;
 660		rpm_resume(dev, 0);
 661		retval = -EAGAIN;
 662		goto out;
 663	}
 664
 665	if (dev->power.irq_safe)
 666		goto out;
 667
 668	/* Maybe the parent is now able to suspend. */
 669	if (parent && !parent->power.ignore_children) {
 670		spin_unlock(&dev->power.lock);
 671
 672		spin_lock(&parent->power.lock);
 673		rpm_idle(parent, RPM_ASYNC);
 674		spin_unlock(&parent->power.lock);
 675
 676		spin_lock(&dev->power.lock);
 677	}
 678	/* Maybe the suppliers are now able to suspend. */
 679	if (dev->power.links_count > 0) {
 680		spin_unlock_irq(&dev->power.lock);
 681
 682		rpm_suspend_suppliers(dev);
 683
 684		spin_lock_irq(&dev->power.lock);
 685	}
 686
 687 out:
 688	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 689
 690	return retval;
 691
 692 fail:
 693	dev_pm_disable_wake_irq_check(dev);
 694	__update_runtime_status(dev, RPM_ACTIVE);
 695	dev->power.deferred_resume = false;
 696	wake_up_all(&dev->power.wait_queue);
 697
 698	if (retval == -EAGAIN || retval == -EBUSY) {
 699		dev->power.runtime_error = 0;
 700
 701		/*
 702		 * If the callback routine failed an autosuspend, and
 703		 * if the last_busy time has been updated so that there
 704		 * is a new autosuspend expiration time, automatically
 705		 * reschedule another autosuspend.
 706		 */
 707		if ((rpmflags & RPM_AUTO) &&
 708		    pm_runtime_autosuspend_expiration(dev) != 0)
 709			goto repeat;
 710	} else {
 711		pm_runtime_cancel_pending(dev);
 712	}
 713	goto out;
 714}
 715
 716/**
 717 * rpm_resume - Carry out runtime resume of given device.
 718 * @dev: Device to resume.
 719 * @rpmflags: Flag bits.
 720 *
 721 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 722 * any scheduled or pending requests.  If another resume has been started
 723 * earlier, either return immediately or wait for it to finish, depending on the
 724 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 725 * parallel with this function, either tell the other process to resume after
 726 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 727 * flag is set then queue a resume request; otherwise run the
 728 * ->runtime_resume() callback directly.  Queue an idle notification for the
 729 * device if the resume succeeded.
 730 *
 731 * This function must be called under dev->power.lock with interrupts disabled.
 732 */
 733static int rpm_resume(struct device *dev, int rpmflags)
 734	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 735{
 736	int (*callback)(struct device *);
 737	struct device *parent = NULL;
 738	int retval = 0;
 739
 740	trace_rpm_resume_rcuidle(dev, rpmflags);
 741
 742 repeat:
 743	if (dev->power.runtime_error)
 744		retval = -EINVAL;
 745	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 746	    && dev->power.runtime_status == RPM_ACTIVE)
 747		retval = 1;
 748	else if (dev->power.disable_depth > 0)
 749		retval = -EACCES;
 750	if (retval)
 751		goto out;
 752
 753	/*
 754	 * Other scheduled or pending requests need to be canceled.  Small
 755	 * optimization: If an autosuspend timer is running, leave it running
 756	 * rather than cancelling it now only to restart it again in the near
 757	 * future.
 758	 */
 759	dev->power.request = RPM_REQ_NONE;
 760	if (!dev->power.timer_autosuspends)
 761		pm_runtime_deactivate_timer(dev);
 762
 763	if (dev->power.runtime_status == RPM_ACTIVE) {
 764		retval = 1;
 765		goto out;
 766	}
 767
 768	if (dev->power.runtime_status == RPM_RESUMING
 769	    || dev->power.runtime_status == RPM_SUSPENDING) {
 770		DEFINE_WAIT(wait);
 771
 772		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 773			if (dev->power.runtime_status == RPM_SUSPENDING)
 774				dev->power.deferred_resume = true;
 775			else
 776				retval = -EINPROGRESS;
 777			goto out;
 778		}
 779
 780		if (dev->power.irq_safe) {
 781			spin_unlock(&dev->power.lock);
 782
 783			cpu_relax();
 784
 785			spin_lock(&dev->power.lock);
 786			goto repeat;
 787		}
 788
 789		/* Wait for the operation carried out in parallel with us. */
 790		for (;;) {
 791			prepare_to_wait(&dev->power.wait_queue, &wait,
 792					TASK_UNINTERRUPTIBLE);
 793			if (dev->power.runtime_status != RPM_RESUMING
 794			    && dev->power.runtime_status != RPM_SUSPENDING)
 795				break;
 796
 797			spin_unlock_irq(&dev->power.lock);
 798
 799			schedule();
 800
 801			spin_lock_irq(&dev->power.lock);
 802		}
 803		finish_wait(&dev->power.wait_queue, &wait);
 804		goto repeat;
 805	}
 806
 807	/*
 808	 * See if we can skip waking up the parent.  This is safe only if
 809	 * power.no_callbacks is set, because otherwise we don't know whether
 810	 * the resume will actually succeed.
 811	 */
 812	if (dev->power.no_callbacks && !parent && dev->parent) {
 813		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 814		if (dev->parent->power.disable_depth > 0
 815		    || dev->parent->power.ignore_children
 816		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 817			atomic_inc(&dev->parent->power.child_count);
 818			spin_unlock(&dev->parent->power.lock);
 819			retval = 1;
 820			goto no_callback;	/* Assume success. */
 821		}
 822		spin_unlock(&dev->parent->power.lock);
 823	}
 824
 825	/* Carry out an asynchronous or a synchronous resume. */
 826	if (rpmflags & RPM_ASYNC) {
 827		dev->power.request = RPM_REQ_RESUME;
 828		if (!dev->power.request_pending) {
 829			dev->power.request_pending = true;
 830			queue_work(pm_wq, &dev->power.work);
 831		}
 832		retval = 0;
 833		goto out;
 834	}
 835
 836	if (!parent && dev->parent) {
 837		/*
 838		 * Increment the parent's usage counter and resume it if
 839		 * necessary.  Not needed if dev is irq-safe; then the
 840		 * parent is permanently resumed.
 841		 */
 842		parent = dev->parent;
 843		if (dev->power.irq_safe)
 844			goto skip_parent;
 845		spin_unlock(&dev->power.lock);
 846
 847		pm_runtime_get_noresume(parent);
 848
 849		spin_lock(&parent->power.lock);
 850		/*
 851		 * Resume the parent if it has runtime PM enabled and not been
 852		 * set to ignore its children.
 853		 */
 854		if (!parent->power.disable_depth
 855		    && !parent->power.ignore_children) {
 856			rpm_resume(parent, 0);
 857			if (parent->power.runtime_status != RPM_ACTIVE)
 858				retval = -EBUSY;
 859		}
 860		spin_unlock(&parent->power.lock);
 861
 862		spin_lock(&dev->power.lock);
 863		if (retval)
 864			goto out;
 865		goto repeat;
 866	}
 867 skip_parent:
 868
 869	if (dev->power.no_callbacks)
 870		goto no_callback;	/* Assume success. */
 871
 872	__update_runtime_status(dev, RPM_RESUMING);
 873
 874	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 875
 876	dev_pm_disable_wake_irq_check(dev);
 877	retval = rpm_callback(callback, dev);
 878	if (retval) {
 879		__update_runtime_status(dev, RPM_SUSPENDED);
 880		pm_runtime_cancel_pending(dev);
 881		dev_pm_enable_wake_irq_check(dev, false);
 882	} else {
 883 no_callback:
 884		__update_runtime_status(dev, RPM_ACTIVE);
 885		pm_runtime_mark_last_busy(dev);
 886		if (parent)
 887			atomic_inc(&parent->power.child_count);
 888	}
 889	wake_up_all(&dev->power.wait_queue);
 890
 891	if (retval >= 0)
 892		rpm_idle(dev, RPM_ASYNC);
 893
 894 out:
 895	if (parent && !dev->power.irq_safe) {
 896		spin_unlock_irq(&dev->power.lock);
 897
 898		pm_runtime_put(parent);
 899
 900		spin_lock_irq(&dev->power.lock);
 901	}
 902
 903	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 904
 905	return retval;
 906}
 907
 908/**
 909 * pm_runtime_work - Universal runtime PM work function.
 910 * @work: Work structure used for scheduling the execution of this function.
 911 *
 912 * Use @work to get the device object the work is to be done for, determine what
 913 * is to be done and execute the appropriate runtime PM function.
 914 */
 915static void pm_runtime_work(struct work_struct *work)
 916{
 917	struct device *dev = container_of(work, struct device, power.work);
 918	enum rpm_request req;
 919
 920	spin_lock_irq(&dev->power.lock);
 921
 922	if (!dev->power.request_pending)
 923		goto out;
 924
 925	req = dev->power.request;
 926	dev->power.request = RPM_REQ_NONE;
 927	dev->power.request_pending = false;
 928
 929	switch (req) {
 930	case RPM_REQ_NONE:
 931		break;
 932	case RPM_REQ_IDLE:
 933		rpm_idle(dev, RPM_NOWAIT);
 934		break;
 935	case RPM_REQ_SUSPEND:
 936		rpm_suspend(dev, RPM_NOWAIT);
 937		break;
 938	case RPM_REQ_AUTOSUSPEND:
 939		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 940		break;
 941	case RPM_REQ_RESUME:
 942		rpm_resume(dev, RPM_NOWAIT);
 943		break;
 944	}
 945
 946 out:
 947	spin_unlock_irq(&dev->power.lock);
 948}
 949
 950/**
 951 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 952 * @timer: hrtimer used by pm_schedule_suspend().
 953 *
 954 * Check if the time is right and queue a suspend request.
 955 */
 956static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
 957{
 958	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 959	unsigned long flags;
 960	u64 expires;
 961
 962	spin_lock_irqsave(&dev->power.lock, flags);
 963
 964	expires = dev->power.timer_expires;
 965	/*
 966	 * If 'expires' is after the current time, we've been called
 967	 * too early.
 968	 */
 969	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 970		dev->power.timer_expires = 0;
 971		rpm_suspend(dev, dev->power.timer_autosuspends ?
 972		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 973	}
 974
 975	spin_unlock_irqrestore(&dev->power.lock, flags);
 976
 977	return HRTIMER_NORESTART;
 978}
 979
 980/**
 981 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 982 * @dev: Device to suspend.
 983 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 984 */
 985int pm_schedule_suspend(struct device *dev, unsigned int delay)
 986{
 987	unsigned long flags;
 988	u64 expires;
 989	int retval;
 990
 991	spin_lock_irqsave(&dev->power.lock, flags);
 992
 993	if (!delay) {
 994		retval = rpm_suspend(dev, RPM_ASYNC);
 995		goto out;
 996	}
 997
 998	retval = rpm_check_suspend_allowed(dev);
 999	if (retval)
1000		goto out;
1001
1002	/* Other scheduled or pending requests need to be canceled. */
1003	pm_runtime_cancel_pending(dev);
1004
1005	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1006	dev->power.timer_expires = expires;
1007	dev->power.timer_autosuspends = 0;
1008	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1009
1010 out:
1011	spin_unlock_irqrestore(&dev->power.lock, flags);
1012
1013	return retval;
1014}
1015EXPORT_SYMBOL_GPL(pm_schedule_suspend);
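
/*
 * Illustrative sketch (not part of the original file): a driver that knows
 * its device will stay idle for a while can request a delayed asynchronous
 * suspend instead of suspending synchronously.  "foo" names are hypothetical.
 */
static void foo_transfer_done(struct foo_dev *foo)
{
	/* Nonzero means "not scheduled": already suspended or not allowed. */
	if (pm_schedule_suspend(foo->dev, 500))
		dev_dbg(foo->dev, "delayed suspend not scheduled\n");
}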
1016
1017/**
1018 * __pm_runtime_idle - Entry point for runtime idle operations.
1019 * @dev: Device to send idle notification for.
1020 * @rpmflags: Flag bits.
1021 *
1022 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1023 * return immediately if it is larger than zero.  Then carry out an idle
1024 * notification, either synchronous or asynchronous.
1025 *
1026 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1027 * or if pm_runtime_irq_safe() has been called.
1028 */
1029int __pm_runtime_idle(struct device *dev, int rpmflags)
1030{
1031	unsigned long flags;
1032	int retval;
1033
1034	if (rpmflags & RPM_GET_PUT) {
1035		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1036			trace_rpm_usage_rcuidle(dev, rpmflags);
1037			return 0;
1038		}
1039	}
1040
1041	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1042
1043	spin_lock_irqsave(&dev->power.lock, flags);
1044	retval = rpm_idle(dev, rpmflags);
1045	spin_unlock_irqrestore(&dev->power.lock, flags);
1046
1047	return retval;
1048}
1049EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1050
1051/**
1052 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1053 * @dev: Device to suspend.
1054 * @rpmflags: Flag bits.
1055 *
1056 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1057 * return immediately if it is larger than zero.  Then carry out a suspend,
1058 * either synchronous or asynchronous.
1059 *
1060 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1061 * or if pm_runtime_irq_safe() has been called.
1062 */
1063int __pm_runtime_suspend(struct device *dev, int rpmflags)
1064{
1065	unsigned long flags;
1066	int retval;
1067
1068	if (rpmflags & RPM_GET_PUT) {
1069		if (!atomic_dec_and_test(&dev->power.usage_count)) {
1070			trace_rpm_usage_rcuidle(dev, rpmflags);
1071			return 0;
1072		}
1073	}
1074
1075	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1076
1077	spin_lock_irqsave(&dev->power.lock, flags);
1078	retval = rpm_suspend(dev, rpmflags);
1079	spin_unlock_irqrestore(&dev->power.lock, flags);
1080
1081	return retval;
1082}
1083EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1084
1085/**
1086 * __pm_runtime_resume - Entry point for runtime resume operations.
1087 * @dev: Device to resume.
1088 * @rpmflags: Flag bits.
1089 *
1090 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1091 * carry out a resume, either synchronous or asynchronous.
1092 *
1093 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1094 * or if pm_runtime_irq_safe() has been called.
1095 */
1096int __pm_runtime_resume(struct device *dev, int rpmflags)
1097{
1098	unsigned long flags;
1099	int retval;
1100
1101	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1102			dev->power.runtime_status != RPM_ACTIVE);
1103
1104	if (rpmflags & RPM_GET_PUT)
1105		atomic_inc(&dev->power.usage_count);
1106
1107	spin_lock_irqsave(&dev->power.lock, flags);
1108	retval = rpm_resume(dev, rpmflags);
1109	spin_unlock_irqrestore(&dev->power.lock, flags);
1110
1111	return retval;
1112}
1113EXPORT_SYMBOL_GPL(__pm_runtime_resume);
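
/*
 * Illustrative sketch (not part of the original file): the usual driver
 * pattern built on the three entry points above, via the pm_runtime_get_sync()
 * and pm_runtime_put() wrappers from <linux/pm_runtime.h>.  "foo" names are
 * hypothetical.
 */
static int foo_do_io(struct foo_dev *foo)
{
	int ret = pm_runtime_get_sync(foo->dev);	/* usage++ and resume */

	if (ret < 0) {
		pm_runtime_put_noidle(foo->dev);	/* undo the usage++ */
		return ret;
	}

	ret = foo_hw_transfer(foo);			/* device is RPM_ACTIVE */

	pm_runtime_put(foo->dev);	/* usage--, then async idle notification */
	return ret;
}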
1114
1115/**
1116 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1117 * @dev: Device to handle.
1118 * @ign_usage_count: Whether or not to look at the current usage counter value.
1119 *
1120 * Return -EINVAL if runtime PM is disabled for @dev.
1121 *
1122 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1123 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1124 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1125 * without changing the usage counter.
1126 *
1127 * If @ign_usage_count is %true, this function can be used to prevent suspending
1128 * the device when its runtime PM status is %RPM_ACTIVE.
1129 *
1130 * If @ign_usage_count is %false, this function can be used to prevent
1131 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1132 * runtime PM usage counter is not zero.
1133 *
1134 * The caller is responsible for decrementing the runtime PM usage counter of
1135 * @dev after this function has returned a positive value for it.
1136 */
1137int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1138{
1139	unsigned long flags;
1140	int retval;
1141
1142	spin_lock_irqsave(&dev->power.lock, flags);
1143	if (dev->power.disable_depth > 0) {
1144		retval = -EINVAL;
1145	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1146		retval = 0;
1147	} else if (ign_usage_count) {
1148		retval = 1;
1149		atomic_inc(&dev->power.usage_count);
1150	} else {
1151		retval = atomic_inc_not_zero(&dev->power.usage_count);
1152	}
1153	trace_rpm_usage_rcuidle(dev, 0);
1154	spin_unlock_irqrestore(&dev->power.lock, flags);
1155
1156	return retval;
1157}
1158EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
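
/*
 * Illustrative sketch (not part of the original file): a polling routine
 * that must never wake a suspended device can take a reference only while
 * the device is already RPM_ACTIVE.  "foo" names are hypothetical.
 */
static void foo_poll_counters(struct foo_dev *foo)
{
	/* Pass false to additionally require a nonzero usage count. */
	if (pm_runtime_get_if_active(foo->dev, true) <= 0)
		return;	/* 0: not active; -EINVAL: runtime PM disabled */

	foo_hw_read_counters(foo);
	pm_runtime_put(foo->dev);	/* drop the reference taken above */
}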
1159
1160/**
1161 * __pm_runtime_set_status - Set runtime PM status of a device.
1162 * @dev: Device to handle.
1163 * @status: New runtime PM status of the device.
1164 *
1165 * If runtime PM of the device is disabled or its power.runtime_error field is
1166 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1167 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1168 * However, if the device has a parent and the parent is not active, and the
1169 * parent's power.ignore_children flag is unset, the device's status cannot be
1170 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1171 *
1172 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1173 * and the device parent's counter of unsuspended children is modified to
1174 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1175 * notification request for the parent is submitted.
1176 *
1177 * If @dev has any suppliers (as reflected by device links to them), and @status
1178 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1179 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 1180 * of the @status value) and the suppliers will be deactivated on exit.  The
1181 * error returned by the failing supplier activation will be returned in that
1182 * case.
1183 */
1184int __pm_runtime_set_status(struct device *dev, unsigned int status)
1185{
1186	struct device *parent = dev->parent;
1187	bool notify_parent = false;
1188	int error = 0;
1189
1190	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1191		return -EINVAL;
1192
1193	spin_lock_irq(&dev->power.lock);
1194
1195	/*
1196	 * Prevent PM-runtime from being enabled for the device or return an
1197	 * error if it is enabled already and working.
1198	 */
1199	if (dev->power.runtime_error || dev->power.disable_depth)
1200		dev->power.disable_depth++;
1201	else
1202		error = -EAGAIN;
1203
1204	spin_unlock_irq(&dev->power.lock);
1205
1206	if (error)
1207		return error;
1208
1209	/*
1210	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1211	 * upfront regardless of the current status, because next time
1212	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1213	 * involved will be dropped down to one anyway.
1214	 */
1215	if (status == RPM_ACTIVE) {
1216		int idx = device_links_read_lock();
1217
1218		error = rpm_get_suppliers(dev);
1219		if (error)
1220			status = RPM_SUSPENDED;
1221
1222		device_links_read_unlock(idx);
1223	}
1224
1225	spin_lock_irq(&dev->power.lock);
1226
1227	if (dev->power.runtime_status == status || !parent)
1228		goto out_set;
1229
1230	if (status == RPM_SUSPENDED) {
1231		atomic_add_unless(&parent->power.child_count, -1, 0);
1232		notify_parent = !parent->power.ignore_children;
1233	} else {
1234		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1235
1236		/*
1237		 * It is invalid to put an active child under a parent that is
1238		 * not active, has runtime PM enabled and the
1239		 * 'power.ignore_children' flag unset.
1240		 */
1241		if (!parent->power.disable_depth
1242		    && !parent->power.ignore_children
1243		    && parent->power.runtime_status != RPM_ACTIVE) {
1244			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1245				dev_name(dev),
1246				dev_name(parent));
1247			error = -EBUSY;
1248		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1249			atomic_inc(&parent->power.child_count);
1250		}
1251
1252		spin_unlock(&parent->power.lock);
1253
1254		if (error) {
1255			status = RPM_SUSPENDED;
1256			goto out;
1257		}
1258	}
1259
1260 out_set:
1261	__update_runtime_status(dev, status);
1262	if (!error)
1263		dev->power.runtime_error = 0;
1264
1265 out:
1266	spin_unlock_irq(&dev->power.lock);
1267
1268	if (notify_parent)
1269		pm_request_idle(parent);
1270
1271	if (status == RPM_SUSPENDED) {
1272		int idx = device_links_read_lock();
1273
1274		rpm_put_suppliers(dev);
1275
1276		device_links_read_unlock(idx);
1277	}
1278
1279	pm_runtime_enable(dev);
1280
1281	return error;
1282}
1283EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
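
/*
 * Illustrative sketch (not part of the original file): drivers reach this
 * function through the pm_runtime_set_active()/pm_runtime_set_suspended()
 * wrappers, typically in probe before runtime PM is enabled, so that the
 * recorded status matches the actual hardware state.  "foo" names are
 * hypothetical.
 */
static int foo_probe(struct platform_device *pdev)
{
	foo_hw_power_on(pdev);			/* hardware is now active */

	pm_runtime_set_active(&pdev->dev);	/* record RPM_ACTIVE */
	pm_runtime_enable(&pdev->dev);
	return 0;
}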
1284
1285/**
1286 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1287 * @dev: Device to handle.
1288 *
1289 * Flush all pending requests for the device from pm_wq and wait for all
1290 * runtime PM operations involving the device in progress to complete.
1291 *
1292 * Should be called under dev->power.lock with interrupts disabled.
1293 */
1294static void __pm_runtime_barrier(struct device *dev)
1295{
1296	pm_runtime_deactivate_timer(dev);
1297
1298	if (dev->power.request_pending) {
1299		dev->power.request = RPM_REQ_NONE;
1300		spin_unlock_irq(&dev->power.lock);
1301
1302		cancel_work_sync(&dev->power.work);
1303
1304		spin_lock_irq(&dev->power.lock);
1305		dev->power.request_pending = false;
1306	}
1307
1308	if (dev->power.runtime_status == RPM_SUSPENDING
1309	    || dev->power.runtime_status == RPM_RESUMING
1310	    || dev->power.idle_notification) {
1311		DEFINE_WAIT(wait);
1312
1313		/* Suspend, wake-up or idle notification in progress. */
1314		for (;;) {
1315			prepare_to_wait(&dev->power.wait_queue, &wait,
1316					TASK_UNINTERRUPTIBLE);
1317			if (dev->power.runtime_status != RPM_SUSPENDING
1318			    && dev->power.runtime_status != RPM_RESUMING
1319			    && !dev->power.idle_notification)
1320				break;
1321			spin_unlock_irq(&dev->power.lock);
1322
1323			schedule();
1324
1325			spin_lock_irq(&dev->power.lock);
1326		}
1327		finish_wait(&dev->power.wait_queue, &wait);
1328	}
1329}
1330
1331/**
1332 * pm_runtime_barrier - Flush pending requests and wait for completions.
1333 * @dev: Device to handle.
1334 *
 1335 * Prevent the device from being suspended by incrementing its usage counter
 1336 * and, if there's a pending resume request for the device, wake it up.
1337 * Next, make sure that all pending requests for the device have been flushed
1338 * from pm_wq and wait for all runtime PM operations involving the device in
1339 * progress to complete.
1340 *
1341 * Return value:
1342 * 1, if there was a resume request pending and the device had to be woken up,
1343 * 0, otherwise
1344 */
1345int pm_runtime_barrier(struct device *dev)
1346{
1347	int retval = 0;
1348
1349	pm_runtime_get_noresume(dev);
1350	spin_lock_irq(&dev->power.lock);
1351
1352	if (dev->power.request_pending
1353	    && dev->power.request == RPM_REQ_RESUME) {
1354		rpm_resume(dev, 0);
1355		retval = 1;
1356	}
1357
1358	__pm_runtime_barrier(dev);
1359
1360	spin_unlock_irq(&dev->power.lock);
1361	pm_runtime_put_noidle(dev);
1362
1363	return retval;
1364}
1365EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1366
1367/**
1368 * __pm_runtime_disable - Disable runtime PM of a device.
1369 * @dev: Device to handle.
1370 * @check_resume: If set, check if there's a resume request for the device.
1371 *
1372 * Increment power.disable_depth for the device and if it was zero previously,
1373 * cancel all pending runtime PM requests for the device and wait for all
1374 * operations in progress to complete.  The device can be either active or
1375 * suspended after its runtime PM has been disabled.
1376 *
1377 * If @check_resume is set and there's a resume request pending when
1378 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1379 * function will wake up the device before disabling its runtime PM.
1380 */
1381void __pm_runtime_disable(struct device *dev, bool check_resume)
1382{
1383	spin_lock_irq(&dev->power.lock);
1384
1385	if (dev->power.disable_depth > 0) {
1386		dev->power.disable_depth++;
1387		goto out;
1388	}
1389
1390	/*
1391	 * Wake up the device if there's a resume request pending, because that
1392	 * means there probably is some I/O to process and disabling runtime PM
1393	 * shouldn't prevent the device from processing the I/O.
1394	 */
1395	if (check_resume && dev->power.request_pending
1396	    && dev->power.request == RPM_REQ_RESUME) {
1397		/*
1398		 * Prevent suspends and idle notifications from being carried
1399		 * out after we have woken up the device.
1400		 */
1401		pm_runtime_get_noresume(dev);
1402
1403		rpm_resume(dev, 0);
1404
1405		pm_runtime_put_noidle(dev);
1406	}
1407
1408	/* Update time accounting before disabling PM-runtime. */
1409	update_pm_runtime_accounting(dev);
1410
1411	if (!dev->power.disable_depth++)
1412		__pm_runtime_barrier(dev);
1413
1414 out:
1415	spin_unlock_irq(&dev->power.lock);
1416}
1417EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1418
1419/**
1420 * pm_runtime_enable - Enable runtime PM of a device.
1421 * @dev: Device to handle.
1422 */
1423void pm_runtime_enable(struct device *dev)
1424{
1425	unsigned long flags;
1426
1427	spin_lock_irqsave(&dev->power.lock, flags);
1428
1429	if (dev->power.disable_depth > 0) {
1430		dev->power.disable_depth--;
1431
1432		/* About to enable runtime pm, set accounting_timestamp to now */
1433		if (!dev->power.disable_depth)
1434			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1435	} else {
1436		dev_warn(dev, "Unbalanced %s!\n", __func__);
1437	}
1438
1439	WARN(!dev->power.disable_depth &&
1440	     dev->power.runtime_status == RPM_SUSPENDED &&
1441	     !dev->power.ignore_children &&
1442	     atomic_read(&dev->power.child_count) > 0,
1443	     "Enabling runtime PM for inactive device (%s) with active children\n",
1444	     dev_name(dev));
1445
1446	spin_unlock_irqrestore(&dev->power.lock, flags);
1447}
1448EXPORT_SYMBOL_GPL(pm_runtime_enable);
1449
1450/**
1451 * pm_runtime_forbid - Block runtime PM of a device.
1452 * @dev: Device to handle.
1453 *
1454 * Increase the device's usage count and clear its power.runtime_auto flag,
1455 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1456 * for it.
1457 */
1458void pm_runtime_forbid(struct device *dev)
1459{
1460	spin_lock_irq(&dev->power.lock);
1461	if (!dev->power.runtime_auto)
1462		goto out;
1463
1464	dev->power.runtime_auto = false;
1465	atomic_inc(&dev->power.usage_count);
1466	rpm_resume(dev, 0);
1467
1468 out:
1469	spin_unlock_irq(&dev->power.lock);
1470}
1471EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1472
1473/**
1474 * pm_runtime_allow - Unblock runtime PM of a device.
1475 * @dev: Device to handle.
1476 *
1477 * Decrease the device's usage count and set its power.runtime_auto flag.
1478 */
1479void pm_runtime_allow(struct device *dev)
1480{
1481	spin_lock_irq(&dev->power.lock);
1482	if (dev->power.runtime_auto)
1483		goto out;
1484
1485	dev->power.runtime_auto = true;
1486	if (atomic_dec_and_test(&dev->power.usage_count))
1487		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1488	else
1489		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1490
1491 out:
1492	spin_unlock_irq(&dev->power.lock);
1493}
1494EXPORT_SYMBOL_GPL(pm_runtime_allow);
1495
1496/**
1497 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1498 * @dev: Device to handle.
1499 *
1500 * Set the power.no_callbacks flag, which tells the PM core that this
1501 * device is power-managed through its parent and has no runtime PM
1502 * callbacks of its own.  The runtime sysfs attributes will be removed.
1503 */
1504void pm_runtime_no_callbacks(struct device *dev)
1505{
1506	spin_lock_irq(&dev->power.lock);
1507	dev->power.no_callbacks = 1;
1508	spin_unlock_irq(&dev->power.lock);
1509	if (device_is_registered(dev))
1510		rpm_sysfs_remove(dev);
1511}
1512EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1513
1514/**
1515 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1516 * @dev: Device to handle
1517 *
1518 * Set the power.irq_safe flag, which tells the PM core that the
1519 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1520 * always be invoked with the spinlock held and interrupts disabled.  It also
1521 * causes the parent's usage counter to be permanently incremented, preventing
1522 * the parent from runtime suspending -- otherwise an irq-safe child might have
1523 * to wait for a non-irq-safe parent.
1524 */
1525void pm_runtime_irq_safe(struct device *dev)
1526{
1527	if (dev->parent)
1528		pm_runtime_get_sync(dev->parent);
1529	spin_lock_irq(&dev->power.lock);
1530	dev->power.irq_safe = 1;
1531	spin_unlock_irq(&dev->power.lock);
1532}
1533EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
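
/*
 * Illustrative sketch (not part of the original file): once a probe routine
 * has called pm_runtime_irq_safe(dev), the synchronous helpers may be used
 * from atomic context, such as a hard interrupt handler.  "foo" names are
 * hypothetical.
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_dev *foo = data;

	pm_runtime_get_sync(foo->dev);	/* legal in IRQ context only with irq_safe */
	foo_hw_ack_irq(foo);
	pm_runtime_put(foo->dev);
	return IRQ_HANDLED;
}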
1534
1535/**
1536 * update_autosuspend - Handle a change to a device's autosuspend settings.
1537 * @dev: Device to handle.
1538 * @old_delay: The former autosuspend_delay value.
1539 * @old_use: The former use_autosuspend value.
1540 *
1541 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1542 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1543 *
1544 * This function must be called under dev->power.lock with interrupts disabled.
1545 */
1546static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1547{
1548	int delay = dev->power.autosuspend_delay;
1549
1550	/* Should runtime suspend be prevented now? */
1551	if (dev->power.use_autosuspend && delay < 0) {
1552
1553		/* If it used to be allowed then prevent it. */
1554		if (!old_use || old_delay >= 0) {
1555			atomic_inc(&dev->power.usage_count);
1556			rpm_resume(dev, 0);
1557		} else {
1558			trace_rpm_usage_rcuidle(dev, 0);
1559		}
1560	}
1561
1562	/* Runtime suspend should be allowed now. */
1563	else {
1564
1565		/* If it used to be prevented then allow it. */
1566		if (old_use && old_delay < 0)
1567			atomic_dec(&dev->power.usage_count);
1568
1569		/* Maybe we can autosuspend now. */
1570		rpm_idle(dev, RPM_AUTO);
1571	}
1572}
1573
1574/**
1575 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1576 * @dev: Device to handle.
1577 * @delay: Value of the new delay in milliseconds.
1578 *
1579 * Set the device's power.autosuspend_delay value.  If it changes to negative
1580 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1581 * changes the other way, allow runtime suspends.
1582 */
1583void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1584{
1585	int old_delay, old_use;
1586
1587	spin_lock_irq(&dev->power.lock);
1588	old_delay = dev->power.autosuspend_delay;
1589	old_use = dev->power.use_autosuspend;
1590	dev->power.autosuspend_delay = delay;
1591	update_autosuspend(dev, old_delay, old_use);
1592	spin_unlock_irq(&dev->power.lock);
1593}
1594EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1595
1596/**
1597 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1598 * @dev: Device to handle.
1599 * @use: New value for use_autosuspend.
1600 *
1601 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1602 * suspends as needed.
1603 */
1604void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1605{
1606	int old_delay, old_use;
1607
1608	spin_lock_irq(&dev->power.lock);
1609	old_delay = dev->power.autosuspend_delay;
1610	old_use = dev->power.use_autosuspend;
1611	dev->power.use_autosuspend = use;
1612	update_autosuspend(dev, old_delay, old_use);
1613	spin_unlock_irq(&dev->power.lock);
1614}
1615EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
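
/*
 * Illustrative sketch (not part of the original file): the canonical
 * autosuspend setup, via the pm_runtime_use_autosuspend() wrapper from
 * <linux/pm_runtime.h>, paired with mark-busy/put in the I/O completion
 * path.  "foo" names are hypothetical.
 */
static void foo_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* suspend after 2 s idle */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the idle window */
	pm_runtime_put_autosuspend(dev);	/* usage--, maybe schedule suspend */
}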
1616
1617/**
1618 * pm_runtime_init - Initialize runtime PM fields in given device object.
1619 * @dev: Device object to initialize.
1620 */
1621void pm_runtime_init(struct device *dev)
1622{
1623	dev->power.runtime_status = RPM_SUSPENDED;
1624	dev->power.idle_notification = false;
1625
1626	dev->power.disable_depth = 1;
1627	atomic_set(&dev->power.usage_count, 0);
1628
1629	dev->power.runtime_error = 0;
1630
1631	atomic_set(&dev->power.child_count, 0);
1632	pm_suspend_ignore_children(dev, false);
1633	dev->power.runtime_auto = true;
1634
1635	dev->power.request_pending = false;
1636	dev->power.request = RPM_REQ_NONE;
1637	dev->power.deferred_resume = false;
1638	dev->power.needs_force_resume = 0;
1639	INIT_WORK(&dev->power.work, pm_runtime_work);
1640
1641	dev->power.timer_expires = 0;
1642	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1643	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1644
1645	init_waitqueue_head(&dev->power.wait_queue);
1646}
1647
1648/**
1649 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1650 * @dev: Device object to re-initialize.
1651 */
1652void pm_runtime_reinit(struct device *dev)
1653{
1654	if (!pm_runtime_enabled(dev)) {
1655		if (dev->power.runtime_status == RPM_ACTIVE)
1656			pm_runtime_set_suspended(dev);
1657		if (dev->power.irq_safe) {
1658			spin_lock_irq(&dev->power.lock);
1659			dev->power.irq_safe = 0;
1660			spin_unlock_irq(&dev->power.lock);
1661			if (dev->parent)
1662				pm_runtime_put(dev->parent);
1663		}
1664	}
1665}
1666
1667/**
1668 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1669 * @dev: Device object being removed from device hierarchy.
1670 */
1671void pm_runtime_remove(struct device *dev)
1672{
1673	__pm_runtime_disable(dev, false);
1674	pm_runtime_reinit(dev);
1675}
1676
1677/**
1678 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1679 * @dev: Consumer device.
1680 */
1681void pm_runtime_get_suppliers(struct device *dev)
1682{
1683	struct device_link *link;
1684	int idx;
1685
1686	idx = device_links_read_lock();
1687
1688	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1689				device_links_read_lock_held())
1690		if (link->flags & DL_FLAG_PM_RUNTIME) {
1691			link->supplier_preactivated = true;
1692			pm_runtime_get_sync(link->supplier);
1693			refcount_inc(&link->rpm_active);
1694		}
1695
1696	device_links_read_unlock(idx);
1697}
1698
1699/**
1700 * pm_runtime_put_suppliers - Drop references to supplier devices.
1701 * @dev: Consumer device.
1702 */
1703void pm_runtime_put_suppliers(struct device *dev)
1704{
1705	struct device_link *link;
1706	unsigned long flags;
1707	bool put;
1708	int idx;
1709
1710	idx = device_links_read_lock();
1711
1712	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1713				device_links_read_lock_held())
1714		if (link->supplier_preactivated) {
1715			link->supplier_preactivated = false;
1716			spin_lock_irqsave(&dev->power.lock, flags);
1717			put = pm_runtime_status_suspended(dev) &&
1718			      refcount_dec_not_one(&link->rpm_active);
1719			spin_unlock_irqrestore(&dev->power.lock, flags);
1720			if (put)
1721				pm_runtime_put(link->supplier);
1722		}
1723
1724	device_links_read_unlock(idx);
1725}
1726
1727void pm_runtime_new_link(struct device *dev)
1728{
1729	spin_lock_irq(&dev->power.lock);
1730	dev->power.links_count++;
1731	spin_unlock_irq(&dev->power.lock);
1732}
1733
1734static void pm_runtime_drop_link_count(struct device *dev)
1735{
1736	spin_lock_irq(&dev->power.lock);
1737	WARN_ON(dev->power.links_count == 0);
1738	dev->power.links_count--;
1739	spin_unlock_irq(&dev->power.lock);
1740}
1741
1742/**
1743 * pm_runtime_drop_link - Prepare for device link removal.
1744 * @link: Device link going away.
1745 *
1746 * Drop the link count of the consumer end of @link and decrement the supplier
1747 * device's runtime PM usage counter as many times as needed to drop all of the
1748 * PM runtime reference to it from the consumer.
1749 */
1750void pm_runtime_drop_link(struct device_link *link)
1751{
1752	if (!(link->flags & DL_FLAG_PM_RUNTIME))
1753		return;
1754
1755	pm_runtime_drop_link_count(link->consumer);
1756
1757	while (refcount_dec_not_one(&link->rpm_active))
1758		pm_runtime_put(link->supplier);
1759}
1760
1761static bool pm_runtime_need_not_resume(struct device *dev)
1762{
1763	return atomic_read(&dev->power.usage_count) <= 1 &&
1764		(atomic_read(&dev->power.child_count) == 0 ||
1765		 dev->power.ignore_children);
1766}
1767
1768/**
1769 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1770 * @dev: Device to suspend.
1771 *
1772 * Disable runtime PM so we safely can check the device's runtime PM status and
1773 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1774 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1775 * usage and children counters don't indicate that the device was in use before
1776 * the system-wide transition under way, decrement its parent's children counter
1777 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1778 * unless we encounter errors.
1779 *
1780 * Typically this function may be invoked from a system suspend callback to make
1781 * sure the device is put into a low-power state.  It should only be used during
1782 * system-wide PM transitions to sleep states.  It assumes that the analogous
1783 * pm_runtime_force_resume() will be used to resume the device.
1784 */
1785int pm_runtime_force_suspend(struct device *dev)
1786{
1787	int (*callback)(struct device *);
1788	int ret;
1789
1790	pm_runtime_disable(dev);
1791	if (pm_runtime_status_suspended(dev))
1792		return 0;
1793
1794	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1795
1796	ret = callback ? callback(dev) : 0;
1797	if (ret)
1798		goto err;
1799
1800	/*
1801	 * If the device can stay in suspend after the system-wide transition
1802	 * to the working state that will follow, drop the children counter of
1803	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1804	 * function will be called again for it in the meantime.
1805	 */
1806	if (pm_runtime_need_not_resume(dev)) {
1807		pm_runtime_set_suspended(dev);
1808	} else {
1809		__update_runtime_status(dev, RPM_SUSPENDED);
1810		dev->power.needs_force_resume = 1;
1811	}
1812
1813	return 0;
1814
1815err:
1816	pm_runtime_enable(dev);
1817	return ret;
1818}
1819EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1820
1821/**
1822 * pm_runtime_force_resume - Force a device into resume state if needed.
1823 * @dev: Device to resume.
1824 *
1825 * Prior to invoking this function we expect the user to have brought the device
1826 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1827 * those actions and bring the device back to full power, if it is expected to be
1828 * used on system resume.  Otherwise, we defer the resume to be managed via
1829 * runtime PM.
1830 *
1831 * Typically this function may be invoked from a system resume callback.
1832 */
1833int pm_runtime_force_resume(struct device *dev)
1834{
1835	int (*callback)(struct device *);
1836	int ret = 0;
1837
1838	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1839		goto out;
1840
1841	/*
1842	 * The value of the parent's children counter is correct already, so
1843	 * just update the status of the device.
1844	 */
1845	__update_runtime_status(dev, RPM_ACTIVE);
1846
1847	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1848
1849	ret = callback ? callback(dev) : 0;
1850	if (ret) {
1851		pm_runtime_set_suspended(dev);
1852		goto out;
1853	}
1854
1855	pm_runtime_mark_last_busy(dev);
1856out:
1857	dev->power.needs_force_resume = 0;
1858	pm_runtime_enable(dev);
1859	return ret;
1860}
1861EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
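/*
 * Editor's sketch (not part of runtime.c): how a driver commonly wires the
 * two helpers above into its dev_pm_ops so the runtime PM path is reused
 * for system-wide sleep.  The foo_* names are hypothetical;
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() are the standard
 * macros from <linux/pm.h>.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* ... put the hypothetical device into a low-power state ... */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ... bring the hypothetical device back to full power ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};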
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   7 */
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <linux/rculist.h>
  15#include <trace/events/rpm.h>
  16
  17#include "../base.h"
  18#include "power.h"
  19
  20typedef int (*pm_callback_t)(struct device *);
  21
  22static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  23{
  24	pm_callback_t cb;
  25	const struct dev_pm_ops *ops;
  26
  27	if (dev->pm_domain)
  28		ops = &dev->pm_domain->ops;
  29	else if (dev->type && dev->type->pm)
  30		ops = dev->type->pm;
  31	else if (dev->class && dev->class->pm)
  32		ops = dev->class->pm;
  33	else if (dev->bus && dev->bus->pm)
  34		ops = dev->bus->pm;
  35	else
  36		ops = NULL;
  37
  38	if (ops)
  39		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  40	else
  41		cb = NULL;
  42
  43	if (!cb && dev->driver && dev->driver->pm)
  44		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  45
  46	return cb;
  47}
  48
  49#define RPM_GET_CALLBACK(dev, callback) \
  50		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  51
  52static int rpm_resume(struct device *dev, int rpmflags);
  53static int rpm_suspend(struct device *dev, int rpmflags);
  54
  55/**
  56 * update_pm_runtime_accounting - Update the time accounting of power states
  57 * @dev: Device to update the accounting for
  58 *
  59 * In order to be able to have time accounting of the various power states
  60 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  61 * PM), we need to track the time spent in each state.
  62 * update_pm_runtime_accounting must be called each time before the
  63 * runtime_status field is updated, to account the time in the old state
  64 * correctly.
  65 */
  66static void update_pm_runtime_accounting(struct device *dev)
  67{
  68	u64 now, last, delta;
  69
  70	if (dev->power.disable_depth > 0)
  71		return;
  72
  73	last = dev->power.accounting_timestamp;
  74
  75	now = ktime_get_mono_fast_ns();
  76	dev->power.accounting_timestamp = now;
  77
  78	/*
  79	 * Because ktime_get_mono_fast_ns() is not monotonic during
  80	 * timekeeping updates, ensure that 'now' is after the last saved
  81	 * timestamp.
  82	 */
  83	if (now < last)
  84		return;
  85
  86	delta = now - last;
  87
  88	if (dev->power.runtime_status == RPM_SUSPENDED)
  89		dev->power.suspended_time += delta;
  90	else
  91		dev->power.active_time += delta;
  92}
  93
  94static void __update_runtime_status(struct device *dev, enum rpm_status status)
  95{
  96	update_pm_runtime_accounting(dev);
  97	trace_rpm_status(dev, status);
  98	dev->power.runtime_status = status;
  99}
 100
 101static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 102{
 103	u64 time;
 104	unsigned long flags;
 105
 106	spin_lock_irqsave(&dev->power.lock, flags);
 107
 108	update_pm_runtime_accounting(dev);
 109	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 110
 111	spin_unlock_irqrestore(&dev->power.lock, flags);
 112
 113	return time;
 114}
 115
 116u64 pm_runtime_active_time(struct device *dev)
 117{
 118	return rpm_get_accounted_time(dev, false);
 119}
 120
 121u64 pm_runtime_suspended_time(struct device *dev)
 122{
 123	return rpm_get_accounted_time(dev, true);
 124}
 125EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 126
 127/**
 128 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 129 * @dev: Device to handle.
 130 */
 131static void pm_runtime_deactivate_timer(struct device *dev)
 132{
 133	if (dev->power.timer_expires > 0) {
 134		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 135		dev->power.timer_expires = 0;
 136	}
 137}
 138
 139/**
 140 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 141 * @dev: Device to handle.
 142 */
 143static void pm_runtime_cancel_pending(struct device *dev)
 144{
 145	pm_runtime_deactivate_timer(dev);
 146	/*
 147	 * In case there's a request pending, make sure its work function will
 148	 * return without doing anything.
 149	 */
 150	dev->power.request = RPM_REQ_NONE;
 151}
 152
 153/*
 154 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 155 * @dev: Device to handle.
 156 *
 157 * Compute the autosuspend-delay expiration time based on the device's
 158 * power.last_busy time.  If the delay has already expired or is disabled
 159 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 160 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 161 *
 162 * This function may be called either with or without dev->power.lock held.
 163 * Either way it can be racy, since power.last_busy may be updated at any time.
 164 */
 165u64 pm_runtime_autosuspend_expiration(struct device *dev)
 166{
 167	int autosuspend_delay;
 168	u64 expires;
 169
 170	if (!dev->power.use_autosuspend)
 171		return 0;
 172
 173	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 174	if (autosuspend_delay < 0)
 175		return 0;
 176
 177	expires  = READ_ONCE(dev->power.last_busy);
 178	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 179	if (expires > ktime_get_mono_fast_ns())
 180		return expires;	/* Expires in the future */
 181
 182	return 0;
 183}
 184EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
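/*
 * Editor's note: a worked example of the computation above.  With
 * power.autosuspend_delay = 2000 ms and power.last_busy = T ns, the
 * expiration time is T + 2000 * NSEC_PER_MSEC = T + 2,000,000,000 ns;
 * if ktime_get_mono_fast_ns() has already passed that point, 0 is
 * returned and the autosuspend may proceed immediately.
 */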
 185
 186static int dev_memalloc_noio(struct device *dev, void *data)
 187{
 188	return dev->power.memalloc_noio;
 189}
 190
 191/*
 192 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 193 * @dev: Device to handle.
 194 * @enable: True for setting the flag and False for clearing the flag.
 195 *
 196 * Set the flag for all devices in the path from the device to the
 197 * root device in the device tree if @enable is true, otherwise clear
 198 * the flag for devices in the path whose siblings don't set the flag.
 199 *
 200 * The function should only be called by a block device or network
 201 * device driver to solve the deadlock problem during runtime
 202 * resume/suspend:
 203 *
 204 *     If memory allocation with GFP_KERNEL is called inside the runtime
 205 *     resume/suspend callback of any one of the device's ancestors (or the
 206 *     block device itself), the deadlock may be triggered inside the
 207 *     memory allocation since it might not complete until the block
 208 *     device becomes active and the involved page I/O finishes.  This
 209 *     situation was first pointed out by Alan Stern.  Network devices
 210 *     are involved in iSCSI kinds of situations.
 211 *
 212 * dev_hotplug_mutex is held in the function to handle the hotplug race,
 213 * because pm_runtime_set_memalloc_noio() may be called in an async
 214 * probe().
 215 *
 216 * The function should be called between device_add() and device_del()
 217 * on the affected device (block/network device).
 218 */
 219void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 220{
 221	static DEFINE_MUTEX(dev_hotplug_mutex);
 222
 223	mutex_lock(&dev_hotplug_mutex);
 224	for (;;) {
 225		bool enabled;
 226
 227		/* hold power lock since bitfield is not SMP-safe. */
 228		spin_lock_irq(&dev->power.lock);
 229		enabled = dev->power.memalloc_noio;
 230		dev->power.memalloc_noio = enable;
 231		spin_unlock_irq(&dev->power.lock);
 232
 233		/*
 234	 * No need to enable the ancestors any more if the device
 235	 * has already been enabled.
 236		 */
 237		if (enabled && enable)
 238			break;
 239
 240		dev = dev->parent;
 241
 242		/*
 243	 * Clear the flag of the parent device only if none of
 244	 * its children has the flag set, because an ancestor's
 245	 * flag may have been set by any one of its descendants.
 246		 */
 247		if (!dev || (!enable &&
 248		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
 249			break;
 250	}
 251	mutex_unlock(&dev_hotplug_mutex);
 252}
 253EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
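/*
 * Editor's sketch (hypothetical foo_* helpers): the documented call window
 * for pm_runtime_set_memalloc_noio() is between device_add() and
 * device_del() on the affected block or network device.
 */
static int foo_blk_register(struct device *dev)
{
	int error = device_add(dev);

	if (error)
		return error;

	/* Runtime PM callbacks in this path may depend on page I/O. */
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}

static void foo_blk_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	device_del(dev);
}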
 254
 255/**
 256 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 257 * @dev: Device to test.
 258 */
 259static int rpm_check_suspend_allowed(struct device *dev)
 260{
 261	int retval = 0;
 262
 263	if (dev->power.runtime_error)
 264		retval = -EINVAL;
 265	else if (dev->power.disable_depth > 0)
 266		retval = -EACCES;
 267	else if (atomic_read(&dev->power.usage_count))
 268		retval = -EAGAIN;
 269	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume &&
 274	    dev->power.runtime_status == RPM_SUSPENDING) ||
 275	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
 276		retval = -EAGAIN;
 277	else if (__dev_pm_qos_resume_latency(dev) == 0)
 278		retval = -EPERM;
 279	else if (dev->power.runtime_status == RPM_SUSPENDED)
 280		retval = 1;
 281
 282	return retval;
 283}
 284
 285static int rpm_get_suppliers(struct device *dev)
 286{
 287	struct device_link *link;
 288
 289	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 290				device_links_read_lock_held()) {
 291		int retval;
 292
 293		if (!(link->flags & DL_FLAG_PM_RUNTIME))
 294			continue;
 295
 296		retval = pm_runtime_get_sync(link->supplier);
 297		/* Ignore suppliers with disabled runtime PM. */
 298		if (retval < 0 && retval != -EACCES) {
 299			pm_runtime_put_noidle(link->supplier);
 300			return retval;
 301		}
 302		refcount_inc(&link->rpm_active);
 303	}
 304	return 0;
 305}
 306
 307/**
 308 * pm_runtime_release_supplier - Drop references to device link's supplier.
 309 * @link: Target device link.
 310 *
 311 * Drop all runtime PM references associated with @link to its supplier device.
 312 */
 313void pm_runtime_release_supplier(struct device_link *link)
 314{
 315	struct device *supplier = link->supplier;
 316
 317	/*
 318	 * The additional power.usage_count check is a safety net in case
 319	 * the rpm_active refcount becomes saturated, in which case
 320	 * refcount_dec_not_one() would return true forever, but it is not
 321	 * strictly necessary.
 322	 */
 323	while (refcount_dec_not_one(&link->rpm_active) &&
 324	       atomic_read(&supplier->power.usage_count) > 0)
 325		pm_runtime_put_noidle(supplier);
 326}
 327
 328static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 329{
 330	struct device_link *link;
 331
 332	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 333				device_links_read_lock_held()) {
 334		pm_runtime_release_supplier(link);
 335		if (try_to_suspend)
 336			pm_request_idle(link->supplier);
 337	}
 338}
 339
 340static void rpm_put_suppliers(struct device *dev)
 341{
 342	__rpm_put_suppliers(dev, true);
 343}
 344
 345static void rpm_suspend_suppliers(struct device *dev)
 346{
 347	struct device_link *link;
 348	int idx = device_links_read_lock();
 349
 350	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 351				device_links_read_lock_held())
 352		pm_request_idle(link->supplier);
 353
 354	device_links_read_unlock(idx);
 355}
 356
 357/**
 358 * __rpm_callback - Run a given runtime PM callback for a given device.
 359 * @cb: Runtime PM callback to run.
 360 * @dev: Device to run the callback for.
 361 */
 362static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 363	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 364{
 365	int retval = 0, idx;
 366	bool use_links = dev->power.links_count > 0;
 367
 368	if (dev->power.irq_safe) {
 369		spin_unlock(&dev->power.lock);
 370	} else {
 371		spin_unlock_irq(&dev->power.lock);
 372
 373		/*
 374		 * Resume suppliers if necessary.
 375		 *
 376		 * The device's runtime PM status cannot change until this
 377		 * routine returns, so it is safe to read the status outside of
 378		 * the lock.
 379		 */
 380		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 381			idx = device_links_read_lock();
 382
 383			retval = rpm_get_suppliers(dev);
 384			if (retval) {
 385				rpm_put_suppliers(dev);
 386				goto fail;
 387			}
 388
 389			device_links_read_unlock(idx);
 390		}
 391	}
 392
 393	if (cb)
 394		retval = cb(dev);
 395
 396	if (dev->power.irq_safe) {
 397		spin_lock(&dev->power.lock);
 398	} else {
 399		/*
 400		 * If the device is suspending and the callback has returned
 401		 * success, drop the usage counters of the suppliers that have
 402		 * been reference counted on its resume.
 403		 *
 404		 * Do that if resume fails too.
 405		 */
 406		if (use_links &&
 407		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
 408		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
 409			idx = device_links_read_lock();
 410
 411			__rpm_put_suppliers(dev, false);
 412
 413fail:
 414			device_links_read_unlock(idx);
 415		}
 416
 417		spin_lock_irq(&dev->power.lock);
 418	}
 419
 420	return retval;
 421}
 422
 423/**
 424 * rpm_callback - Run a given runtime PM callback for a given device.
 425 * @cb: Runtime PM callback to run.
 426 * @dev: Device to run the callback for.
 427 */
 428static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 429{
 430	int retval;
 431
 432	if (dev->power.memalloc_noio) {
 433		unsigned int noio_flag;
 434
 435		/*
 436		 * A deadlock might be caused if a memory allocation with
 437		 * GFP_KERNEL happens inside the runtime_suspend and
 438		 * runtime_resume callbacks of one block device's
 439		 * ancestor or the block device itself.  A network
 440		 * device might be thought of as part of an iSCSI block
 441		 * device, so the network device and its ancestors should
 442		 * be marked as memalloc_noio too.
 443		 */
 444		noio_flag = memalloc_noio_save();
 445		retval = __rpm_callback(cb, dev);
 446		memalloc_noio_restore(noio_flag);
 447	} else {
 448		retval = __rpm_callback(cb, dev);
 449	}
 450
 451	dev->power.runtime_error = retval;
 452	return retval != -EACCES ? retval : -EIO;
 453}
 454
 455/**
 456 * rpm_idle - Notify device bus type if the device can be suspended.
 457 * @dev: Device to notify the bus type about.
 458 * @rpmflags: Flag bits.
 459 *
 460 * Check if the device's runtime PM status allows it to be suspended.  If
 461 * another idle notification has been started earlier, return immediately.  If
 462 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 463 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 464 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 465 *
 466 * This function must be called under dev->power.lock with interrupts disabled.
 467 */
 468static int rpm_idle(struct device *dev, int rpmflags)
 469{
 470	int (*callback)(struct device *);
 471	int retval;
 472
 473	trace_rpm_idle(dev, rpmflags);
 474	retval = rpm_check_suspend_allowed(dev);
 475	if (retval < 0)
 476		;	/* Conditions are wrong. */
 477
 478	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 479	else if (dev->power.runtime_status != RPM_ACTIVE)
 480		retval = -EAGAIN;
 481
 482	/*
 483	 * Any pending request other than an idle notification takes
 484	 * precedence over us, except that the timer may be running.
 485	 */
 486	else if (dev->power.request_pending &&
 487	    dev->power.request > RPM_REQ_IDLE)
 488		retval = -EAGAIN;
 489
 490	/* Act as though RPM_NOWAIT is always set. */
 491	else if (dev->power.idle_notification)
 492		retval = -EINPROGRESS;
 493
 494	if (retval)
 495		goto out;
 496
 497	/* Pending requests need to be canceled. */
 498	dev->power.request = RPM_REQ_NONE;
 499
 500	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 501
 502	/* If no callback assume success. */
 503	if (!callback || dev->power.no_callbacks)
 504		goto out;
 505
 506	/* Carry out an asynchronous or a synchronous idle notification. */
 507	if (rpmflags & RPM_ASYNC) {
 508		dev->power.request = RPM_REQ_IDLE;
 509		if (!dev->power.request_pending) {
 510			dev->power.request_pending = true;
 511			queue_work(pm_wq, &dev->power.work);
 512		}
 513		trace_rpm_return_int(dev, _THIS_IP_, 0);
 514		return 0;
 515	}
 516
 517	dev->power.idle_notification = true;
 518
 519	if (dev->power.irq_safe)
 520		spin_unlock(&dev->power.lock);
 521	else
 522		spin_unlock_irq(&dev->power.lock);
 523
 524	retval = callback(dev);
 525
 526	if (dev->power.irq_safe)
 527		spin_lock(&dev->power.lock);
 528	else
 529		spin_lock_irq(&dev->power.lock);
 530
 531	dev->power.idle_notification = false;
 532	wake_up_all(&dev->power.wait_queue);
 533
 534 out:
 535	trace_rpm_return_int(dev, _THIS_IP_, retval);
 536	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 537}
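/*
 * Editor's sketch (not part of runtime.c): a hypothetical ->runtime_idle()
 * callback as seen from a driver.  Per the logic above, returning 0 lets
 * the core go on to rpm_suspend(dev, rpmflags | RPM_AUTO), while a negative
 * error such as -EBUSY vetoes the suspend.  foo_hw_busy() is an assumed
 * driver-specific helper.
 */
static bool foo_hw_busy(struct device *dev);	/* assumed helper */

static int foo_runtime_idle(struct device *dev)
{
	if (foo_hw_busy(dev))
		return -EBUSY;	/* veto: keep the device active for now */

	return 0;		/* let the core autosuspend the device */
}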
 538
 539/**
 540 * rpm_suspend - Carry out runtime suspend of given device.
 541 * @dev: Device to suspend.
 542 * @rpmflags: Flag bits.
 543 *
 544 * Check if the device's runtime PM status allows it to be suspended.
 545 * Cancel a pending idle notification, autosuspend or suspend. If
 546 * another suspend has been started earlier, either return immediately
 547 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 548 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 549 * otherwise run the ->runtime_suspend() callback directly.  If
 550 * ->runtime_suspend() succeeded and a deferred resume was requested while
 551 * the callback was running, carry the resume out; otherwise send an idle
 552 * notification for its parent (if the suspend succeeded and both
 553 * ignore_children of parent->power and irq_safe of dev->power are unset).
 554 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 555 * flag is set and the next autosuspend-delay expiration time is in the
 556 * future, schedule another autosuspend attempt.
 557 *
 558 * This function must be called under dev->power.lock with interrupts disabled.
 559 */
 560static int rpm_suspend(struct device *dev, int rpmflags)
 561	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 562{
 563	int (*callback)(struct device *);
 564	struct device *parent = NULL;
 565	int retval;
 566
 567	trace_rpm_suspend(dev, rpmflags);
 568
 569 repeat:
 570	retval = rpm_check_suspend_allowed(dev);
 571	if (retval < 0)
 572		goto out;	/* Conditions are wrong. */
 573
 574	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 575	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
 576		retval = -EAGAIN;
 577
 578	if (retval)
 579		goto out;
 580
 581	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 582	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
 583		u64 expires = pm_runtime_autosuspend_expiration(dev);
 584
 585		if (expires != 0) {
 586			/* Pending requests need to be canceled. */
 587			dev->power.request = RPM_REQ_NONE;
 588
 589			/*
 590			 * Optimization: If the timer is already running and is
 591			 * set to expire at or before the autosuspend delay,
 592			 * avoid the overhead of resetting it.  Just let it
 593			 * expire; pm_suspend_timer_fn() will take care of the
 594			 * rest.
 595			 */
 596			if (!(dev->power.timer_expires &&
 597			    dev->power.timer_expires <= expires)) {
 598				/*
 599				 * We add a slack of 25% to gather wakeups
 600				 * without sacrificing the granularity.
 601				 */
 602				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 603						    (NSEC_PER_MSEC >> 2);
 604
 605				dev->power.timer_expires = expires;
 606				hrtimer_start_range_ns(&dev->power.suspend_timer,
 607						       ns_to_ktime(expires),
 608						       slack,
 609						       HRTIMER_MODE_ABS);
 610			}
 611			dev->power.timer_autosuspends = 1;
 612			goto out;
 613		}
 614	}
 615
 616	/* Other scheduled or pending requests need to be canceled. */
 617	pm_runtime_cancel_pending(dev);
 618
 619	if (dev->power.runtime_status == RPM_SUSPENDING) {
 620		DEFINE_WAIT(wait);
 621
 622		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 623			retval = -EINPROGRESS;
 624			goto out;
 625		}
 626
 627		if (dev->power.irq_safe) {
 628			spin_unlock(&dev->power.lock);
 629
 630			cpu_relax();
 631
 632			spin_lock(&dev->power.lock);
 633			goto repeat;
 634		}
 635
 636		/* Wait for the other suspend running in parallel with us. */
 637		for (;;) {
 638			prepare_to_wait(&dev->power.wait_queue, &wait,
 639					TASK_UNINTERRUPTIBLE);
 640			if (dev->power.runtime_status != RPM_SUSPENDING)
 641				break;
 642
 643			spin_unlock_irq(&dev->power.lock);
 644
 645			schedule();
 646
 647			spin_lock_irq(&dev->power.lock);
 648		}
 649		finish_wait(&dev->power.wait_queue, &wait);
 650		goto repeat;
 651	}
 652
 653	if (dev->power.no_callbacks)
 654		goto no_callback;	/* Assume success. */
 655
 656	/* Carry out an asynchronous or a synchronous suspend. */
 657	if (rpmflags & RPM_ASYNC) {
 658		dev->power.request = (rpmflags & RPM_AUTO) ?
 659		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 660		if (!dev->power.request_pending) {
 661			dev->power.request_pending = true;
 662			queue_work(pm_wq, &dev->power.work);
 663		}
 664		goto out;
 665	}
 666
 667	__update_runtime_status(dev, RPM_SUSPENDING);
 668
 669	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 670
 671	dev_pm_enable_wake_irq_check(dev, true);
 672	retval = rpm_callback(callback, dev);
 673	if (retval)
 674		goto fail;
 675
 676	dev_pm_enable_wake_irq_complete(dev);
 677
 678 no_callback:
 679	__update_runtime_status(dev, RPM_SUSPENDED);
 680	pm_runtime_deactivate_timer(dev);
 681
 682	if (dev->parent) {
 683		parent = dev->parent;
 684		atomic_add_unless(&parent->power.child_count, -1, 0);
 685	}
 686	wake_up_all(&dev->power.wait_queue);
 687
 688	if (dev->power.deferred_resume) {
 689		dev->power.deferred_resume = false;
 690		rpm_resume(dev, 0);
 691		retval = -EAGAIN;
 692		goto out;
 693	}
 694
 695	if (dev->power.irq_safe)
 696		goto out;
 697
 698	/* Maybe the parent is now able to suspend. */
 699	if (parent && !parent->power.ignore_children) {
 700		spin_unlock(&dev->power.lock);
 701
 702		spin_lock(&parent->power.lock);
 703		rpm_idle(parent, RPM_ASYNC);
 704		spin_unlock(&parent->power.lock);
 705
 706		spin_lock(&dev->power.lock);
 707	}
 708	/* Maybe the suppliers are now able to suspend. */
 709	if (dev->power.links_count > 0) {
 710		spin_unlock_irq(&dev->power.lock);
 711
 712		rpm_suspend_suppliers(dev);
 713
 714		spin_lock_irq(&dev->power.lock);
 715	}
 716
 717 out:
 718	trace_rpm_return_int(dev, _THIS_IP_, retval);
 719
 720	return retval;
 721
 722 fail:
 723	dev_pm_disable_wake_irq_check(dev, true);
 724	__update_runtime_status(dev, RPM_ACTIVE);
 725	dev->power.deferred_resume = false;
 726	wake_up_all(&dev->power.wait_queue);
 727
 728	if (retval == -EAGAIN || retval == -EBUSY) {
 729		dev->power.runtime_error = 0;
 730
 731		/*
 732		 * If the callback routine failed an autosuspend, and
 733		 * if the last_busy time has been updated so that there
 734		 * is a new autosuspend expiration time, automatically
 735		 * reschedule another autosuspend.
 736		 */
 737		if ((rpmflags & RPM_AUTO) &&
 738		    pm_runtime_autosuspend_expiration(dev) != 0)
 739			goto repeat;
 740	} else {
 741		pm_runtime_cancel_pending(dev);
 742	}
 743	goto out;
 744}
 745
 746/**
 747 * rpm_resume - Carry out runtime resume of given device.
 748 * @dev: Device to resume.
 749 * @rpmflags: Flag bits.
 750 *
 751 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 752 * any scheduled or pending requests.  If another resume has been started
 753 * earlier, either return immediately or wait for it to finish, depending on the
 754 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 755 * parallel with this function, either tell the other process to resume after
 756 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 757 * flag is set then queue a resume request; otherwise run the
 758 * ->runtime_resume() callback directly.  Queue an idle notification for the
 759 * device if the resume succeeded.
 760 *
 761 * This function must be called under dev->power.lock with interrupts disabled.
 762 */
 763static int rpm_resume(struct device *dev, int rpmflags)
 764	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 765{
 766	int (*callback)(struct device *);
 767	struct device *parent = NULL;
 768	int retval = 0;
 769
 770	trace_rpm_resume(dev, rpmflags);
 771
 772 repeat:
 773	if (dev->power.runtime_error) {
 774		retval = -EINVAL;
 775	} else if (dev->power.disable_depth > 0) {
 776		if (dev->power.runtime_status == RPM_ACTIVE &&
 777		    dev->power.last_status == RPM_ACTIVE)
 778			retval = 1;
 779		else
 780			retval = -EACCES;
 781	}
 782	if (retval)
 783		goto out;
 784
 785	/*
 786	 * Other scheduled or pending requests need to be canceled.  Small
 787	 * optimization: If an autosuspend timer is running, leave it running
 788	 * rather than cancelling it now only to restart it again in the near
 789	 * future.
 790	 */
 791	dev->power.request = RPM_REQ_NONE;
 792	if (!dev->power.timer_autosuspends)
 793		pm_runtime_deactivate_timer(dev);
 794
 795	if (dev->power.runtime_status == RPM_ACTIVE) {
 796		retval = 1;
 797		goto out;
 798	}
 799
 800	if (dev->power.runtime_status == RPM_RESUMING ||
 801	    dev->power.runtime_status == RPM_SUSPENDING) {
 802		DEFINE_WAIT(wait);
 803
 804		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 805			if (dev->power.runtime_status == RPM_SUSPENDING) {
 806				dev->power.deferred_resume = true;
 807				if (rpmflags & RPM_NOWAIT)
 808					retval = -EINPROGRESS;
 809			} else {
 810				retval = -EINPROGRESS;
 811			}
 812			goto out;
 813		}
 814
 815		if (dev->power.irq_safe) {
 816			spin_unlock(&dev->power.lock);
 817
 818			cpu_relax();
 819
 820			spin_lock(&dev->power.lock);
 821			goto repeat;
 822		}
 823
 824		/* Wait for the operation carried out in parallel with us. */
 825		for (;;) {
 826			prepare_to_wait(&dev->power.wait_queue, &wait,
 827					TASK_UNINTERRUPTIBLE);
 828			if (dev->power.runtime_status != RPM_RESUMING &&
 829			    dev->power.runtime_status != RPM_SUSPENDING)
 830				break;
 831
 832			spin_unlock_irq(&dev->power.lock);
 833
 834			schedule();
 835
 836			spin_lock_irq(&dev->power.lock);
 837		}
 838		finish_wait(&dev->power.wait_queue, &wait);
 839		goto repeat;
 840	}
 841
 842	/*
 843	 * See if we can skip waking up the parent.  This is safe only if
 844	 * power.no_callbacks is set, because otherwise we don't know whether
 845	 * the resume will actually succeed.
 846	 */
 847	if (dev->power.no_callbacks && !parent && dev->parent) {
 848		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 849		if (dev->parent->power.disable_depth > 0 ||
 850		    dev->parent->power.ignore_children ||
 851		    dev->parent->power.runtime_status == RPM_ACTIVE) {
 852			atomic_inc(&dev->parent->power.child_count);
 853			spin_unlock(&dev->parent->power.lock);
 854			retval = 1;
 855			goto no_callback;	/* Assume success. */
 856		}
 857		spin_unlock(&dev->parent->power.lock);
 858	}
 859
 860	/* Carry out an asynchronous or a synchronous resume. */
 861	if (rpmflags & RPM_ASYNC) {
 862		dev->power.request = RPM_REQ_RESUME;
 863		if (!dev->power.request_pending) {
 864			dev->power.request_pending = true;
 865			queue_work(pm_wq, &dev->power.work);
 866		}
 867		retval = 0;
 868		goto out;
 869	}
 870
 871	if (!parent && dev->parent) {
 872		/*
 873		 * Increment the parent's usage counter and resume it if
 874		 * necessary.  Not needed if dev is irq-safe; then the
 875		 * parent is permanently resumed.
 876		 */
 877		parent = dev->parent;
 878		if (dev->power.irq_safe)
 879			goto skip_parent;
 880
 881		spin_unlock(&dev->power.lock);
 882
 883		pm_runtime_get_noresume(parent);
 884
 885		spin_lock(&parent->power.lock);
 886		/*
 887		 * Resume the parent if it has runtime PM enabled and has not
 888		 * been set to ignore its children.
 889		 */
 890		if (!parent->power.disable_depth &&
 891		    !parent->power.ignore_children) {
 892			rpm_resume(parent, 0);
 893			if (parent->power.runtime_status != RPM_ACTIVE)
 894				retval = -EBUSY;
 895		}
 896		spin_unlock(&parent->power.lock);
 897
 898		spin_lock(&dev->power.lock);
 899		if (retval)
 900			goto out;
 901
 902		goto repeat;
 903	}
 904 skip_parent:
 905
 906	if (dev->power.no_callbacks)
 907		goto no_callback;	/* Assume success. */
 908
 909	__update_runtime_status(dev, RPM_RESUMING);
 910
 911	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 912
 913	dev_pm_disable_wake_irq_check(dev, false);
 914	retval = rpm_callback(callback, dev);
 915	if (retval) {
 916		__update_runtime_status(dev, RPM_SUSPENDED);
 917		pm_runtime_cancel_pending(dev);
 918		dev_pm_enable_wake_irq_check(dev, false);
 919	} else {
 920 no_callback:
 921		__update_runtime_status(dev, RPM_ACTIVE);
 922		pm_runtime_mark_last_busy(dev);
 923		if (parent)
 924			atomic_inc(&parent->power.child_count);
 925	}
 926	wake_up_all(&dev->power.wait_queue);
 927
 928	if (retval >= 0)
 929		rpm_idle(dev, RPM_ASYNC);
 930
 931 out:
 932	if (parent && !dev->power.irq_safe) {
 933		spin_unlock_irq(&dev->power.lock);
 934
 935		pm_runtime_put(parent);
 936
 937		spin_lock_irq(&dev->power.lock);
 938	}
 939
 940	trace_rpm_return_int(dev, _THIS_IP_, retval);
 941
 942	return retval;
 943}
 944
 945/**
 946 * pm_runtime_work - Universal runtime PM work function.
 947 * @work: Work structure used for scheduling the execution of this function.
 948 *
 949 * Use @work to get the device object the work is to be done for, determine what
 950 * is to be done and execute the appropriate runtime PM function.
 951 */
 952static void pm_runtime_work(struct work_struct *work)
 953{
 954	struct device *dev = container_of(work, struct device, power.work);
 955	enum rpm_request req;
 956
 957	spin_lock_irq(&dev->power.lock);
 958
 959	if (!dev->power.request_pending)
 960		goto out;
 961
 962	req = dev->power.request;
 963	dev->power.request = RPM_REQ_NONE;
 964	dev->power.request_pending = false;
 965
 966	switch (req) {
 967	case RPM_REQ_NONE:
 968		break;
 969	case RPM_REQ_IDLE:
 970		rpm_idle(dev, RPM_NOWAIT);
 971		break;
 972	case RPM_REQ_SUSPEND:
 973		rpm_suspend(dev, RPM_NOWAIT);
 974		break;
 975	case RPM_REQ_AUTOSUSPEND:
 976		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 977		break;
 978	case RPM_REQ_RESUME:
 979		rpm_resume(dev, RPM_NOWAIT);
 980		break;
 981	}
 982
 983 out:
 984	spin_unlock_irq(&dev->power.lock);
 985}
 986
 987/**
 988 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 989 * @timer: hrtimer used by pm_schedule_suspend().
 990 *
 991 * Check if the time is right and queue a suspend request.
 992 */
 993static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 994{
 995	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 996	unsigned long flags;
 997	u64 expires;
 998
 999	spin_lock_irqsave(&dev->power.lock, flags);
1000
1001	expires = dev->power.timer_expires;
1002	/*
1003	 * If 'expires' is after the current time, the timer has fired
1004	 * too early, so do nothing.
1005	 */
1006	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
1007		dev->power.timer_expires = 0;
1008		rpm_suspend(dev, dev->power.timer_autosuspends ?
1009		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
1010	}
1011
1012	spin_unlock_irqrestore(&dev->power.lock, flags);
1013
1014	return HRTIMER_NORESTART;
1015}
1016
1017/**
1018 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1019 * @dev: Device to suspend.
1020 * @delay: Time to wait before submitting a suspend request, in milliseconds.
1021 */
1022int pm_schedule_suspend(struct device *dev, unsigned int delay)
1023{
1024	unsigned long flags;
1025	u64 expires;
1026	int retval;
1027
1028	spin_lock_irqsave(&dev->power.lock, flags);
1029
1030	if (!delay) {
1031		retval = rpm_suspend(dev, RPM_ASYNC);
1032		goto out;
1033	}
1034
1035	retval = rpm_check_suspend_allowed(dev);
1036	if (retval)
1037		goto out;
1038
1039	/* Other scheduled or pending requests need to be canceled. */
1040	pm_runtime_cancel_pending(dev);
1041
1042	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1043	dev->power.timer_expires = expires;
1044	dev->power.timer_autosuspends = 0;
1045	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1046
1047 out:
1048	spin_unlock_irqrestore(&dev->power.lock, flags);
1049
1050	return retval;
1051}
1052EXPORT_SYMBOL_GPL(pm_schedule_suspend);
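/*
 * Editor's sketch: ask for a suspend attempt 100 ms from now instead of
 * suspending synchronously; foo_done_with_hw() is a hypothetical call site.
 */
static void foo_done_with_hw(struct device *dev)
{
	int error = pm_schedule_suspend(dev, 100);	/* delay in ms */

	if (error < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", error);
}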
1053
1054static int rpm_drop_usage_count(struct device *dev)
1055{
1056	int ret;
1057
1058	ret = atomic_sub_return(1, &dev->power.usage_count);
1059	if (ret >= 0)
1060		return ret;
1061
1062	/*
1063	 * Because rpm_resume() does not check the usage counter, it will resume
1064	 * the device even if the usage counter is 0 or negative, so it is
1065	 * sufficient to increment the usage counter here to reverse the change
1066	 * made above.
1067	 */
1068	atomic_inc(&dev->power.usage_count);
1069	dev_warn(dev, "Runtime PM usage count underflow!\n");
1070	return -EINVAL;
1071}
1072
1073/**
1074 * __pm_runtime_idle - Entry point for runtime idle operations.
1075 * @dev: Device to send idle notification for.
1076 * @rpmflags: Flag bits.
1077 *
1078 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1079 * return immediately if it is larger than zero (if it becomes negative, log a
1080 * warning, increment it, and return an error).  Then carry out an idle
1081 * notification, either synchronous or asynchronous.
1082 *
1083 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1084 * or if pm_runtime_irq_safe() has been called.
1085 */
1086int __pm_runtime_idle(struct device *dev, int rpmflags)
1087{
1088	unsigned long flags;
1089	int retval;
1090
1091	if (rpmflags & RPM_GET_PUT) {
1092		retval = rpm_drop_usage_count(dev);
1093		if (retval < 0) {
1094			return retval;
1095		} else if (retval > 0) {
1096			trace_rpm_usage(dev, rpmflags);
1097			return 0;
1098		}
1099	}
1100
1101	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1102
1103	spin_lock_irqsave(&dev->power.lock, flags);
1104	retval = rpm_idle(dev, rpmflags);
1105	spin_unlock_irqrestore(&dev->power.lock, flags);
1106
1107	return retval;
1108}
1109EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1110
1111/**
1112 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1113 * @dev: Device to suspend.
1114 * @rpmflags: Flag bits.
1115 *
1116 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1117 * return immediately if it is larger than zero (if it becomes negative, log a
1118 * warning, increment it, and return an error).  Then carry out a suspend,
1119 * either synchronous or asynchronous.
1120 *
1121 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1122 * or if pm_runtime_irq_safe() has been called.
1123 */
1124int __pm_runtime_suspend(struct device *dev, int rpmflags)
1125{
1126	unsigned long flags;
1127	int retval;
1128
1129	if (rpmflags & RPM_GET_PUT) {
1130		retval = rpm_drop_usage_count(dev);
1131		if (retval < 0) {
1132			return retval;
1133		} else if (retval > 0) {
1134			trace_rpm_usage(dev, rpmflags);
1135			return 0;
1136		}
1137	}
1138
1139	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1140
1141	spin_lock_irqsave(&dev->power.lock, flags);
1142	retval = rpm_suspend(dev, rpmflags);
1143	spin_unlock_irqrestore(&dev->power.lock, flags);
1144
1145	return retval;
1146}
1147EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1148
1149/**
1150 * __pm_runtime_resume - Entry point for runtime resume operations.
1151 * @dev: Device to resume.
1152 * @rpmflags: Flag bits.
1153 *
1154 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1155 * carry out a resume, either synchronous or asynchronous.
1156 *
1157 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1158 * or if pm_runtime_irq_safe() has been called.
1159 */
1160int __pm_runtime_resume(struct device *dev, int rpmflags)
1161{
1162	unsigned long flags;
1163	int retval;
1164
1165	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1166			dev->power.runtime_status != RPM_ACTIVE);
1167
1168	if (rpmflags & RPM_GET_PUT)
1169		atomic_inc(&dev->power.usage_count);
1170
1171	spin_lock_irqsave(&dev->power.lock, flags);
1172	retval = rpm_resume(dev, rpmflags);
1173	spin_unlock_irqrestore(&dev->power.lock, flags);
1174
1175	return retval;
1176}
1177EXPORT_SYMBOL_GPL(__pm_runtime_resume);
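/*
 * Editor's sketch: the canonical get/put pattern built on the entry points
 * above via the inline helpers from <linux/pm_runtime.h>.  foo_do_io() is
 * hypothetical.
 */
static int foo_do_io(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);	/* resume + usage count++ */

	if (error < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		return error;
	}

	/* ... the hardware is guaranteed to be active here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* usage count-- + idle check */
	return 0;
}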
1178
1179/**
1180 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1181 * @dev: Device to handle.
1182 * @ign_usage_count: Whether or not to look at the current usage counter value.
1183 *
1184 * Return -EINVAL if runtime PM is disabled for @dev.
1185 *
1186 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1187 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1188 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1189 * without changing the usage counter.
1190 *
1191 * If @ign_usage_count is %true, this function can be used to prevent suspending
1192 * the device when its runtime PM status is %RPM_ACTIVE.
1193 *
1194 * If @ign_usage_count is %false, this function can be used to prevent
1195 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1196 * runtime PM usage counter is not zero.
1197 *
1198 * The caller is responsible for decrementing the runtime PM usage counter of
1199 * @dev after this function has returned a positive value for it.
1200 */
1201static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1202{
1203	unsigned long flags;
1204	int retval;
1205
1206	spin_lock_irqsave(&dev->power.lock, flags);
1207	if (dev->power.disable_depth > 0) {
1208		retval = -EINVAL;
1209	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1210		retval = 0;
1211	} else if (ign_usage_count) {
1212		retval = 1;
1213		atomic_inc(&dev->power.usage_count);
1214	} else {
1215		retval = atomic_inc_not_zero(&dev->power.usage_count);
1216	}
1217	trace_rpm_usage(dev, 0);
1218	spin_unlock_irqrestore(&dev->power.lock, flags);
1219
1220	return retval;
1221}
1222
1223/**
1224 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
1225 *			      in active state
1226 * @dev: Target device.
1227 *
1228 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1229 * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
1230 * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
1231 * device, in which case the usage counter is also left unmodified.
1232 */
1233int pm_runtime_get_if_active(struct device *dev)
1234{
1235	return pm_runtime_get_conditional(dev, true);
1236}
1237EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1238
1239/**
1240 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1241 * @dev: Target device.
1242 *
1243 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1244 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
1245 * it returns 1. If the device is in a different state or its usage_count is 0,
1246 * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
1247 * in which case the usage counter is also left unmodified.
1248 */
1249int pm_runtime_get_if_in_use(struct device *dev)
1250{
1251	return pm_runtime_get_conditional(dev, false);
1252}
1253EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
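/*
 * Editor's sketch: a hypothetical fast path that touches the hardware only
 * if it is both active and in use, without ever resuming it.
 */
static void foo_flush_if_active(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;		/* suspended, idle, or runtime PM disabled */

	/* ... device registers are safely accessible here ... */

	pm_runtime_put(dev);	/* drop the reference taken above */
}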
1254
1255/**
1256 * __pm_runtime_set_status - Set runtime PM status of a device.
1257 * @dev: Device to handle.
1258 * @status: New runtime PM status of the device.
1259 *
1260 * If runtime PM of the device is disabled or its power.runtime_error field is
1261 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1262 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1263 * However, if the device has a parent and the parent is not active, and the
1264 * parent's power.ignore_children flag is unset, the device's status cannot be
1265 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1266 *
1267 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1268 * and the device parent's counter of unsuspended children is modified to
1269 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1270 * notification request for the parent is submitted.
1271 *
1272 * If @dev has any suppliers (as reflected by device links to them), and @status
1273 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1274 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1275 * of the @status value) and the suppliers will be deactivated on exit.  The
1276 * error returned by the failing supplier activation will be returned in that
1277 * case.
1278 */
1279int __pm_runtime_set_status(struct device *dev, unsigned int status)
1280{
1281	struct device *parent = dev->parent;
1282	bool notify_parent = false;
1283	unsigned long flags;
1284	int error = 0;
1285
1286	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1287		return -EINVAL;
1288
1289	spin_lock_irqsave(&dev->power.lock, flags);
1290
1291	/*
1292	 * Prevent PM-runtime from being enabled for the device or return an
1293	 * error if it is enabled already and working.
1294	 */
1295	if (dev->power.runtime_error || dev->power.disable_depth)
1296		dev->power.disable_depth++;
1297	else
1298		error = -EAGAIN;
1299
1300	spin_unlock_irqrestore(&dev->power.lock, flags);
1301
1302	if (error)
1303		return error;
1304
1305	/*
1306	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1307	 * upfront regardless of the current status, because next time
1308	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1309	 * involved will be dropped down to one anyway.
1310	 */
1311	if (status == RPM_ACTIVE) {
1312		int idx = device_links_read_lock();
1313
1314		error = rpm_get_suppliers(dev);
1315		if (error)
1316			status = RPM_SUSPENDED;
1317
1318		device_links_read_unlock(idx);
1319	}
1320
1321	spin_lock_irqsave(&dev->power.lock, flags);
1322
1323	if (dev->power.runtime_status == status || !parent)
1324		goto out_set;
1325
1326	if (status == RPM_SUSPENDED) {
1327		atomic_add_unless(&parent->power.child_count, -1, 0);
1328		notify_parent = !parent->power.ignore_children;
1329	} else {
1330		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1331
1332		/*
1333		 * It is invalid to put an active child under a parent that is
1334		 * not active, has runtime PM enabled, and has the
1335		 * 'power.ignore_children' flag unset.
1336		 */
1337		if (!parent->power.disable_depth &&
1338		    !parent->power.ignore_children &&
1339		    parent->power.runtime_status != RPM_ACTIVE) {
1340			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1341				dev_name(dev),
1342				dev_name(parent));
1343			error = -EBUSY;
1344		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1345			atomic_inc(&parent->power.child_count);
1346		}
1347
1348		spin_unlock(&parent->power.lock);
1349
1350		if (error) {
1351			status = RPM_SUSPENDED;
1352			goto out;
1353		}
1354	}
1355
1356 out_set:
1357	__update_runtime_status(dev, status);
1358	if (!error)
1359		dev->power.runtime_error = 0;
1360
1361 out:
1362	spin_unlock_irqrestore(&dev->power.lock, flags);
1363
1364	if (notify_parent)
1365		pm_request_idle(parent);
1366
1367	if (status == RPM_SUSPENDED) {
1368		int idx = device_links_read_lock();
1369
1370		rpm_put_suppliers(dev);
1371
1372		device_links_read_unlock(idx);
1373	}
1374
1375	pm_runtime_enable(dev);
1376
1377	return error;
1378}
1379EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
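/*
 * Editor's sketch: the usual probe-time use of this function through the
 * pm_runtime_set_active() inline wrapper; foo_hw_probe() is hypothetical.
 * Telling the core that the hardware is already powered up avoids a
 * spurious ->runtime_resume() on first use.
 */
static int foo_hw_probe(struct device *dev)
{
	int error = pm_runtime_set_active(dev);	/* status only, no callbacks */

	if (error)
		return error;

	pm_runtime_enable(dev);
	return 0;
}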
1380
1381/**
1382 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1383 * @dev: Device to handle.
1384 *
1385 * Flush all pending requests for the device from pm_wq and wait for all
1386 * runtime PM operations involving the device in progress to complete.
1387 *
1388 * Should be called under dev->power.lock with interrupts disabled.
1389 */
1390static void __pm_runtime_barrier(struct device *dev)
1391{
1392	pm_runtime_deactivate_timer(dev);
1393
1394	if (dev->power.request_pending) {
1395		dev->power.request = RPM_REQ_NONE;
1396		spin_unlock_irq(&dev->power.lock);
1397
1398		cancel_work_sync(&dev->power.work);
1399
1400		spin_lock_irq(&dev->power.lock);
1401		dev->power.request_pending = false;
1402	}
1403
1404	if (dev->power.runtime_status == RPM_SUSPENDING ||
1405	    dev->power.runtime_status == RPM_RESUMING ||
1406	    dev->power.idle_notification) {
1407		DEFINE_WAIT(wait);
1408
1409		/* Suspend, wake-up or idle notification in progress. */
1410		for (;;) {
1411			prepare_to_wait(&dev->power.wait_queue, &wait,
1412					TASK_UNINTERRUPTIBLE);
1413			if (dev->power.runtime_status != RPM_SUSPENDING
1414			    && dev->power.runtime_status != RPM_RESUMING
1415			    && !dev->power.idle_notification)
1416				break;
1417			spin_unlock_irq(&dev->power.lock);
1418
1419			schedule();
1420
1421			spin_lock_irq(&dev->power.lock);
1422		}
1423		finish_wait(&dev->power.wait_queue, &wait);
1424	}
1425}
1426
1427/**
1428 * pm_runtime_barrier - Flush pending requests and wait for completions.
1429 * @dev: Device to handle.
1430 *
1431 * Prevent the device from being suspended by incrementing its usage counter
1432 * and, if there's a pending resume request for the device, wake it up.
1433 * Next, make sure that all pending requests for the device have been flushed
1434 * from pm_wq and wait for all runtime PM operations involving the device in
1435 * progress to complete.
1436 *
1437 * Return value:
1438 * 1, if there was a resume request pending and the device had to be woken up,
1439 * 0, otherwise
1440 */
1441int pm_runtime_barrier(struct device *dev)
1442{
1443	int retval = 0;
1444
1445	pm_runtime_get_noresume(dev);
1446	spin_lock_irq(&dev->power.lock);
1447
1448	if (dev->power.request_pending
1449	    && dev->power.request == RPM_REQ_RESUME) {
1450		rpm_resume(dev, 0);
1451		retval = 1;
1452	}
1453
1454	__pm_runtime_barrier(dev);
1455
1456	spin_unlock_irq(&dev->power.lock);
1457	pm_runtime_put_noidle(dev);
1458
1459	return retval;
1460}
1461EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1462
1463/**
1464 * __pm_runtime_disable - Disable runtime PM of a device.
1465 * @dev: Device to handle.
1466 * @check_resume: If set, check if there's a resume request for the device.
1467 *
1468 * Increment power.disable_depth for the device and if it was zero previously,
1469 * cancel all pending runtime PM requests for the device and wait for all
1470 * operations in progress to complete.  The device can be either active or
1471 * suspended after its runtime PM has been disabled.
1472 *
1473 * If @check_resume is set and there's a resume request pending when
1474 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1475 * function will wake up the device before disabling its runtime PM.
1476 */
1477void __pm_runtime_disable(struct device *dev, bool check_resume)
1478{
1479	spin_lock_irq(&dev->power.lock);
1480
1481	if (dev->power.disable_depth > 0) {
1482		dev->power.disable_depth++;
1483		goto out;
1484	}
1485
1486	/*
1487	 * Wake up the device if there's a resume request pending, because that
1488	 * means there probably is some I/O to process and disabling runtime PM
1489	 * shouldn't prevent the device from processing the I/O.
1490	 */
1491	if (check_resume && dev->power.request_pending &&
1492	    dev->power.request == RPM_REQ_RESUME) {
1493		/*
1494		 * Prevent suspends and idle notifications from being carried
1495		 * out after we have woken up the device.
1496		 */
1497		pm_runtime_get_noresume(dev);
1498
1499		rpm_resume(dev, 0);
1500
1501		pm_runtime_put_noidle(dev);
1502	}
1503
1504	/* Update time accounting before disabling PM-runtime. */
1505	update_pm_runtime_accounting(dev);
1506
1507	if (!dev->power.disable_depth++) {
1508		__pm_runtime_barrier(dev);
1509		dev->power.last_status = dev->power.runtime_status;
1510	}
1511
1512 out:
1513	spin_unlock_irq(&dev->power.lock);
1514}
1515EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1516
1517/**
1518 * pm_runtime_enable - Enable runtime PM of a device.
1519 * @dev: Device to handle.
1520 */
1521void pm_runtime_enable(struct device *dev)
1522{
1523	unsigned long flags;
1524
1525	spin_lock_irqsave(&dev->power.lock, flags);
1526
1527	if (!dev->power.disable_depth) {
1528		dev_warn(dev, "Unbalanced %s!\n", __func__);
1529		goto out;
1530	}
1531
1532	if (--dev->power.disable_depth > 0)
1533		goto out;
1534
1535	dev->power.last_status = RPM_INVALID;
1536	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1537
1538	if (dev->power.runtime_status == RPM_SUSPENDED &&
1539	    !dev->power.ignore_children &&
1540	    atomic_read(&dev->power.child_count) > 0)
1541		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1542
1543out:
1544	spin_unlock_irqrestore(&dev->power.lock, flags);
1545}
1546EXPORT_SYMBOL_GPL(pm_runtime_enable);
1547
1548static void pm_runtime_disable_action(void *data)
1549{
1550	pm_runtime_dont_use_autosuspend(data);
1551	pm_runtime_disable(data);
1552}
1553
1554/**
1555 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1556 * @dev: Device to handle.
1557 *
1558 * NOTE: this will also handle calling
1559 * pm_runtime_dont_use_autosuspend() for you at driver exit time if
1560 * needed.
1561 */
1562int devm_pm_runtime_enable(struct device *dev)
1563{
1564	pm_runtime_enable(dev);
1565
1566	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1567}
1568EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
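/*
 * Editor's sketch: a devres-managed probe.  The matching disable (and the
 * dont_use_autosuspend call, since autosuspend is armed here) runs
 * automatically on driver detach; foo_devm_probe() is hypothetical.
 */
static int foo_devm_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms */
	pm_runtime_use_autosuspend(dev);

	return devm_pm_runtime_enable(dev);
}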
1569
1570/**
1571 * pm_runtime_forbid - Block runtime PM of a device.
1572 * @dev: Device to handle.
1573 *
1574 * Increase the device's usage count and clear its power.runtime_auto flag,
1575 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1576 * for it.
1577 */
1578void pm_runtime_forbid(struct device *dev)
1579{
1580	spin_lock_irq(&dev->power.lock);
1581	if (!dev->power.runtime_auto)
1582		goto out;
1583
1584	dev->power.runtime_auto = false;
1585	atomic_inc(&dev->power.usage_count);
1586	rpm_resume(dev, 0);
1587
1588 out:
1589	spin_unlock_irq(&dev->power.lock);
1590}
1591EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1592
1593/**
1594 * pm_runtime_allow - Unblock runtime PM of a device.
1595 * @dev: Device to handle.
1596 *
1597 * Decrease the device's usage count and set its power.runtime_auto flag.
1598 */
1599void pm_runtime_allow(struct device *dev)
1600{
1601	int ret;
1602
1603	spin_lock_irq(&dev->power.lock);
1604	if (dev->power.runtime_auto)
1605		goto out;
1606
1607	dev->power.runtime_auto = true;
1608	ret = rpm_drop_usage_count(dev);
1609	if (ret == 0)
1610		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1611	else if (ret > 0)
1612		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1613
1614 out:
1615	spin_unlock_irq(&dev->power.lock);
1616}
1617EXPORT_SYMBOL_GPL(pm_runtime_allow);
1618
1619/**
1620 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1621 * @dev: Device to handle.
1622 *
1623 * Set the power.no_callbacks flag, which tells the PM core that this
1624 * device is power-managed through its parent and has no runtime PM
1625 * callbacks of its own.  The runtime sysfs attributes will be removed.
1626 */
1627void pm_runtime_no_callbacks(struct device *dev)
1628{
1629	spin_lock_irq(&dev->power.lock);
1630	dev->power.no_callbacks = 1;
1631	spin_unlock_irq(&dev->power.lock);
1632	if (device_is_registered(dev))
1633		rpm_sysfs_remove(dev);
1634}
1635EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1636
1637/**
1638 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1639 * @dev: Device to handle
1640 *
1641 * Set the power.irq_safe flag, which tells the PM core that the
1642 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1643 * always be invoked with the spinlock held and interrupts disabled.  It also
1644 * causes the parent's usage counter to be permanently incremented, preventing
1645 * the parent from runtime suspending -- otherwise an irq-safe child might have
1646 * to wait for a non-irq-safe parent.
1647 */
1648void pm_runtime_irq_safe(struct device *dev)
1649{
1650	if (dev->parent)
1651		pm_runtime_get_sync(dev->parent);
1652
1653	spin_lock_irq(&dev->power.lock);
1654	dev->power.irq_safe = 1;
1655	spin_unlock_irq(&dev->power.lock);
1656}
1657EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
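/*
 * Editor's sketch: once pm_runtime_irq_safe() has been called for a device,
 * synchronous runtime PM calls become usable from hard-irq context, because
 * the callbacks run under the spinlock with interrupts disabled.  foo_irq()
 * is hypothetical and assumes <linux/interrupt.h>.
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* will not sleep for irq-safe devices */
	/* ... acknowledge and handle the event ... */
	pm_runtime_put(dev);

	return IRQ_HANDLED;
}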
1658
1659/**
1660 * update_autosuspend - Handle a change to a device's autosuspend settings.
1661 * @dev: Device to handle.
1662 * @old_delay: The former autosuspend_delay value.
1663 * @old_use: The former use_autosuspend value.
1664 *
1665 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1666 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1667 *
1668 * This function must be called under dev->power.lock with interrupts disabled.
1669 */
1670static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1671{
1672	int delay = dev->power.autosuspend_delay;
1673
1674	/* Should runtime suspend be prevented now? */
1675	if (dev->power.use_autosuspend && delay < 0) {
1676
1677		/* If it used to be allowed then prevent it. */
1678		if (!old_use || old_delay >= 0) {
1679			atomic_inc(&dev->power.usage_count);
1680			rpm_resume(dev, 0);
1681		} else {
1682			trace_rpm_usage(dev, 0);
1683		}
1684	}
1685
1686	/* Runtime suspend should be allowed now. */
1687	else {
1688
1689		/* If it used to be prevented then allow it. */
1690		if (old_use && old_delay < 0)
1691			atomic_dec(&dev->power.usage_count);
1692
1693		/* Maybe we can autosuspend now. */
1694		rpm_idle(dev, RPM_AUTO);
1695	}
1696}
1697
1698/**
1699 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1700 * @dev: Device to handle.
1701 * @delay: Value of the new delay in milliseconds.
1702 *
1703 * Set the device's power.autosuspend_delay value.  If it changes to negative
1704 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1705 * changes the other way, allow runtime suspends.
1706 */
1707void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1708{
1709	int old_delay, old_use;
1710
1711	spin_lock_irq(&dev->power.lock);
1712	old_delay = dev->power.autosuspend_delay;
1713	old_use = dev->power.use_autosuspend;
1714	dev->power.autosuspend_delay = delay;
1715	update_autosuspend(dev, old_delay, old_use);
1716	spin_unlock_irq(&dev->power.lock);
1717}
1718EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1719
1720/**
1721 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1722 * @dev: Device to handle.
1723 * @use: New value for use_autosuspend.
1724 *
1725 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1726 * suspends as needed.
1727 */
1728void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1729{
1730	int old_delay, old_use;
1731
1732	spin_lock_irq(&dev->power.lock);
1733	old_delay = dev->power.autosuspend_delay;
1734	old_use = dev->power.use_autosuspend;
1735	dev->power.use_autosuspend = use;
1736	update_autosuspend(dev, old_delay, old_use);
1737	spin_unlock_irq(&dev->power.lock);
1738}
1739EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
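
/*
 * Editor's sketch (hypothetical "qux" driver, not from the original
 * file): the usual autosuspend pattern pairs the two helpers above in
 * probe, then marks activity before dropping each usage count.
 * pm_runtime_use_autosuspend() and pm_runtime_put_autosuspend() are the
 * inline wrappers from <linux/pm_runtime.h>.
 */
static int __maybe_unused qux_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of idleness */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}

static void __maybe_unused qux_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the delay */
	pm_runtime_put_autosuspend(dev);	/* may schedule a suspend */
}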
1740
1741/**
1742 * pm_runtime_init - Initialize runtime PM fields in given device object.
1743 * @dev: Device object to initialize.
1744 */
1745void pm_runtime_init(struct device *dev)
1746{
1747	dev->power.runtime_status = RPM_SUSPENDED;
1748	dev->power.last_status = RPM_INVALID;
1749	dev->power.idle_notification = false;
1750
1751	dev->power.disable_depth = 1;
1752	atomic_set(&dev->power.usage_count, 0);
1753
1754	dev->power.runtime_error = 0;
1755
1756	atomic_set(&dev->power.child_count, 0);
1757	pm_suspend_ignore_children(dev, false);
1758	dev->power.runtime_auto = true;
1759
1760	dev->power.request_pending = false;
1761	dev->power.request = RPM_REQ_NONE;
1762	dev->power.deferred_resume = false;
1763	dev->power.needs_force_resume = 0;
1764	INIT_WORK(&dev->power.work, pm_runtime_work);
1765
1766	dev->power.timer_expires = 0;
1767	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1768	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1769
1770	init_waitqueue_head(&dev->power.wait_queue);
1771}
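
/*
 * Editor's note with a sketch (hypothetical "quux" driver, not from the
 * original file): pm_runtime_init() leaves the device with
 * disable_depth == 1 and status RPM_SUSPENDED, so runtime PM stays
 * inert until a driver enables it and balances that on removal.
 */
static int __maybe_unused quux_probe(struct device *dev)
{
	pm_runtime_set_active(dev);	/* reflect the real hardware state */
	pm_runtime_enable(dev);		/* drop disable_depth back to 0 */
	return 0;
}

static void __maybe_unused quux_remove(struct device *dev)
{
	pm_runtime_disable(dev);	/* balance pm_runtime_enable() */
}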
1772
1773/**
1774 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1775 * @dev: Device object to re-initialize.
1776 */
1777void pm_runtime_reinit(struct device *dev)
1778{
1779	if (!pm_runtime_enabled(dev)) {
1780		if (dev->power.runtime_status == RPM_ACTIVE)
1781			pm_runtime_set_suspended(dev);
1782		if (dev->power.irq_safe) {
1783			spin_lock_irq(&dev->power.lock);
1784			dev->power.irq_safe = 0;
1785			spin_unlock_irq(&dev->power.lock);
1786			if (dev->parent)
1787				pm_runtime_put(dev->parent);
1788		}
1789	}
1790}
1791
1792/**
1793 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1794 * @dev: Device object being removed from device hierarchy.
1795 */
1796void pm_runtime_remove(struct device *dev)
1797{
1798	__pm_runtime_disable(dev, false);
1799	pm_runtime_reinit(dev);
1800}
1801
1802/**
1803 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1804 * @dev: Consumer device.
1805 */
1806void pm_runtime_get_suppliers(struct device *dev)
1807{
1808	struct device_link *link;
1809	int idx;
1810
1811	idx = device_links_read_lock();
1812
1813	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1814				device_links_read_lock_held())
1815		if (link->flags & DL_FLAG_PM_RUNTIME) {
1816			link->supplier_preactivated = true;
1817			pm_runtime_get_sync(link->supplier);
1818		}
1819
1820	device_links_read_unlock(idx);
1821}
1822
1823/**
1824 * pm_runtime_put_suppliers - Drop references to supplier devices.
1825 * @dev: Consumer device.
1826 */
1827void pm_runtime_put_suppliers(struct device *dev)
1828{
1829	struct device_link *link;
1830	int idx;
1831
1832	idx = device_links_read_lock();
1833
1834	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1835				device_links_read_lock_held())
1836		if (link->supplier_preactivated) {
1837			link->supplier_preactivated = false;
1838			pm_runtime_put(link->supplier);
1839		}
1840
1841	device_links_read_unlock(idx);
1842}
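
/*
 * Editor's sketch, not from the original file: the two helpers above act
 * on links created with DL_FLAG_PM_RUNTIME.  A consumer driver can set
 * up such a link so its supplier is runtime-resumed along with it;
 * "corge_bind" and both device pointers are hypothetical.
 */
static int __maybe_unused corge_bind(struct device *consumer,
				     struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);

	return link ? 0 : -EINVAL;
}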
1843
1844void pm_runtime_new_link(struct device *dev)
1845{
1846	spin_lock_irq(&dev->power.lock);
1847	dev->power.links_count++;
1848	spin_unlock_irq(&dev->power.lock);
1849}
1850
1851static void pm_runtime_drop_link_count(struct device *dev)
1852{
1853	spin_lock_irq(&dev->power.lock);
1854	WARN_ON(dev->power.links_count == 0);
1855	dev->power.links_count--;
1856	spin_unlock_irq(&dev->power.lock);
1857}
1858
1859/**
1860 * pm_runtime_drop_link - Prepare for device link removal.
1861 * @link: Device link going away.
1862 *
1863 * Drop the link count of the consumer end of @link and decrement the supplier
1864 * device's runtime PM usage counter as many times as needed to drop all of the
1865 * PM runtime references to it from the consumer.
1866 */
1867void pm_runtime_drop_link(struct device_link *link)
1868{
1869	if (!(link->flags & DL_FLAG_PM_RUNTIME))
1870		return;
1871
1872	pm_runtime_drop_link_count(link->consumer);
1873	pm_runtime_release_supplier(link);
1874	pm_request_idle(link->supplier);
1875}
1876
1877static bool pm_runtime_need_not_resume(struct device *dev)
1878{
1879	return atomic_read(&dev->power.usage_count) <= 1 &&
1880		(atomic_read(&dev->power.child_count) == 0 ||
1881		 dev->power.ignore_children);
1882}
1883
1884/**
1885 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1886 * @dev: Device to suspend.
1887 *
1888 * Disable runtime PM so we can safely check the device's runtime PM status and,
1889 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1890 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1891 * usage and children counters don't indicate that the device was in use before
1892 * the system-wide transition under way, decrement its parent's children counter
1893 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1894 * unless we encounter errors.
1895 *
1896 * Typically this function is invoked from a system suspend callback to make
1897 * sure the device is put into a low-power state, and it should only be used
1898 * during system-wide PM transitions to sleep states.  It assumes that the
1899 * analogous pm_runtime_force_resume() will be used to resume the device.
1900 *
1901 * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
1902 * state where this function has called the ->runtime_suspend callback but the
1903 * PM core marks the driver as runtime active.
1904 */
1905int pm_runtime_force_suspend(struct device *dev)
1906{
1907	int (*callback)(struct device *);
1908	int ret;
1909
1910	pm_runtime_disable(dev);
1911	if (pm_runtime_status_suspended(dev))
1912		return 0;
1913
1914	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1915
1916	dev_pm_enable_wake_irq_check(dev, true);
1917	ret = callback ? callback(dev) : 0;
1918	if (ret)
1919		goto err;
1920
1921	dev_pm_enable_wake_irq_complete(dev);
1922
1923	/*
1924	 * If the device can stay in suspend after the system-wide transition
1925	 * to the working state that will follow, drop the children counter of
1926	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1927	 * function will be called again for it in the meantime.
1928	 */
1929	if (pm_runtime_need_not_resume(dev)) {
1930		pm_runtime_set_suspended(dev);
1931	} else {
1932		__update_runtime_status(dev, RPM_SUSPENDED);
1933		dev->power.needs_force_resume = 1;
1934	}
1935
1936	return 0;
1937
1938err:
1939	dev_pm_disable_wake_irq_check(dev, true);
1940	pm_runtime_enable(dev);
1941	return ret;
1942}
1943EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1944
1945/**
1946 * pm_runtime_force_resume - Force a device into resume state if needed.
1947 * @dev: Device to resume.
1948 *
1949 * Prior to invoking this function, the caller is expected to have brought the
1950 * device into a low-power state by a call to pm_runtime_force_suspend().  Here
1951 * we reverse those actions and bring the device back to full power, if it is
1952 * expected to be used on system resume.  Otherwise, the resume is deferred and
1953 * left to be managed via runtime PM.
1954 *
1955 * Typically this function may be invoked from a system resume callback.
1956 */
1957int pm_runtime_force_resume(struct device *dev)
1958{
1959	int (*callback)(struct device *);
1960	int ret = 0;
1961
1962	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1963		goto out;
1964
1965	/*
1966	 * The value of the parent's children counter is correct already, so
1967	 * just update the status of the device.
1968	 */
1969	__update_runtime_status(dev, RPM_ACTIVE);
1970
1971	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1972
1973	dev_pm_disable_wake_irq_check(dev, false);
1974	ret = callback ? callback(dev) : 0;
1975	if (ret) {
1976		pm_runtime_set_suspended(dev);
1977		dev_pm_enable_wake_irq_check(dev, false);
1978		goto out;
1979	}
1980
1981	pm_runtime_mark_last_busy(dev);
1982out:
1983	dev->power.needs_force_resume = 0;
1984	pm_runtime_enable(dev);
1985	return ret;
1986}
1987EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
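
/*
 * Editor's sketch (hypothetical "grault" driver, not from the original
 * file): the intended pairing routes system sleep through the runtime
 * PM callbacks via the SET_SYSTEM_SLEEP_PM_OPS() helper from
 * <linux/pm.h>.
 */
static int __maybe_unused grault_runtime_suspend(struct device *dev)
{
	/* Hypothetical: put the hardware into its low-power state here. */
	return 0;
}

static int __maybe_unused grault_runtime_resume(struct device *dev)
{
	/* Hypothetical: bring the hardware back to full power here. */
	return 0;
}

static const struct dev_pm_ops __maybe_unused grault_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(grault_runtime_suspend, grault_runtime_resume,
			   NULL)
};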