v3.1
   1/*
   2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   3 *
   4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   6 *
   7 * This file is released under the GPLv2.
   8 */
   9
  10#include <linux/sched.h>
  11#include <linux/pm_runtime.h>
  12#include "power.h"
  13
  14static int rpm_resume(struct device *dev, int rpmflags);
  15static int rpm_suspend(struct device *dev, int rpmflags);
  16
  17/**
  18 * update_pm_runtime_accounting - Update the time accounting of power states
  19 * @dev: Device to update the accounting for
  20 *
  21 * In order to be able to have time accounting of the various power states
  22 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  23 * PM), we need to track the time spent in each state.
  24 * update_pm_runtime_accounting must be called each time before the
  25 * runtime_status field is updated, to account the time in the old state
  26 * correctly.
  27 */
  28void update_pm_runtime_accounting(struct device *dev)
  29{
  30	unsigned long now = jiffies;
  31	int delta;
  32
  33	delta = now - dev->power.accounting_timestamp;
  34
  35	if (delta < 0)
  36		delta = 0;
  37
  38	dev->power.accounting_timestamp = now;
  39
  40	if (dev->power.disable_depth > 0)
  41		return;
  42
  43	if (dev->power.runtime_status == RPM_SUSPENDED)
  44		dev->power.suspended_jiffies += delta;
  45	else
  46		dev->power.active_jiffies += delta;
  47}
  48
  49static void __update_runtime_status(struct device *dev, enum rpm_status status)
  50{
  51	update_pm_runtime_accounting(dev);
  52	dev->power.runtime_status = status;
  53}
  54
  55/**
  56 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  57 * @dev: Device to handle.
  58 */
  59static void pm_runtime_deactivate_timer(struct device *dev)
  60{
  61	if (dev->power.timer_expires > 0) {
  62		del_timer(&dev->power.suspend_timer);
  63		dev->power.timer_expires = 0;
  64	}
  65}
  66
  67/**
  68 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
  69 * @dev: Device to handle.
  70 */
  71static void pm_runtime_cancel_pending(struct device *dev)
  72{
  73	pm_runtime_deactivate_timer(dev);
  74	/*
  75	 * In case there's a request pending, make sure its work function will
  76	 * return without doing anything.
  77	 */
  78	dev->power.request = RPM_REQ_NONE;
  79}
  80
  81/*
  82 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
  83 * @dev: Device to handle.
  84 *
  85 * Compute the autosuspend-delay expiration time based on the device's
  86 * power.last_busy time.  If the delay has already expired or is disabled
  87 * (negative) or the power.use_autosuspend flag isn't set, return 0.
  88 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
  89 *
  90 * This function may be called either with or without dev->power.lock held.
  91 * Either way it can be racy, since power.last_busy may be updated at any time.
  92 */
  93unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
  94{
  95	int autosuspend_delay;
  96	long elapsed;
  97	unsigned long last_busy;
  98	unsigned long expires = 0;
  99
 100	if (!dev->power.use_autosuspend)
 101		goto out;
 102
 103	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
 104	if (autosuspend_delay < 0)
 105		goto out;
 106
 107	last_busy = ACCESS_ONCE(dev->power.last_busy);
 108	elapsed = jiffies - last_busy;
 109	if (elapsed < 0)
 110		goto out;	/* jiffies has wrapped around. */
 111
 112	/*
 113	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
 114	 * up to the nearest second.
 115	 */
 116	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
 117	if (autosuspend_delay >= 1000)
 118		expires = round_jiffies(expires);
 119	expires += !expires;
 120	if (elapsed >= expires - last_busy)
 121		expires = 0;	/* Already expired. */
 122
 123 out:
 124	return expires;
 125}
 126EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
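
/*
 * Illustrative sketch (editor's addition, not part of runtime.c): the
 * driver-side idiom that feeds power.last_busy, which the expiration
 * computation above reads.  "foo" is a hypothetical driver.
 */
#include <linux/pm_runtime.h>

static void foo_io_complete(struct device *dev)
{
	/* Restart the autosuspend delay from now. */
	pm_runtime_mark_last_busy(dev);

	/* Drop the usage count; the actual suspend is deferred until
	 * pm_runtime_autosuspend_expiration() says the delay is over. */
	pm_runtime_put_autosuspend(dev);
}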
 127
 128/**
 129 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 130 * @dev: Device to test.
 131 */
 132static int rpm_check_suspend_allowed(struct device *dev)
 133{
 134	int retval = 0;
 135
 136	if (dev->power.runtime_error)
 137		retval = -EINVAL;
 138	else if (dev->power.disable_depth > 0)
 139		retval = -EACCES;
 140	else if (atomic_read(&dev->power.usage_count) > 0)
 141		retval = -EAGAIN;
 142	else if (!pm_children_suspended(dev))
 143		retval = -EBUSY;
 144
 145	/* Pending resume requests take precedence over suspends. */
 146	else if ((dev->power.deferred_resume
 147			&& dev->power.runtime_status == RPM_SUSPENDING)
 148	    || (dev->power.request_pending
 149			&& dev->power.request == RPM_REQ_RESUME))
 150		retval = -EAGAIN;
 151	else if (dev->power.runtime_status == RPM_SUSPENDED)
 152		retval = 1;
 153
 154	return retval;
 155}
 156
 157/**
 158 * rpm_idle - Notify device bus type if the device can be suspended.
 159 * @dev: Device to notify the bus type about.
 160 * @rpmflags: Flag bits.
 161 *
 162 * Check if the device's runtime PM status allows it to be suspended.  If
 163 * another idle notification has been started earlier, return immediately.  If
 164 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 165 * run the ->runtime_idle() callback directly.
 166 *
 167 * This function must be called under dev->power.lock with interrupts disabled.
 168 */
 169static int rpm_idle(struct device *dev, int rpmflags)
 170{
 171	int (*callback)(struct device *);
 172	int retval;
 173
 174	retval = rpm_check_suspend_allowed(dev);
 175	if (retval < 0)
 176		;	/* Conditions are wrong. */
 177
 178	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 179	else if (dev->power.runtime_status != RPM_ACTIVE)
 180		retval = -EAGAIN;
 181
 182	/*
 183	 * Any pending request other than an idle notification takes
 184	 * precedence over us, except that the timer may be running.
 185	 */
 186	else if (dev->power.request_pending &&
 187	    dev->power.request > RPM_REQ_IDLE)
 188		retval = -EAGAIN;
 189
 190	/* Act as though RPM_NOWAIT is always set. */
 191	else if (dev->power.idle_notification)
 192		retval = -EINPROGRESS;
 193	if (retval)
 194		goto out;
 195
 196	/* Pending requests need to be canceled. */
 197	dev->power.request = RPM_REQ_NONE;
 198
 199	if (dev->power.no_callbacks) {
 200		/* Assume ->runtime_idle() callback would have suspended. */
 201		retval = rpm_suspend(dev, rpmflags);
 202		goto out;
 203	}
 204
 205	/* Carry out an asynchronous or a synchronous idle notification. */
 206	if (rpmflags & RPM_ASYNC) {
 207		dev->power.request = RPM_REQ_IDLE;
 208		if (!dev->power.request_pending) {
 209			dev->power.request_pending = true;
 210			queue_work(pm_wq, &dev->power.work);
 211		}
 212		goto out;
 213	}
 214
 215	dev->power.idle_notification = true;
 216
 217	if (dev->pm_domain)
 218		callback = dev->pm_domain->ops.runtime_idle;
 219	else if (dev->type && dev->type->pm)
 220		callback = dev->type->pm->runtime_idle;
 221	else if (dev->class && dev->class->pm)
 222		callback = dev->class->pm->runtime_idle;
 223	else if (dev->bus && dev->bus->pm)
 224		callback = dev->bus->pm->runtime_idle;
 225	else
 226		callback = NULL;
 227
 228	if (callback) {
 229		if (dev->power.irq_safe)
 230			spin_unlock(&dev->power.lock);
 231		else
 232			spin_unlock_irq(&dev->power.lock);
 233
 234		callback(dev);
 235
 236		if (dev->power.irq_safe)
 237			spin_lock(&dev->power.lock);
 238		else
 239			spin_lock_irq(&dev->power.lock);
 240	}
 241
 242	dev->power.idle_notification = false;
 243	wake_up_all(&dev->power.wait_queue);
 244
 245 out:
 246	return retval;
 247}
 248
 249/**
 250 * rpm_callback - Run a given runtime PM callback for a given device.
 251 * @cb: Runtime PM callback to run.
 252 * @dev: Device to run the callback for.
 253 */
 254static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 255	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 256{
 257	int retval;
 258
 259	if (!cb)
 260		return -ENOSYS;
 261
 262	if (dev->power.irq_safe) {
 263		retval = cb(dev);
 264	} else {
 265		spin_unlock_irq(&dev->power.lock);
 266
 267		retval = cb(dev);
 268
 269		spin_lock_irq(&dev->power.lock);
 270	}
 271	dev->power.runtime_error = retval;
 272	return retval != -EACCES ? retval : -EIO;
 273}
 274
 275/**
 276 * rpm_suspend - Carry out runtime suspend of given device.
 277 * @dev: Device to suspend.
 278 * @rpmflags: Flag bits.
 279 *
 280 * Check if the device's runtime PM status allows it to be suspended.  If
 281 * another suspend has been started earlier, either return immediately or wait
 282 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 283 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 284 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 285 * If a deferred resume was requested while the callback was running then carry
 286 * it out; otherwise send an idle notification for the device (if the suspend
 287 * failed) or for its parent (if the suspend succeeded).
 288 *
 289 * This function must be called under dev->power.lock with interrupts disabled.
 290 */
 291static int rpm_suspend(struct device *dev, int rpmflags)
 292	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 293{
 294	int (*callback)(struct device *);
 295	struct device *parent = NULL;
 296	int retval;
 297
 298	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 299
 300 repeat:
 301	retval = rpm_check_suspend_allowed(dev);
 302
 303	if (retval < 0)
 304		;	/* Conditions are wrong. */
 305
 306	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 307	else if (dev->power.runtime_status == RPM_RESUMING &&
 308	    !(rpmflags & RPM_ASYNC))
 309		retval = -EAGAIN;
 310	if (retval)
 311		goto out;
 312
 313	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 314	if ((rpmflags & RPM_AUTO)
 315	    && dev->power.runtime_status != RPM_SUSPENDING) {
 316		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
 317
 318		if (expires != 0) {
 319			/* Pending requests need to be canceled. */
 320			dev->power.request = RPM_REQ_NONE;
 321
 322			/*
 323			 * Optimization: If the timer is already running and is
 324			 * set to expire at or before the autosuspend delay,
 325			 * avoid the overhead of resetting it.  Just let it
 326			 * expire; pm_suspend_timer_fn() will take care of the
 327			 * rest.
 328			 */
 329			if (!(dev->power.timer_expires && time_before_eq(
 330			    dev->power.timer_expires, expires))) {
 331				dev->power.timer_expires = expires;
 332				mod_timer(&dev->power.suspend_timer, expires);
 333			}
 334			dev->power.timer_autosuspends = 1;
 335			goto out;
 336		}
 337	}
 338
 339	/* Other scheduled or pending requests need to be canceled. */
 340	pm_runtime_cancel_pending(dev);
 341
 342	if (dev->power.runtime_status == RPM_SUSPENDING) {
 343		DEFINE_WAIT(wait);
 344
 345		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 346			retval = -EINPROGRESS;
 347			goto out;
 348		}
 349
 350		/* Wait for the other suspend running in parallel with us. */
 351		for (;;) {
 352			prepare_to_wait(&dev->power.wait_queue, &wait,
 353					TASK_UNINTERRUPTIBLE);
 354			if (dev->power.runtime_status != RPM_SUSPENDING)
 355				break;
 356
 357			spin_unlock_irq(&dev->power.lock);
 358
 359			schedule();
 360
 361			spin_lock_irq(&dev->power.lock);
 362		}
 363		finish_wait(&dev->power.wait_queue, &wait);
 364		goto repeat;
 365	}
 366
 367	dev->power.deferred_resume = false;
 368	if (dev->power.no_callbacks)
 369		goto no_callback;	/* Assume success. */
 370
 371	/* Carry out an asynchronous or a synchronous suspend. */
 372	if (rpmflags & RPM_ASYNC) {
 373		dev->power.request = (rpmflags & RPM_AUTO) ?
 374		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 375		if (!dev->power.request_pending) {
 376			dev->power.request_pending = true;
 377			queue_work(pm_wq, &dev->power.work);
 378		}
 379		goto out;
 380	}
 381
 382	__update_runtime_status(dev, RPM_SUSPENDING);
 383
 384	if (dev->pm_domain)
 385		callback = dev->pm_domain->ops.runtime_suspend;
 386	else if (dev->type && dev->type->pm)
 387		callback = dev->type->pm->runtime_suspend;
 388	else if (dev->class && dev->class->pm)
 389		callback = dev->class->pm->runtime_suspend;
 390	else if (dev->bus && dev->bus->pm)
 391		callback = dev->bus->pm->runtime_suspend;
 392	else
 393		callback = NULL;
 394
 395	retval = rpm_callback(callback, dev);
 396	if (retval) {
 397		__update_runtime_status(dev, RPM_ACTIVE);
 398		dev->power.deferred_resume = false;
 399		if (retval == -EAGAIN || retval == -EBUSY)
 400			dev->power.runtime_error = 0;
 401		else
 402			pm_runtime_cancel_pending(dev);
 403	} else {
 404 no_callback:
 405		__update_runtime_status(dev, RPM_SUSPENDED);
 406		pm_runtime_deactivate_timer(dev);
 407
 408		if (dev->parent) {
 409			parent = dev->parent;
 410			atomic_add_unless(&parent->power.child_count, -1, 0);
 411		}
 412	}
 413	wake_up_all(&dev->power.wait_queue);
 414
 415	if (dev->power.deferred_resume) {
 416		rpm_resume(dev, 0);
 417		retval = -EAGAIN;
 418		goto out;
 419	}
 420
 421	/* Maybe the parent is now able to suspend. */
 422	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 423		spin_unlock(&dev->power.lock);
 424
 425		spin_lock(&parent->power.lock);
 426		rpm_idle(parent, RPM_ASYNC);
 427		spin_unlock(&parent->power.lock);
 428
 429		spin_lock(&dev->power.lock);
 430	}
 431
 432 out:
 433	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 434
 435	return retval;
 436}
 437
 438/**
 439 * rpm_resume - Carry out runtime resume of given device.
 440 * @dev: Device to resume.
 441 * @rpmflags: Flag bits.
 442 *
 443 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 444 * any scheduled or pending requests.  If another resume has been started
 445 * earlier, either return immediately or wait for it to finish, depending on the
 446 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 447 * parallel with this function, either tell the other process to resume after
 448 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 449 * flag is set then queue a resume request; otherwise run the
 450 * ->runtime_resume() callback directly.  Queue an idle notification for the
 451 * device if the resume succeeded.
 452 *
 453 * This function must be called under dev->power.lock with interrupts disabled.
 454 */
 455static int rpm_resume(struct device *dev, int rpmflags)
 456	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 457{
 458	int (*callback)(struct device *);
 459	struct device *parent = NULL;
 460	int retval = 0;
 461
 462	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 463
 464 repeat:
 465	if (dev->power.runtime_error)
 466		retval = -EINVAL;
 467	else if (dev->power.disable_depth > 0)
 468		retval = -EACCES;
 469	if (retval)
 470		goto out;
 471
 472	/*
 473	 * Other scheduled or pending requests need to be canceled.  Small
 474	 * optimization: If an autosuspend timer is running, leave it running
 475	 * rather than cancelling it now only to restart it again in the near
 476	 * future.
 477	 */
 478	dev->power.request = RPM_REQ_NONE;
 479	if (!dev->power.timer_autosuspends)
 480		pm_runtime_deactivate_timer(dev);
 481
 482	if (dev->power.runtime_status == RPM_ACTIVE) {
 483		retval = 1;
 484		goto out;
 485	}
 486
 487	if (dev->power.runtime_status == RPM_RESUMING
 488	    || dev->power.runtime_status == RPM_SUSPENDING) {
 489		DEFINE_WAIT(wait);
 490
 491		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 492			if (dev->power.runtime_status == RPM_SUSPENDING)
 493				dev->power.deferred_resume = true;
 494			else
 495				retval = -EINPROGRESS;
 496			goto out;
 497		}
 498
 499		/* Wait for the operation carried out in parallel with us. */
 500		for (;;) {
 501			prepare_to_wait(&dev->power.wait_queue, &wait,
 502					TASK_UNINTERRUPTIBLE);
 503			if (dev->power.runtime_status != RPM_RESUMING
 504			    && dev->power.runtime_status != RPM_SUSPENDING)
 505				break;
 506
 507			spin_unlock_irq(&dev->power.lock);
 508
 509			schedule();
 510
 511			spin_lock_irq(&dev->power.lock);
 512		}
 513		finish_wait(&dev->power.wait_queue, &wait);
 514		goto repeat;
 515	}
 516
 517	/*
 518	 * See if we can skip waking up the parent.  This is safe only if
 519	 * power.no_callbacks is set, because otherwise we don't know whether
 520	 * the resume will actually succeed.
 521	 */
 522	if (dev->power.no_callbacks && !parent && dev->parent) {
 523		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 524		if (dev->parent->power.disable_depth > 0
 525		    || dev->parent->power.ignore_children
 526		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 527			atomic_inc(&dev->parent->power.child_count);
 528			spin_unlock(&dev->parent->power.lock);
 529			goto no_callback;	/* Assume success. */
 530		}
 531		spin_unlock(&dev->parent->power.lock);
 532	}
 533
 534	/* Carry out an asynchronous or a synchronous resume. */
 535	if (rpmflags & RPM_ASYNC) {
 536		dev->power.request = RPM_REQ_RESUME;
 537		if (!dev->power.request_pending) {
 538			dev->power.request_pending = true;
 539			queue_work(pm_wq, &dev->power.work);
 540		}
 541		retval = 0;
 542		goto out;
 543	}
 544
 545	if (!parent && dev->parent) {
 546		/*
 547		 * Increment the parent's usage counter and resume it if
 548		 * necessary.  Not needed if dev is irq-safe; then the
 549		 * parent is permanently resumed.
 550		 */
 551		parent = dev->parent;
 552		if (dev->power.irq_safe)
 553			goto skip_parent;
 554		spin_unlock(&dev->power.lock);
 555
 556		pm_runtime_get_noresume(parent);
 557
 558		spin_lock(&parent->power.lock);
 559		/*
 560		 * We can resume if the parent's runtime PM is disabled or it
 561		 * is set to ignore children.
 562		 */
 563		if (!parent->power.disable_depth
 564		    && !parent->power.ignore_children) {
 565			rpm_resume(parent, 0);
 566			if (parent->power.runtime_status != RPM_ACTIVE)
 567				retval = -EBUSY;
 568		}
 569		spin_unlock(&parent->power.lock);
 570
 571		spin_lock(&dev->power.lock);
 572		if (retval)
 573			goto out;
 574		goto repeat;
 575	}
 576 skip_parent:
 577
 578	if (dev->power.no_callbacks)
 579		goto no_callback;	/* Assume success. */
 580
 581	__update_runtime_status(dev, RPM_RESUMING);
 582
 583	if (dev->pm_domain)
 584		callback = dev->pm_domain->ops.runtime_resume;
 585	else if (dev->type && dev->type->pm)
 586		callback = dev->type->pm->runtime_resume;
 587	else if (dev->class && dev->class->pm)
 588		callback = dev->class->pm->runtime_resume;
 589	else if (dev->bus && dev->bus->pm)
 590		callback = dev->bus->pm->runtime_resume;
 591	else
 592		callback = NULL;
 593
 594	retval = rpm_callback(callback, dev);
 595	if (retval) {
 596		__update_runtime_status(dev, RPM_SUSPENDED);
 597		pm_runtime_cancel_pending(dev);
 598	} else {
 599 no_callback:
 600		__update_runtime_status(dev, RPM_ACTIVE);
 601		if (parent)
 602			atomic_inc(&parent->power.child_count);
 603	}
 604	wake_up_all(&dev->power.wait_queue);
 605
 606	if (!retval)
 607		rpm_idle(dev, RPM_ASYNC);
 608
 609 out:
 610	if (parent && !dev->power.irq_safe) {
 611		spin_unlock_irq(&dev->power.lock);
 612
 613		pm_runtime_put(parent);
 614
 615		spin_lock_irq(&dev->power.lock);
 616	}
 617
 618	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 619
 620	return retval;
 621}
 622
 623/**
 624 * pm_runtime_work - Universal runtime PM work function.
 625 * @work: Work structure used for scheduling the execution of this function.
 626 *
 627 * Use @work to get the device object the work is to be done for, determine what
 628 * is to be done and execute the appropriate runtime PM function.
 629 */
 630static void pm_runtime_work(struct work_struct *work)
 631{
 632	struct device *dev = container_of(work, struct device, power.work);
 633	enum rpm_request req;
 634
 635	spin_lock_irq(&dev->power.lock);
 636
 637	if (!dev->power.request_pending)
 638		goto out;
 639
 640	req = dev->power.request;
 641	dev->power.request = RPM_REQ_NONE;
 642	dev->power.request_pending = false;
 643
 644	switch (req) {
 645	case RPM_REQ_NONE:
 646		break;
 647	case RPM_REQ_IDLE:
 648		rpm_idle(dev, RPM_NOWAIT);
 649		break;
 650	case RPM_REQ_SUSPEND:
 651		rpm_suspend(dev, RPM_NOWAIT);
 652		break;
 653	case RPM_REQ_AUTOSUSPEND:
 654		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 655		break;
 656	case RPM_REQ_RESUME:
 657		rpm_resume(dev, RPM_NOWAIT);
 658		break;
 659	}
 660
 661 out:
 662	spin_unlock_irq(&dev->power.lock);
 663}
 664
 665/**
 666 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 667 * @data: Device pointer passed by pm_schedule_suspend().
 668 *
 669 * Check if the time is right and queue a suspend request.
 670 */
 671static void pm_suspend_timer_fn(unsigned long data)
 672{
 673	struct device *dev = (struct device *)data;
 674	unsigned long flags;
 675	unsigned long expires;
 676
 677	spin_lock_irqsave(&dev->power.lock, flags);
 678
 679	expires = dev->power.timer_expires;
  680	/* If 'expires' is after 'jiffies' we've been called too early. */
 681	if (expires > 0 && !time_after(expires, jiffies)) {
 682		dev->power.timer_expires = 0;
 683		rpm_suspend(dev, dev->power.timer_autosuspends ?
 684		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 685	}
 686
 687	spin_unlock_irqrestore(&dev->power.lock, flags);
 688}
 689
 690/**
 691 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 692 * @dev: Device to suspend.
 693 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 694 */
 695int pm_schedule_suspend(struct device *dev, unsigned int delay)
 696{
 697	unsigned long flags;
 698	int retval;
 699
 700	spin_lock_irqsave(&dev->power.lock, flags);
 701
 702	if (!delay) {
 703		retval = rpm_suspend(dev, RPM_ASYNC);
 704		goto out;
 705	}
 706
 707	retval = rpm_check_suspend_allowed(dev);
 708	if (retval)
 709		goto out;
 710
 711	/* Other scheduled or pending requests need to be canceled. */
 712	pm_runtime_cancel_pending(dev);
 713
 714	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
 715	dev->power.timer_expires += !dev->power.timer_expires;
 716	dev->power.timer_autosuspends = 0;
 717	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 718
 719 out:
 720	spin_unlock_irqrestore(&dev->power.lock, flags);
 721
 722	return retval;
 723}
 724EXPORT_SYMBOL_GPL(pm_schedule_suspend);
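
/*
 * Illustrative sketch (editor's addition): submitting a delayed suspend
 * request from a driver.  The 500 ms value and "foo" names are made up.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_transfer_done(struct device *dev)
{
	int err;

	/* Ask the core to queue a suspend request in 500 ms. */
	err = pm_schedule_suspend(dev, 500);
	if (err < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", err);
}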
 725
 726/**
 727 * __pm_runtime_idle - Entry point for runtime idle operations.
 728 * @dev: Device to send idle notification for.
 729 * @rpmflags: Flag bits.
 730 *
 731 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 732 * return immediately if it is larger than zero.  Then carry out an idle
 733 * notification, either synchronous or asynchronous.
 734 *
 735 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 736 */
 737int __pm_runtime_idle(struct device *dev, int rpmflags)
 738{
 739	unsigned long flags;
 740	int retval;
 741
 742	if (rpmflags & RPM_GET_PUT) {
 743		if (!atomic_dec_and_test(&dev->power.usage_count))
 744			return 0;
 745	}
 746
 747	spin_lock_irqsave(&dev->power.lock, flags);
 748	retval = rpm_idle(dev, rpmflags);
 749	spin_unlock_irqrestore(&dev->power.lock, flags);
 750
 751	return retval;
 752}
 753EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 754
 755/**
 756 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 757 * @dev: Device to suspend.
 758 * @rpmflags: Flag bits.
 759 *
 760 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 761 * return immediately if it is larger than zero.  Then carry out a suspend,
 762 * either synchronous or asynchronous.
 763 *
 764 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 765 */
 766int __pm_runtime_suspend(struct device *dev, int rpmflags)
 767{
 768	unsigned long flags;
 769	int retval;
 770
 771	if (rpmflags & RPM_GET_PUT) {
 772		if (!atomic_dec_and_test(&dev->power.usage_count))
 773			return 0;
 774	}
 775
 776	spin_lock_irqsave(&dev->power.lock, flags);
 777	retval = rpm_suspend(dev, rpmflags);
 778	spin_unlock_irqrestore(&dev->power.lock, flags);
 779
 780	return retval;
 781}
 782EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 783
 784/**
 785 * __pm_runtime_resume - Entry point for runtime resume operations.
 786 * @dev: Device to resume.
 787 * @rpmflags: Flag bits.
 788 *
 789 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 790 * carry out a resume, either synchronous or asynchronous.
 791 *
 792 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 793 */
 794int __pm_runtime_resume(struct device *dev, int rpmflags)
 795{
 796	unsigned long flags;
 797	int retval;
 798
 799	if (rpmflags & RPM_GET_PUT)
 800		atomic_inc(&dev->power.usage_count);
 801
 802	spin_lock_irqsave(&dev->power.lock, flags);
 803	retval = rpm_resume(dev, rpmflags);
 804	spin_unlock_irqrestore(&dev->power.lock, flags);
 805
 806	return retval;
 807}
 808EXPORT_SYMBOL_GPL(__pm_runtime_resume);
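
/*
 * Illustrative sketch (editor's addition): the usual get/put pattern built
 * on the three entry points above, via the inline wrappers from
 * <linux/pm_runtime.h>.  "foo" is hypothetical.
 */
#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* counted, synchronous resume */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* keep the count balanced */
		return ret;
	}

	/* ... touch the hardware while it is guaranteed RPM_ACTIVE ... */

	pm_runtime_put(dev);			/* drop count, async idle check */
	return 0;
}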
 809
 810/**
 811 * __pm_runtime_set_status - Set runtime PM status of a device.
 812 * @dev: Device to handle.
 813 * @status: New runtime PM status of the device.
 814 *
 815 * If runtime PM of the device is disabled or its power.runtime_error field is
 816 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 817 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 818 * However, if the device has a parent and the parent is not active, and the
 819 * parent's power.ignore_children flag is unset, the device's status cannot be
 820 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 821 *
 822 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 823 * and the device parent's counter of unsuspended children is modified to
 824 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 825 * notification request for the parent is submitted.
 826 */
 827int __pm_runtime_set_status(struct device *dev, unsigned int status)
 828{
 829	struct device *parent = dev->parent;
 830	unsigned long flags;
 831	bool notify_parent = false;
 832	int error = 0;
 833
 834	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
 835		return -EINVAL;
 836
 837	spin_lock_irqsave(&dev->power.lock, flags);
 838
 839	if (!dev->power.runtime_error && !dev->power.disable_depth) {
 840		error = -EAGAIN;
 841		goto out;
 842	}
 843
 844	if (dev->power.runtime_status == status)
 845		goto out_set;
 846
 847	if (status == RPM_SUSPENDED) {
 848		/* It always is possible to set the status to 'suspended'. */
 849		if (parent) {
 850			atomic_add_unless(&parent->power.child_count, -1, 0);
 851			notify_parent = !parent->power.ignore_children;
 852		}
 853		goto out_set;
 854	}
 855
 856	if (parent) {
 857		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
 858
 859		/*
 860		 * It is invalid to put an active child under a parent that is
 861		 * not active, has runtime PM enabled and the
 862		 * 'power.ignore_children' flag unset.
 863		 */
 864		if (!parent->power.disable_depth
 865		    && !parent->power.ignore_children
 866		    && parent->power.runtime_status != RPM_ACTIVE)
 867			error = -EBUSY;
 868		else if (dev->power.runtime_status == RPM_SUSPENDED)
 869			atomic_inc(&parent->power.child_count);
 870
 871		spin_unlock(&parent->power.lock);
 872
 873		if (error)
 874			goto out;
 875	}
 876
 877 out_set:
 878	__update_runtime_status(dev, status);
 879	dev->power.runtime_error = 0;
 880 out:
 881	spin_unlock_irqrestore(&dev->power.lock, flags);
 882
 883	if (notify_parent)
 884		pm_request_idle(parent);
 885
 886	return error;
 887}
 888EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
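
/*
 * Illustrative sketch (editor's addition): reflecting the hardware state
 * through the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers
 * before runtime PM is enabled.  "foo" is hypothetical.
 */
#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
	/* Firmware left the device powered up, so tell the core; this can
	 * fail with -EBUSY if the parent is not active (see above). */
	int err = pm_runtime_set_active(dev);

	if (err)
		return err;

	pm_runtime_enable(dev);
	return 0;
}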
 889
 890/**
 891 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 892 * @dev: Device to handle.
 893 *
 894 * Flush all pending requests for the device from pm_wq and wait for all
 895 * runtime PM operations involving the device in progress to complete.
 896 *
 897 * Should be called under dev->power.lock with interrupts disabled.
 898 */
 899static void __pm_runtime_barrier(struct device *dev)
 900{
 901	pm_runtime_deactivate_timer(dev);
 902
 903	if (dev->power.request_pending) {
 904		dev->power.request = RPM_REQ_NONE;
 905		spin_unlock_irq(&dev->power.lock);
 906
 907		cancel_work_sync(&dev->power.work);
 908
 909		spin_lock_irq(&dev->power.lock);
 910		dev->power.request_pending = false;
 911	}
 912
 913	if (dev->power.runtime_status == RPM_SUSPENDING
 914	    || dev->power.runtime_status == RPM_RESUMING
 915	    || dev->power.idle_notification) {
 916		DEFINE_WAIT(wait);
 917
 918		/* Suspend, wake-up or idle notification in progress. */
 919		for (;;) {
 920			prepare_to_wait(&dev->power.wait_queue, &wait,
 921					TASK_UNINTERRUPTIBLE);
 922			if (dev->power.runtime_status != RPM_SUSPENDING
 923			    && dev->power.runtime_status != RPM_RESUMING
 924			    && !dev->power.idle_notification)
 925				break;
 926			spin_unlock_irq(&dev->power.lock);
 927
 928			schedule();
 929
 930			spin_lock_irq(&dev->power.lock);
 931		}
 932		finish_wait(&dev->power.wait_queue, &wait);
 933	}
 934}
 935
 936/**
 937 * pm_runtime_barrier - Flush pending requests and wait for completions.
 938 * @dev: Device to handle.
 939 *
 940 * Prevent the device from being suspended by incrementing its usage counter and
 941 * if there's a pending resume request for the device, wake the device up.
 942 * Next, make sure that all pending requests for the device have been flushed
 943 * from pm_wq and wait for all runtime PM operations involving the device in
 944 * progress to complete.
 945 *
 946 * Return value:
 947 * 1, if there was a resume request pending and the device had to be woken up,
 948 * 0, otherwise
 949 */
 950int pm_runtime_barrier(struct device *dev)
 951{
 952	int retval = 0;
 953
 954	pm_runtime_get_noresume(dev);
 955	spin_lock_irq(&dev->power.lock);
 956
 957	if (dev->power.request_pending
 958	    && dev->power.request == RPM_REQ_RESUME) {
 959		rpm_resume(dev, 0);
 960		retval = 1;
 961	}
 962
 963	__pm_runtime_barrier(dev);
 964
 965	spin_unlock_irq(&dev->power.lock);
 966	pm_runtime_put_noidle(dev);
 967
 968	return retval;
 969}
 970EXPORT_SYMBOL_GPL(pm_runtime_barrier);
 971
 972/**
 973 * __pm_runtime_disable - Disable runtime PM of a device.
 974 * @dev: Device to handle.
 975 * @check_resume: If set, check if there's a resume request for the device.
 976 *
  977 * Increment power.disable_depth for the device and, if it was zero previously,
 978 * cancel all pending runtime PM requests for the device and wait for all
 979 * operations in progress to complete.  The device can be either active or
 980 * suspended after its runtime PM has been disabled.
 981 *
 982 * If @check_resume is set and there's a resume request pending when
 983 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 984 * function will wake up the device before disabling its runtime PM.
 985 */
 986void __pm_runtime_disable(struct device *dev, bool check_resume)
 987{
 988	spin_lock_irq(&dev->power.lock);
 989
 990	if (dev->power.disable_depth > 0) {
 991		dev->power.disable_depth++;
 992		goto out;
 993	}
 994
 995	/*
 996	 * Wake up the device if there's a resume request pending, because that
 997	 * means there probably is some I/O to process and disabling runtime PM
 998	 * shouldn't prevent the device from processing the I/O.
 999	 */
1000	if (check_resume && dev->power.request_pending
1001	    && dev->power.request == RPM_REQ_RESUME) {
1002		/*
1003		 * Prevent suspends and idle notifications from being carried
1004		 * out after we have woken up the device.
1005		 */
1006		pm_runtime_get_noresume(dev);
1007
1008		rpm_resume(dev, 0);
1009
1010		pm_runtime_put_noidle(dev);
1011	}
1012
1013	if (!dev->power.disable_depth++)
1014		__pm_runtime_barrier(dev);
1015
1016 out:
1017	spin_unlock_irq(&dev->power.lock);
1018}
1019EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1020
1021/**
1022 * pm_runtime_enable - Enable runtime PM of a device.
1023 * @dev: Device to handle.
1024 */
1025void pm_runtime_enable(struct device *dev)
1026{
1027	unsigned long flags;
1028
1029	spin_lock_irqsave(&dev->power.lock, flags);
1030
1031	if (dev->power.disable_depth > 0)
1032		dev->power.disable_depth--;
1033	else
1034		dev_warn(dev, "Unbalanced %s!\n", __func__);
1035
1036	spin_unlock_irqrestore(&dev->power.lock, flags);
1037}
1038EXPORT_SYMBOL_GPL(pm_runtime_enable);
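
/*
 * Illustrative sketch (editor's addition): disable_depth is a counter, so
 * each pm_runtime_enable() must balance one earlier disable
 * (pm_runtime_init() below starts every device at disable_depth == 1,
 * which the enable call in a driver's probe path typically balances).
 * "foo" is hypothetical.
 */
#include <linux/pm_runtime.h>

static int foo_remove(struct device *dev)
{
	pm_runtime_disable(dev);	/* waits out pending requests */
	/* ... tear down the hardware ... */
	return 0;
}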
1039
1040/**
1041 * pm_runtime_forbid - Block runtime PM of a device.
1042 * @dev: Device to handle.
1043 *
1044 * Increase the device's usage count and clear its power.runtime_auto flag,
1045 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1046 * for it.
1047 */
1048void pm_runtime_forbid(struct device *dev)
1049{
1050	spin_lock_irq(&dev->power.lock);
1051	if (!dev->power.runtime_auto)
1052		goto out;
1053
1054	dev->power.runtime_auto = false;
1055	atomic_inc(&dev->power.usage_count);
1056	rpm_resume(dev, 0);
1057
1058 out:
1059	spin_unlock_irq(&dev->power.lock);
1060}
1061EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1062
1063/**
1064 * pm_runtime_allow - Unblock runtime PM of a device.
1065 * @dev: Device to handle.
1066 *
1067 * Decrease the device's usage count and set its power.runtime_auto flag.
1068 */
1069void pm_runtime_allow(struct device *dev)
1070{
1071	spin_lock_irq(&dev->power.lock);
1072	if (dev->power.runtime_auto)
1073		goto out;
1074
1075	dev->power.runtime_auto = true;
1076	if (atomic_dec_and_test(&dev->power.usage_count))
1077		rpm_idle(dev, RPM_AUTO);
1078
1079 out:
1080	spin_unlock_irq(&dev->power.lock);
1081}
1082EXPORT_SYMBOL_GPL(pm_runtime_allow);
1083
1084/**
1085 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1086 * @dev: Device to handle.
1087 *
1088 * Set the power.no_callbacks flag, which tells the PM core that this
1089 * device is power-managed through its parent and has no runtime PM
1090 * callbacks of its own.  The runtime sysfs attributes will be removed.
1091 */
1092void pm_runtime_no_callbacks(struct device *dev)
1093{
1094	spin_lock_irq(&dev->power.lock);
1095	dev->power.no_callbacks = 1;
1096	spin_unlock_irq(&dev->power.lock);
1097	if (device_is_registered(dev))
1098		rpm_sysfs_remove(dev);
1099}
1100EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1101
1102/**
1103 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1104 * @dev: Device to handle
1105 *
1106 * Set the power.irq_safe flag, which tells the PM core that the
1107 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1108 * always be invoked with the spinlock held and interrupts disabled.  It also
1109 * causes the parent's usage counter to be permanently incremented, preventing
1110 * the parent from runtime suspending -- otherwise an irq-safe child might have
1111 * to wait for a non-irq-safe parent.
1112 */
1113void pm_runtime_irq_safe(struct device *dev)
1114{
1115	if (dev->parent)
1116		pm_runtime_get_sync(dev->parent);
1117	spin_lock_irq(&dev->power.lock);
1118	dev->power.irq_safe = 1;
1119	spin_unlock_irq(&dev->power.lock);
1120}
1121EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
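
/*
 * Illustrative sketch (editor's addition): once pm_runtime_irq_safe() has
 * been called, the synchronous helpers may be used in interrupt context,
 * because the callbacks then run under the spinlock with interrupts off.
 * "foo" is hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* legal here only because irq_safe */
	/* ... acknowledge and handle the interrupt ... */
	pm_runtime_put(dev);		/* asynchronous, so also irq-safe */

	return IRQ_HANDLED;
}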
1122
1123/**
1124 * update_autosuspend - Handle a change to a device's autosuspend settings.
1125 * @dev: Device to handle.
1126 * @old_delay: The former autosuspend_delay value.
1127 * @old_use: The former use_autosuspend value.
1128 *
1129 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1130 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1131 *
1132 * This function must be called under dev->power.lock with interrupts disabled.
1133 */
1134static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1135{
1136	int delay = dev->power.autosuspend_delay;
1137
1138	/* Should runtime suspend be prevented now? */
1139	if (dev->power.use_autosuspend && delay < 0) {
1140
1141		/* If it used to be allowed then prevent it. */
1142		if (!old_use || old_delay >= 0) {
1143			atomic_inc(&dev->power.usage_count);
1144			rpm_resume(dev, 0);
1145		}
1146	}
1147
1148	/* Runtime suspend should be allowed now. */
1149	else {
1150
1151		/* If it used to be prevented then allow it. */
1152		if (old_use && old_delay < 0)
1153			atomic_dec(&dev->power.usage_count);
1154
1155		/* Maybe we can autosuspend now. */
1156		rpm_idle(dev, RPM_AUTO);
1157	}
1158}
1159
1160/**
1161 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1162 * @dev: Device to handle.
1163 * @delay: Value of the new delay in milliseconds.
1164 *
1165 * Set the device's power.autosuspend_delay value.  If it changes to negative
1166 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1167 * changes the other way, allow runtime suspends.
1168 */
1169void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1170{
1171	int old_delay, old_use;
1172
1173	spin_lock_irq(&dev->power.lock);
1174	old_delay = dev->power.autosuspend_delay;
1175	old_use = dev->power.use_autosuspend;
1176	dev->power.autosuspend_delay = delay;
1177	update_autosuspend(dev, old_delay, old_use);
1178	spin_unlock_irq(&dev->power.lock);
1179}
1180EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
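
/*
 * Illustrative sketch (editor's addition): the probe-time setup this
 * function is normally part of.  The 2000 ms delay and "foo" names are
 * made up.
 */
#include <linux/pm_runtime.h>

static int foo_autosuspend_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of quiet */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}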
1181
1182/**
1183 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1184 * @dev: Device to handle.
1185 * @use: New value for use_autosuspend.
1186 *
1187 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1188 * suspends as needed.
1189 */
1190void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1191{
1192	int old_delay, old_use;
1193
1194	spin_lock_irq(&dev->power.lock);
1195	old_delay = dev->power.autosuspend_delay;
1196	old_use = dev->power.use_autosuspend;
1197	dev->power.use_autosuspend = use;
1198	update_autosuspend(dev, old_delay, old_use);
1199	spin_unlock_irq(&dev->power.lock);
1200}
1201EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1202
1203/**
1204 * pm_runtime_init - Initialize runtime PM fields in given device object.
1205 * @dev: Device object to initialize.
1206 */
1207void pm_runtime_init(struct device *dev)
1208{
1209	dev->power.runtime_status = RPM_SUSPENDED;
1210	dev->power.idle_notification = false;
1211
1212	dev->power.disable_depth = 1;
1213	atomic_set(&dev->power.usage_count, 0);
1214
1215	dev->power.runtime_error = 0;
1216
1217	atomic_set(&dev->power.child_count, 0);
1218	pm_suspend_ignore_children(dev, false);
1219	dev->power.runtime_auto = true;
1220
1221	dev->power.request_pending = false;
1222	dev->power.request = RPM_REQ_NONE;
1223	dev->power.deferred_resume = false;
1224	dev->power.accounting_timestamp = jiffies;
1225	INIT_WORK(&dev->power.work, pm_runtime_work);
1226
1227	dev->power.timer_expires = 0;
1228	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1229			(unsigned long)dev);
1230
1231	init_waitqueue_head(&dev->power.wait_queue);
1232}
1233
1234/**
1235 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1236 * @dev: Device object being removed from device hierarchy.
1237 */
1238void pm_runtime_remove(struct device *dev)
1239{
1240	__pm_runtime_disable(dev, false);
1241
1242	/* Change the status back to 'suspended' to match the initial status. */
1243	if (dev->power.runtime_status == RPM_ACTIVE)
1244		pm_runtime_set_suspended(dev);
1245	if (dev->power.irq_safe && dev->parent)
1246		pm_runtime_put_sync(dev->parent);
1247}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   4 *
   5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   7 */
   8#include <linux/sched/mm.h>
   9#include <linux/ktime.h>
  10#include <linux/hrtimer.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/pm_wakeirq.h>
  14#include <trace/events/rpm.h>
  15
  16#include "../base.h"
  17#include "power.h"
  18
  19typedef int (*pm_callback_t)(struct device *);
  20
  21static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
  22{
  23	pm_callback_t cb;
  24	const struct dev_pm_ops *ops;
  25
  26	if (dev->pm_domain)
  27		ops = &dev->pm_domain->ops;
  28	else if (dev->type && dev->type->pm)
  29		ops = dev->type->pm;
  30	else if (dev->class && dev->class->pm)
  31		ops = dev->class->pm;
  32	else if (dev->bus && dev->bus->pm)
  33		ops = dev->bus->pm;
  34	else
  35		ops = NULL;
  36
  37	if (ops)
  38		cb = *(pm_callback_t *)((void *)ops + cb_offset);
  39	else
  40		cb = NULL;
  41
  42	if (!cb && dev->driver && dev->driver->pm)
  43		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
  44
  45	return cb;
  46}
  47
  48#define RPM_GET_CALLBACK(dev, callback) \
  49		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
  50
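/*
 * Illustrative sketch (editor's addition): the offsetof-based lookup lets
 * one helper serve every callback slot, e.g.:
 *
 *	pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * which replaces the per-layer if/else chains used in the v3.1 listing
 * above (PM domain, then type, class, bus, and finally the driver).
 */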
  51static int rpm_resume(struct device *dev, int rpmflags);
  52static int rpm_suspend(struct device *dev, int rpmflags);
  53
  54/**
  55 * update_pm_runtime_accounting - Update the time accounting of power states
  56 * @dev: Device to update the accounting for
  57 *
  58 * In order to be able to have time accounting of the various power states
  59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  60 * PM), we need to track the time spent in each state.
  61 * update_pm_runtime_accounting must be called each time before the
  62 * runtime_status field is updated, to account the time in the old state
  63 * correctly.
  64 */
  65static void update_pm_runtime_accounting(struct device *dev)
  66{
  67	u64 now, last, delta;
  68
  69	if (dev->power.disable_depth > 0)
  70		return;
  71
  72	last = dev->power.accounting_timestamp;
  73
  74	now = ktime_get_mono_fast_ns();
  75	dev->power.accounting_timestamp = now;
  76
  77	/*
  78	 * Because ktime_get_mono_fast_ns() is not monotonic during
  79	 * timekeeping updates, ensure that 'now' is after the last saved
   80	 * timestamp.
  81	 */
  82	if (now < last)
  83		return;
  84
  85	delta = now - last;
  86
  87	if (dev->power.runtime_status == RPM_SUSPENDED)
  88		dev->power.suspended_time += delta;
  89	else
  90		dev->power.active_time += delta;
  91}
  92
  93static void __update_runtime_status(struct device *dev, enum rpm_status status)
  94{
  95	update_pm_runtime_accounting(dev);
  96	dev->power.runtime_status = status;
  97}
  98
  99static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 100{
 101	u64 time;
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&dev->power.lock, flags);
 105
 106	update_pm_runtime_accounting(dev);
 107	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 108
 109	spin_unlock_irqrestore(&dev->power.lock, flags);
 110
 111	return time;
 112}
 113
 114u64 pm_runtime_active_time(struct device *dev)
 115{
 116	return rpm_get_accounted_time(dev, false);
 117}
 118
 119u64 pm_runtime_suspended_time(struct device *dev)
 120{
 121	return rpm_get_accounted_time(dev, true);
 122}
 123EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 124
 125/**
 126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 127 * @dev: Device to handle.
 128 */
 129static void pm_runtime_deactivate_timer(struct device *dev)
 130{
 131	if (dev->power.timer_expires > 0) {
 132		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 133		dev->power.timer_expires = 0;
 134	}
 135}
 136
 137/**
 138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 139 * @dev: Device to handle.
 140 */
 141static void pm_runtime_cancel_pending(struct device *dev)
 142{
 143	pm_runtime_deactivate_timer(dev);
 144	/*
 145	 * In case there's a request pending, make sure its work function will
 146	 * return without doing anything.
 147	 */
 148	dev->power.request = RPM_REQ_NONE;
 149}
 150
 151/*
 152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 153 * @dev: Device to handle.
 154 *
 155 * Compute the autosuspend-delay expiration time based on the device's
 156 * power.last_busy time.  If the delay has already expired or is disabled
 157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 158 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 159 *
 160 * This function may be called either with or without dev->power.lock held.
 161 * Either way it can be racy, since power.last_busy may be updated at any time.
 162 */
 163u64 pm_runtime_autosuspend_expiration(struct device *dev)
 164{
 165	int autosuspend_delay;
 166	u64 expires;
 167
 168	if (!dev->power.use_autosuspend)
 169		return 0;
 170
 171	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 172	if (autosuspend_delay < 0)
 173		return 0;
 174
 175	expires  = READ_ONCE(dev->power.last_busy);
 176	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
 177	if (expires > ktime_get_mono_fast_ns())
 178		return expires;	/* Expires in the future */
 179
 180	return 0;
 181}
 182EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 183
 184static int dev_memalloc_noio(struct device *dev, void *data)
 185{
 186	return dev->power.memalloc_noio;
 187}
 188
 189/*
 190 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 191 * @dev: Device to handle.
 192 * @enable: True for setting the flag and False for clearing the flag.
 193 *
 194 * Set the flag for all devices in the path from the device to the
 195 * root device in the device tree if @enable is true, otherwise clear
 196 * the flag for devices in the path whose siblings don't set the flag.
 197 *
  198 * The function should only be called by a block device or network
  199 * device driver to solve the deadlock problem during runtime
  200 * resume/suspend:
 201 *
  202 *     If memory allocation with GFP_KERNEL is called inside the runtime
  203 *     resume/suspend callback of any one of its ancestors (or the block
  204 *     device itself), a deadlock may be triggered inside the memory
  205 *     allocation, since it might not complete until the block device
  206 *     becomes active and the involved page I/O finishes.  This situation
  207 *     was first pointed out by Alan Stern.  Network devices are involved
  208 *     in iSCSI-style setups.
 209 *
 210 * The lock of dev_hotplug_mutex is held in the function for handling
 211 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 212 * in async probe().
 213 *
 214 * The function should be called between device_add() and device_del()
  215 * on the affected device (a block or network device).
 216 */
 217void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 218{
 219	static DEFINE_MUTEX(dev_hotplug_mutex);
 220
 221	mutex_lock(&dev_hotplug_mutex);
 222	for (;;) {
 223		bool enabled;
 224
 225		/* hold power lock since bitfield is not SMP-safe. */
 226		spin_lock_irq(&dev->power.lock);
 227		enabled = dev->power.memalloc_noio;
 228		dev->power.memalloc_noio = enable;
 229		spin_unlock_irq(&dev->power.lock);
 230
 231		/*
  232		 * No need to enable ancestors any more if the device
  233		 * has already been enabled.
 234		 */
 235		if (enabled && enable)
 236			break;
 237
 238		dev = dev->parent;
 239
 240		/*
  241		 * Clear the flag of the parent device only if none of the
  242		 * children set it, because an ancestor's flag may have been
  243		 * set by any one of its descendants.
 244		 */
 245		if (!dev || (!enable &&
 246			     device_for_each_child(dev, NULL,
 247						   dev_memalloc_noio)))
 248			break;
 249	}
 250	mutex_unlock(&dev_hotplug_mutex);
 251}
 252EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
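
/*
 * Illustrative sketch (editor's addition): a block driver setting and
 * clearing the flag between device_add() and device_del(), as the comment
 * above prescribes.  "foo" is hypothetical.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_register(struct device *dev)
{
	int err = device_add(dev);

	if (err)
		return err;

	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}

static void foo_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	device_del(dev);
}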
 253
 254/**
 255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 256 * @dev: Device to test.
 257 */
 258static int rpm_check_suspend_allowed(struct device *dev)
 259{
 260	int retval = 0;
 261
 262	if (dev->power.runtime_error)
 263		retval = -EINVAL;
 264	else if (dev->power.disable_depth > 0)
 265		retval = -EACCES;
 266	else if (atomic_read(&dev->power.usage_count) > 0)
 267		retval = -EAGAIN;
 268	else if (!dev->power.ignore_children &&
 269			atomic_read(&dev->power.child_count))
 270		retval = -EBUSY;
 271
 272	/* Pending resume requests take precedence over suspends. */
 273	else if ((dev->power.deferred_resume
 274			&& dev->power.runtime_status == RPM_SUSPENDING)
 275	    || (dev->power.request_pending
 276			&& dev->power.request == RPM_REQ_RESUME))
 277		retval = -EAGAIN;
 278	else if (__dev_pm_qos_resume_latency(dev) == 0)
 279		retval = -EPERM;
 280	else if (dev->power.runtime_status == RPM_SUSPENDED)
 281		retval = 1;
 282
 283	return retval;
 284}
 285
 286static int rpm_get_suppliers(struct device *dev)
 287{
 288	struct device_link *link;
 289
 290	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 291				device_links_read_lock_held()) {
 292		int retval;
 293
 294		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
 295		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 296			continue;
 297
 298		retval = pm_runtime_get_sync(link->supplier);
 299		/* Ignore suppliers with disabled runtime PM. */
 300		if (retval < 0 && retval != -EACCES) {
 301			pm_runtime_put_noidle(link->supplier);
 302			return retval;
 303		}
 304		refcount_inc(&link->rpm_active);
 305	}
 306	return 0;
 307}
 308
 309static void rpm_put_suppliers(struct device *dev)
 310{
 311	struct device_link *link;
 312
 313	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 314				device_links_read_lock_held()) {
 315		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
 316			continue;
 317
 318		while (refcount_dec_not_one(&link->rpm_active))
 319			pm_runtime_put(link->supplier);
 320	}
 321}
 322
 323/**
 324 * __rpm_callback - Run a given runtime PM callback for a given device.
 325 * @cb: Runtime PM callback to run.
 326 * @dev: Device to run the callback for.
 327 */
 328static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 329	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 330{
 331	int retval, idx;
 332	bool use_links = dev->power.links_count > 0;
 333
 334	if (dev->power.irq_safe) {
 335		spin_unlock(&dev->power.lock);
 336	} else {
 337		spin_unlock_irq(&dev->power.lock);
 338
 339		/*
 340		 * Resume suppliers if necessary.
 341		 *
 342		 * The device's runtime PM status cannot change until this
 343		 * routine returns, so it is safe to read the status outside of
 344		 * the lock.
 345		 */
 346		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 347			idx = device_links_read_lock();
 348
 349			retval = rpm_get_suppliers(dev);
 350			if (retval)
 351				goto fail;
 352
 353			device_links_read_unlock(idx);
 354		}
 355	}
 356
 357	retval = cb(dev);
 358
 359	if (dev->power.irq_safe) {
 360		spin_lock(&dev->power.lock);
 361	} else {
 362		/*
 363		 * If the device is suspending and the callback has returned
 364		 * success, drop the usage counters of the suppliers that have
 365		 * been reference counted on its resume.
 366		 *
 367		 * Do that if resume fails too.
 368		 */
 369		if (use_links
 370		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
 371		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 372			idx = device_links_read_lock();
 373
 374 fail:
 375			rpm_put_suppliers(dev);
 376
 377			device_links_read_unlock(idx);
 378		}
 379
 380		spin_lock_irq(&dev->power.lock);
 381	}
 382
 383	return retval;
 384}
 385
 386/**
 387 * rpm_idle - Notify device bus type if the device can be suspended.
 388 * @dev: Device to notify the bus type about.
 389 * @rpmflags: Flag bits.
 390 *
 391 * Check if the device's runtime PM status allows it to be suspended.  If
 392 * another idle notification has been started earlier, return immediately.  If
 393 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 394 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 395 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 396 *
 397 * This function must be called under dev->power.lock with interrupts disabled.
 398 */
 399static int rpm_idle(struct device *dev, int rpmflags)
 400{
 401	int (*callback)(struct device *);
 402	int retval;
 403
 404	trace_rpm_idle_rcuidle(dev, rpmflags);
 405	retval = rpm_check_suspend_allowed(dev);
 406	if (retval < 0)
 407		;	/* Conditions are wrong. */
 408
 409	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
 410	else if (dev->power.runtime_status != RPM_ACTIVE)
 411		retval = -EAGAIN;
 412
 413	/*
 414	 * Any pending request other than an idle notification takes
 415	 * precedence over us, except that the timer may be running.
 416	 */
 417	else if (dev->power.request_pending &&
 418	    dev->power.request > RPM_REQ_IDLE)
 419		retval = -EAGAIN;
 420
 421	/* Act as though RPM_NOWAIT is always set. */
 422	else if (dev->power.idle_notification)
 423		retval = -EINPROGRESS;
 424	if (retval)
 425		goto out;
 426
 427	/* Pending requests need to be canceled. */
 428	dev->power.request = RPM_REQ_NONE;
 429
 430	if (dev->power.no_callbacks)
 431		goto out;
 432
 433	/* Carry out an asynchronous or a synchronous idle notification. */
 434	if (rpmflags & RPM_ASYNC) {
 435		dev->power.request = RPM_REQ_IDLE;
 436		if (!dev->power.request_pending) {
 437			dev->power.request_pending = true;
 438			queue_work(pm_wq, &dev->power.work);
 439		}
 440		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 441		return 0;
 442	}
 443
 444	dev->power.idle_notification = true;
 445
 446	callback = RPM_GET_CALLBACK(dev, runtime_idle);
 447
 448	if (callback)
 449		retval = __rpm_callback(callback, dev);
 450
 451	dev->power.idle_notification = false;
 452	wake_up_all(&dev->power.wait_queue);
 453
 454 out:
 455	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 456	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 457}
 458
 459/**
 460 * rpm_callback - Run a given runtime PM callback for a given device.
 461 * @cb: Runtime PM callback to run.
 462 * @dev: Device to run the callback for.
 463 */
 464static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 465{
 466	int retval;
 467
 468	if (!cb)
 469		return -ENOSYS;
 470
 471	if (dev->power.memalloc_noio) {
 472		unsigned int noio_flag;
 473
 474		/*
 475		 * Deadlock might be caused if memory allocation with
 476		 * GFP_KERNEL happens inside runtime_suspend and
 477		 * runtime_resume callbacks of one block device's
 478		 * ancestor or the block device itself. Network
 479		 * device might be thought as part of iSCSI block
 480		 * device, so network device and its ancestor should
 481		 * be marked as memalloc_noio too.
 482		 */
 483		noio_flag = memalloc_noio_save();
 484		retval = __rpm_callback(cb, dev);
 485		memalloc_noio_restore(noio_flag);
 486	} else {
 487		retval = __rpm_callback(cb, dev);
 488	}
 489
 490	dev->power.runtime_error = retval;
 491	return retval != -EACCES ? retval : -EIO;
 492}
 493
 494/**
 495 * rpm_suspend - Carry out runtime suspend of given device.
 496 * @dev: Device to suspend.
 497 * @rpmflags: Flag bits.
 498 *
 499 * Check if the device's runtime PM status allows it to be suspended.
 500 * Cancel a pending idle notification, autosuspend or suspend. If
 501 * another suspend has been started earlier, either return immediately
 502 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 503 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 504 * otherwise run the ->runtime_suspend() callback directly.  When the
 505 * callback succeeds, if a deferred resume was requested while it was
 506 * running, carry out that resume; otherwise send an idle notification
 507 * for the device's parent (unless ignore_children of parent->power or
 508 * irq_safe of dev->power is set).  If ->runtime_suspend() fails with
 509 * -EAGAIN or -EBUSY, and the RPM_AUTO flag is set and the next
 510 * autosuspend-delay expiration time is in the future, schedule another
 511 * autosuspend attempt.
 512 *
 513 * This function must be called under dev->power.lock with interrupts disabled.
 514 */
 515static int rpm_suspend(struct device *dev, int rpmflags)
 516	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 517{
 518	int (*callback)(struct device *);
 519	struct device *parent = NULL;
 520	int retval;
 521
 522	trace_rpm_suspend_rcuidle(dev, rpmflags);
 523
 524 repeat:
 525	retval = rpm_check_suspend_allowed(dev);
 526
 527	if (retval < 0)
 528		;	/* Conditions are wrong. */
 529
 530	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 531	else if (dev->power.runtime_status == RPM_RESUMING &&
 532	    !(rpmflags & RPM_ASYNC))
 533		retval = -EAGAIN;
 534	if (retval)
 535		goto out;
 536
 537	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 538	if ((rpmflags & RPM_AUTO)
 539	    && dev->power.runtime_status != RPM_SUSPENDING) {
 540		u64 expires = pm_runtime_autosuspend_expiration(dev);
 541
 542		if (expires != 0) {
 543			/* Pending requests need to be canceled. */
 544			dev->power.request = RPM_REQ_NONE;
 545
 546			/*
 547			 * Optimization: If the timer is already running and is
 548			 * set to expire at or before the autosuspend delay,
 549			 * avoid the overhead of resetting it.  Just let it
 550			 * expire; pm_suspend_timer_fn() will take care of the
 551			 * rest.
 552			 */
 553			if (!(dev->power.timer_expires &&
 554					dev->power.timer_expires <= expires)) {
 555				/*
 556				 * We add a slack of 25% to gather wakeups
 557				 * without sacrificing the granularity.
 558				 */
 559				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 560						    (NSEC_PER_MSEC >> 2);
 561
 562				dev->power.timer_expires = expires;
 563				hrtimer_start_range_ns(&dev->power.suspend_timer,
 564						ns_to_ktime(expires),
 565						slack,
 566						HRTIMER_MODE_ABS);
 567			}
 568			dev->power.timer_autosuspends = 1;
 569			goto out;
 570		}
 571	}
 572
 573	/* Other scheduled or pending requests need to be canceled. */
 574	pm_runtime_cancel_pending(dev);
 575
 576	if (dev->power.runtime_status == RPM_SUSPENDING) {
 577		DEFINE_WAIT(wait);
 578
 579		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 580			retval = -EINPROGRESS;
 581			goto out;
 582		}
 583
 584		if (dev->power.irq_safe) {
 585			spin_unlock(&dev->power.lock);
 586
 587			cpu_relax();
 588
 589			spin_lock(&dev->power.lock);
 590			goto repeat;
 591		}
 592
 593		/* Wait for the other suspend running in parallel with us. */
 594		for (;;) {
 595			prepare_to_wait(&dev->power.wait_queue, &wait,
 596					TASK_UNINTERRUPTIBLE);
 597			if (dev->power.runtime_status != RPM_SUSPENDING)
 598				break;
 599
 600			spin_unlock_irq(&dev->power.lock);
 601
 602			schedule();
 603
 604			spin_lock_irq(&dev->power.lock);
 605		}
 606		finish_wait(&dev->power.wait_queue, &wait);
 607		goto repeat;
 608	}
 609
 610	if (dev->power.no_callbacks)
 611		goto no_callback;	/* Assume success. */
 612
 613	/* Carry out an asynchronous or a synchronous suspend. */
 614	if (rpmflags & RPM_ASYNC) {
 615		dev->power.request = (rpmflags & RPM_AUTO) ?
 616		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 617		if (!dev->power.request_pending) {
 618			dev->power.request_pending = true;
 619			queue_work(pm_wq, &dev->power.work);
 620		}
 621		goto out;
 622	}
 623
 624	__update_runtime_status(dev, RPM_SUSPENDING);
 625
 626	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 627
 628	dev_pm_enable_wake_irq_check(dev, true);
 629	retval = rpm_callback(callback, dev);
 630	if (retval)
 631		goto fail;
 632
 633 no_callback:
 634	__update_runtime_status(dev, RPM_SUSPENDED);
 635	pm_runtime_deactivate_timer(dev);
 636
 637	if (dev->parent) {
 638		parent = dev->parent;
 639		atomic_add_unless(&parent->power.child_count, -1, 0);
 640	}
 641	wake_up_all(&dev->power.wait_queue);
 642
 643	if (dev->power.deferred_resume) {
 644		dev->power.deferred_resume = false;
 645		rpm_resume(dev, 0);
 646		retval = -EAGAIN;
 647		goto out;
 648	}
 649
 650	/* Maybe the parent is now able to suspend. */
 651	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 652		spin_unlock(&dev->power.lock);
 653
 654		spin_lock(&parent->power.lock);
 655		rpm_idle(parent, RPM_ASYNC);
 656		spin_unlock(&parent->power.lock);
 657
 658		spin_lock(&dev->power.lock);
 659	}
 660
 661 out:
 662	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 663
 664	return retval;
 665
 666 fail:
 667	dev_pm_disable_wake_irq_check(dev);
 668	__update_runtime_status(dev, RPM_ACTIVE);
 669	dev->power.deferred_resume = false;
 670	wake_up_all(&dev->power.wait_queue);
 671
 672	if (retval == -EAGAIN || retval == -EBUSY) {
 673		dev->power.runtime_error = 0;
 674
 675		/*
 676		 * If the callback routine failed an autosuspend, and
 677		 * if the last_busy time has been updated so that there
 678		 * is a new autosuspend expiration time, automatically
 679		 * reschedule another autosuspend.
 680		 */
 681		if ((rpmflags & RPM_AUTO) &&
 682		    pm_runtime_autosuspend_expiration(dev) != 0)
 683			goto repeat;
 684	} else {
 685		pm_runtime_cancel_pending(dev);
 686	}
 687	goto out;
 688}
 689
 690/**
 691 * rpm_resume - Carry out runtime resume of given device.
 692 * @dev: Device to resume.
 693 * @rpmflags: Flag bits.
 694 *
 695 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 696 * any scheduled or pending requests.  If another resume has been started
 697 * earlier, either return immediately or wait for it to finish, depending on the
 698 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 699 * parallel with this function, either tell the other process to resume after
 700 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 701 * flag is set then queue a resume request; otherwise run the
 702 * ->runtime_resume() callback directly.  Queue an idle notification for the
 703 * device if the resume succeeded.
 704 *
 705 * This function must be called under dev->power.lock with interrupts disabled.
 706 */
 707static int rpm_resume(struct device *dev, int rpmflags)
 708	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 709{
 710	int (*callback)(struct device *);
 711	struct device *parent = NULL;
 712	int retval = 0;
 713
 714	trace_rpm_resume_rcuidle(dev, rpmflags);
 715
 716 repeat:
 717	if (dev->power.runtime_error)
 718		retval = -EINVAL;
 719	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 720	    && dev->power.runtime_status == RPM_ACTIVE)
 721		retval = 1;
 722	else if (dev->power.disable_depth > 0)
 723		retval = -EACCES;
 724	if (retval)
 725		goto out;
 726
 727	/*
 728	 * Other scheduled or pending requests need to be canceled.  Small
 729	 * optimization: If an autosuspend timer is running, leave it running
 730	 * rather than cancelling it now only to restart it again in the near
 731	 * future.
 732	 */
 733	dev->power.request = RPM_REQ_NONE;
 734	if (!dev->power.timer_autosuspends)
 735		pm_runtime_deactivate_timer(dev);
 736
 737	if (dev->power.runtime_status == RPM_ACTIVE) {
 738		retval = 1;
 739		goto out;
 740	}
 741
 742	if (dev->power.runtime_status == RPM_RESUMING
 743	    || dev->power.runtime_status == RPM_SUSPENDING) {
 744		DEFINE_WAIT(wait);
 745
 746		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 747			if (dev->power.runtime_status == RPM_SUSPENDING)
 748				dev->power.deferred_resume = true;
 749			else
 750				retval = -EINPROGRESS;
 751			goto out;
 752		}
 753
 754		if (dev->power.irq_safe) {
 755			spin_unlock(&dev->power.lock);
 756
 757			cpu_relax();
 758
 759			spin_lock(&dev->power.lock);
 760			goto repeat;
 761		}
 762
 763		/* Wait for the operation carried out in parallel with us. */
 764		for (;;) {
 765			prepare_to_wait(&dev->power.wait_queue, &wait,
 766					TASK_UNINTERRUPTIBLE);
 767			if (dev->power.runtime_status != RPM_RESUMING
 768			    && dev->power.runtime_status != RPM_SUSPENDING)
 769				break;
 770
 771			spin_unlock_irq(&dev->power.lock);
 772
 773			schedule();
 774
 775			spin_lock_irq(&dev->power.lock);
 776		}
 777		finish_wait(&dev->power.wait_queue, &wait);
 778		goto repeat;
 779	}
 780
 781	/*
 782	 * See if we can skip waking up the parent.  This is safe only if
 783	 * power.no_callbacks is set, because otherwise we don't know whether
 784	 * the resume will actually succeed.
 785	 */
 786	if (dev->power.no_callbacks && !parent && dev->parent) {
 787		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 788		if (dev->parent->power.disable_depth > 0
 789		    || dev->parent->power.ignore_children
 790		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 791			atomic_inc(&dev->parent->power.child_count);
 792			spin_unlock(&dev->parent->power.lock);
 793			retval = 1;
 794			goto no_callback;	/* Assume success. */
 795		}
 796		spin_unlock(&dev->parent->power.lock);
 797	}
 798
 799	/* Carry out an asynchronous or a synchronous resume. */
 800	if (rpmflags & RPM_ASYNC) {
 801		dev->power.request = RPM_REQ_RESUME;
 802		if (!dev->power.request_pending) {
 803			dev->power.request_pending = true;
 804			queue_work(pm_wq, &dev->power.work);
 805		}
 806		retval = 0;
 807		goto out;
 808	}
 809
 810	if (!parent && dev->parent) {
 811		/*
 812		 * Increment the parent's usage counter and resume it if
 813		 * necessary.  Not needed if dev is irq-safe; then the
 814		 * parent is permanently resumed.
 815		 */
 816		parent = dev->parent;
 817		if (dev->power.irq_safe)
 818			goto skip_parent;
 819		spin_unlock(&dev->power.lock);
 820
 821		pm_runtime_get_noresume(parent);
 822
 823		spin_lock(&parent->power.lock);
 824		/*
 825		 * Resume the parent if it has runtime PM enabled and not been
 826		 * set to ignore its children.
 827		 */
 828		if (!parent->power.disable_depth
 829		    && !parent->power.ignore_children) {
 830			rpm_resume(parent, 0);
 831			if (parent->power.runtime_status != RPM_ACTIVE)
 832				retval = -EBUSY;
 833		}
 834		spin_unlock(&parent->power.lock);
 835
 836		spin_lock(&dev->power.lock);
 837		if (retval)
 838			goto out;
 839		goto repeat;
 840	}
 841 skip_parent:
 842
 843	if (dev->power.no_callbacks)
 844		goto no_callback;	/* Assume success. */
 845
 846	__update_runtime_status(dev, RPM_RESUMING);
 847
 848	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 849
 850	dev_pm_disable_wake_irq_check(dev);
 851	retval = rpm_callback(callback, dev);
 852	if (retval) {
 853		__update_runtime_status(dev, RPM_SUSPENDED);
 854		pm_runtime_cancel_pending(dev);
 855		dev_pm_enable_wake_irq_check(dev, false);
 856	} else {
 857 no_callback:
 858		__update_runtime_status(dev, RPM_ACTIVE);
 859		pm_runtime_mark_last_busy(dev);
 860		if (parent)
 861			atomic_inc(&parent->power.child_count);
 862	}
 863	wake_up_all(&dev->power.wait_queue);
 864
 865	if (retval >= 0)
 866		rpm_idle(dev, RPM_ASYNC);
 867
 868 out:
 869	if (parent && !dev->power.irq_safe) {
 870		spin_unlock_irq(&dev->power.lock);
 871
 872		pm_runtime_put(parent);
 873
 874		spin_lock_irq(&dev->power.lock);
 875	}
 876
 877	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 878
 879	return retval;
 880}
 881
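/*
 * Illustrative sketch (editorial; "foo" is a hypothetical driver): the
 * usual way rpm_resume() and rpm_suspend() are exercised is through the
 * get/put helpers bracketing an I/O operation:
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		(device is RPM_ACTIVE here; perform the I/O)
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 */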
 882/**
 883 * pm_runtime_work - Universal runtime PM work function.
 884 * @work: Work structure used for scheduling the execution of this function.
 885 *
 886 * Use @work to get the device object the work is to be done for, determine what
 887 * is to be done and execute the appropriate runtime PM function.
 888 */
 889static void pm_runtime_work(struct work_struct *work)
 890{
 891	struct device *dev = container_of(work, struct device, power.work);
 892	enum rpm_request req;
 893
 894	spin_lock_irq(&dev->power.lock);
 895
 896	if (!dev->power.request_pending)
 897		goto out;
 898
 899	req = dev->power.request;
 900	dev->power.request = RPM_REQ_NONE;
 901	dev->power.request_pending = false;
 902
 903	switch (req) {
 904	case RPM_REQ_NONE:
 905		break;
 906	case RPM_REQ_IDLE:
 907		rpm_idle(dev, RPM_NOWAIT);
 908		break;
 909	case RPM_REQ_SUSPEND:
 910		rpm_suspend(dev, RPM_NOWAIT);
 911		break;
 912	case RPM_REQ_AUTOSUSPEND:
 913		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 914		break;
 915	case RPM_REQ_RESUME:
 916		rpm_resume(dev, RPM_NOWAIT);
 917		break;
 918	}
 919
 920 out:
 921	spin_unlock_irq(&dev->power.lock);
 922}
 923
 924/**
 925 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 926 * @data: Device pointer passed by pm_schedule_suspend().
 927 *
 928 * Check if the time is right and queue a suspend request.
 929 */
 930static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 931{
 932	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 933	unsigned long flags;
 934	u64 expires;
 935
 936	spin_lock_irqsave(&dev->power.lock, flags);
 937
 938	expires = dev->power.timer_expires;
 939	/*
 940	 * Do nothing if 'expires' is zero (the timer has been deactivated)
 941	 * or is after the current time (we have been called too early).
 942	 */
 943	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 944		dev->power.timer_expires = 0;
 945		rpm_suspend(dev, dev->power.timer_autosuspends ?
 946		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 947	}
 948
 949	spin_unlock_irqrestore(&dev->power.lock, flags);
 950
 951	return HRTIMER_NORESTART;
 952}
 953
 954/**
 955 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 956 * @dev: Device to suspend.
 957 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 958 */
 959int pm_schedule_suspend(struct device *dev, unsigned int delay)
 960{
 961	unsigned long flags;
 962	u64 expires;
 963	int retval;
 964
 965	spin_lock_irqsave(&dev->power.lock, flags);
 966
 967	if (!delay) {
 968		retval = rpm_suspend(dev, RPM_ASYNC);
 969		goto out;
 970	}
 971
 972	retval = rpm_check_suspend_allowed(dev);
 973	if (retval)
 974		goto out;
 975
 976	/* Other scheduled or pending requests need to be canceled. */
 977	pm_runtime_cancel_pending(dev);
 978
 979	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
 980	dev->power.timer_expires = expires;
 981	dev->power.timer_autosuspends = 0;
 982	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 983
 984 out:
 985	spin_unlock_irqrestore(&dev->power.lock, flags);
 986
 987	return retval;
 988}
 989EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 990
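/*
 * Illustrative sketch (editorial): a driver expecting a period of
 * inactivity can request a delayed, asynchronous suspend; as coded above,
 * a delay of 0 degenerates into an immediate RPM_ASYNC rpm_suspend():
 *
 *	pm_schedule_suspend(dev, 250);	(suspend request in ~250 ms)
 */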
 991/**
 992 * __pm_runtime_idle - Entry point for runtime idle operations.
 993 * @dev: Device to send idle notification for.
 994 * @rpmflags: Flag bits.
 995 *
 996 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 997 * return immediately if it is larger than zero.  Then carry out an idle
 998 * notification, either synchronous or asynchronous.
 999 *
1000 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1001 * or if pm_runtime_irq_safe() has been called.
1002 */
1003int __pm_runtime_idle(struct device *dev, int rpmflags)
1004{
1005	unsigned long flags;
1006	int retval;
1007
1008	if (rpmflags & RPM_GET_PUT) {
1009		if (!atomic_dec_and_test(&dev->power.usage_count))
1010			return 0;
1011	}
1012
1013	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1014
1015	spin_lock_irqsave(&dev->power.lock, flags);
1016	retval = rpm_idle(dev, rpmflags);
1017	spin_unlock_irqrestore(&dev->power.lock, flags);
1018
1019	return retval;
1020}
1021EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1022
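/*
 * Editorial note: callers normally reach this function through the static
 * inline wrappers in include/linux/pm_runtime.h, which in kernels of this
 * vintage expand roughly as follows:
 *
 *	pm_runtime_idle(dev)	-> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	-> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)	-> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */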
1023/**
1024 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1025 * @dev: Device to suspend.
1026 * @rpmflags: Flag bits.
1027 *
1028 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1029 * return immediately if it is larger than zero.  Then carry out a suspend,
1030 * either synchronous or asynchronous.
1031 *
1032 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1033 * or if pm_runtime_irq_safe() has been called.
1034 */
1035int __pm_runtime_suspend(struct device *dev, int rpmflags)
1036{
1037	unsigned long flags;
1038	int retval;
1039
1040	if (rpmflags & RPM_GET_PUT) {
1041		if (!atomic_dec_and_test(&dev->power.usage_count))
1042			return 0;
1043	}
1044
1045	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1046
1047	spin_lock_irqsave(&dev->power.lock, flags);
1048	retval = rpm_suspend(dev, rpmflags);
1049	spin_unlock_irqrestore(&dev->power.lock, flags);
1050
1051	return retval;
1052}
1053EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1054
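/*
 * Editorial note: the pm_runtime.h wrappers around this entry point
 * expand roughly as follows:
 *
 *	pm_runtime_suspend(dev)		-> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)	-> __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_put_sync(dev)	-> __pm_runtime_suspend(dev, RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)	-> __pm_runtime_suspend(dev,
 *					     RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */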
1055/**
1056 * __pm_runtime_resume - Entry point for runtime resume operations.
1057 * @dev: Device to resume.
1058 * @rpmflags: Flag bits.
1059 *
1060 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1061 * carry out a resume, either synchronous or asynchronous.
1062 *
1063 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1064 * or if pm_runtime_irq_safe() has been called.
1065 */
1066int __pm_runtime_resume(struct device *dev, int rpmflags)
1067{
1068	unsigned long flags;
1069	int retval;
1070
1071	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1072			dev->power.runtime_status != RPM_ACTIVE);
1073
1074	if (rpmflags & RPM_GET_PUT)
1075		atomic_inc(&dev->power.usage_count);
1076
1077	spin_lock_irqsave(&dev->power.lock, flags);
1078	retval = rpm_resume(dev, rpmflags);
1079	spin_unlock_irqrestore(&dev->power.lock, flags);
1080
1081	return retval;
1082}
1083EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1084
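/*
 * Editorial note: the common pm_runtime.h wrappers here expand roughly
 * as follows:
 *
 *	pm_runtime_resume(dev)		-> __pm_runtime_resume(dev, 0)
 *	pm_runtime_get(dev)		-> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)	-> __pm_runtime_resume(dev, RPM_GET_PUT)
 */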
1085/**
1086 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1087 * @dev: Device to handle.
1088 *
1089 * Return -EINVAL if runtime PM is disabled for the device.
1090 *
1091 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1092 * and the runtime PM usage counter is nonzero, increment the counter and
1093 * return 1.  Otherwise return 0 without changing the counter.
1094 */
1095int pm_runtime_get_if_in_use(struct device *dev)
1096{
1097	unsigned long flags;
1098	int retval;
1099
1100	spin_lock_irqsave(&dev->power.lock, flags);
1101	retval = dev->power.disable_depth > 0 ? -EINVAL :
1102		dev->power.runtime_status == RPM_ACTIVE
1103			&& atomic_inc_not_zero(&dev->power.usage_count);
1104	spin_unlock_irqrestore(&dev->power.lock, flags);
1105	return retval;
1106}
1107EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1108
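/*
 * Illustrative sketch (editorial; foo_poke_registers() is a hypothetical
 * helper): this function suits opportunistic fast paths that only want to
 * touch the hardware if it is already powered, without waiting for a
 * resume:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		(device stays RPM_ACTIVE until the matching put)
 *		foo_poke_registers(dev);
 *		pm_runtime_put(dev);
 *	}
 */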
1109/**
1110 * __pm_runtime_set_status - Set runtime PM status of a device.
1111 * @dev: Device to handle.
1112 * @status: New runtime PM status of the device.
1113 *
1114 * If runtime PM of the device is disabled or its power.runtime_error field is
1115 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1116 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1117 * However, if the device has a parent and the parent is not active, and the
1118 * parent's power.ignore_children flag is unset, the device's status cannot be
1119 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1120 *
1121 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1122 * and adjusts the parent's counter of unsuspended children to reflect the
1123 * new status.  If the new status is RPM_SUSPENDED, an idle
1124 * notification request for the parent is submitted.
1125 *
1126 * If @dev has any suppliers (as reflected by device links to them), and @status
1127 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1128 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1129 * of the @status value) and the suppliers will be deactivated on exit.  The
1130 * error returned by the failing supplier activation will be returned in that
1131 * case.
1132 */
1133int __pm_runtime_set_status(struct device *dev, unsigned int status)
1134{
1135	struct device *parent = dev->parent;
1136	bool notify_parent = false;
1137	int error = 0;
1138
1139	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1140		return -EINVAL;
1141
1142	spin_lock_irq(&dev->power.lock);
1143
1144	/*
1145	 * Prevent PM-runtime from being enabled for the device or return an
1146	 * error if it is enabled already and working.
1147	 */
1148	if (dev->power.runtime_error || dev->power.disable_depth)
1149		dev->power.disable_depth++;
1150	else
1151		error = -EAGAIN;
1152
1153	spin_unlock_irq(&dev->power.lock);
1154
1155	if (error)
1156		return error;
1157
1158	/*
1159	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1160	 * upfront regardless of the current status, because next time
1161	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1162	 * involved will be dropped down to one anyway.
1163	 */
1164	if (status == RPM_ACTIVE) {
1165		int idx = device_links_read_lock();
1166
1167		error = rpm_get_suppliers(dev);
1168		if (error)
1169			status = RPM_SUSPENDED;
1170
1171		device_links_read_unlock(idx);
1172	}
1173
1174	spin_lock_irq(&dev->power.lock);
1175
1176	if (dev->power.runtime_status == status || !parent)
1177		goto out_set;
1178
1179	if (status == RPM_SUSPENDED) {
1180		atomic_add_unless(&parent->power.child_count, -1, 0);
1181		notify_parent = !parent->power.ignore_children;
1182	} else {
1183		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1184
1185		/*
1186		 * It is invalid to put an active child under a parent that is
1187		 * not active, has runtime PM enabled and the
1188		 * 'power.ignore_children' flag unset.
1189		 */
1190		if (!parent->power.disable_depth
1191		    && !parent->power.ignore_children
1192		    && parent->power.runtime_status != RPM_ACTIVE) {
1193			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1194				dev_name(dev),
1195				dev_name(parent));
1196			error = -EBUSY;
1197		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1198			atomic_inc(&parent->power.child_count);
1199		}
1200
1201		spin_unlock(&parent->power.lock);
1202
1203		if (error) {
1204			status = RPM_SUSPENDED;
1205			goto out;
1206		}
1207	}
1208
1209 out_set:
1210	__update_runtime_status(dev, status);
1211	if (!error)
1212		dev->power.runtime_error = 0;
1213
1214 out:
1215	spin_unlock_irq(&dev->power.lock);
1216
1217	if (notify_parent)
1218		pm_request_idle(parent);
1219
1220	if (status == RPM_SUSPENDED) {
1221		int idx = device_links_read_lock();
1222
1223		rpm_put_suppliers(dev);
1224
1225		device_links_read_unlock(idx);
1226	}
1227
1228	pm_runtime_enable(dev);
1229
1230	return error;
1231}
1232EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1233
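/*
 * Editorial note: drivers normally use the pm_runtime.h wrappers
 * pm_runtime_set_active(dev) and pm_runtime_set_suspended(dev), which pass
 * RPM_ACTIVE and RPM_SUSPENDED here.  A probe() routine handing an
 * already-powered device over to runtime PM typically does:
 *
 *	pm_runtime_set_active(dev);	(status now matches the hardware)
 *	pm_runtime_enable(dev);
 */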
1234/**
1235 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1236 * @dev: Device to handle.
1237 *
1238 * Flush all pending requests for the device from pm_wq and wait for all
1239 * runtime PM operations involving the device in progress to complete.
1240 *
1241 * Should be called under dev->power.lock with interrupts disabled.
1242 */
1243static void __pm_runtime_barrier(struct device *dev)
1244{
1245	pm_runtime_deactivate_timer(dev);
1246
1247	if (dev->power.request_pending) {
1248		dev->power.request = RPM_REQ_NONE;
1249		spin_unlock_irq(&dev->power.lock);
1250
1251		cancel_work_sync(&dev->power.work);
1252
1253		spin_lock_irq(&dev->power.lock);
1254		dev->power.request_pending = false;
1255	}
1256
1257	if (dev->power.runtime_status == RPM_SUSPENDING
1258	    || dev->power.runtime_status == RPM_RESUMING
1259	    || dev->power.idle_notification) {
1260		DEFINE_WAIT(wait);
1261
1262		/* Suspend, wake-up or idle notification in progress. */
1263		for (;;) {
1264			prepare_to_wait(&dev->power.wait_queue, &wait,
1265					TASK_UNINTERRUPTIBLE);
1266			if (dev->power.runtime_status != RPM_SUSPENDING
1267			    && dev->power.runtime_status != RPM_RESUMING
1268			    && !dev->power.idle_notification)
1269				break;
1270			spin_unlock_irq(&dev->power.lock);
1271
1272			schedule();
1273
1274			spin_lock_irq(&dev->power.lock);
1275		}
1276		finish_wait(&dev->power.wait_queue, &wait);
1277	}
1278}
1279
1280/**
1281 * pm_runtime_barrier - Flush pending requests and wait for completions.
1282 * @dev: Device to handle.
1283 *
1284 * Prevent the device from being suspended by incrementing its usage counter
1285 * and, if there is a pending resume request for the device, wake the device up.
1286 * Next, make sure that all pending requests for the device have been flushed
1287 * from pm_wq and wait for all runtime PM operations involving the device in
1288 * progress to complete.
1289 *
1290 * Return value:
1291 * 1, if there was a resume request pending and the device had to be woken up,
1292 * 0, otherwise
1293 */
1294int pm_runtime_barrier(struct device *dev)
1295{
1296	int retval = 0;
1297
1298	pm_runtime_get_noresume(dev);
1299	spin_lock_irq(&dev->power.lock);
1300
1301	if (dev->power.request_pending
1302	    && dev->power.request == RPM_REQ_RESUME) {
1303		rpm_resume(dev, 0);
1304		retval = 1;
1305	}
1306
1307	__pm_runtime_barrier(dev);
1308
1309	spin_unlock_irq(&dev->power.lock);
1310	pm_runtime_put_noidle(dev);
1311
1312	return retval;
1313}
1314EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1315
1316/**
1317 * __pm_runtime_disable - Disable runtime PM of a device.
1318 * @dev: Device to handle.
1319 * @check_resume: If set, check if there's a resume request for the device.
1320 *
1321 * Increment power.disable_depth for the device and if it was zero previously,
1322 * cancel all pending runtime PM requests for the device and wait for all
1323 * operations in progress to complete.  The device can be either active or
1324 * suspended after its runtime PM has been disabled.
1325 *
1326 * If @check_resume is set and there's a resume request pending when
1327 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1328 * function will wake up the device before disabling its runtime PM.
1329 */
1330void __pm_runtime_disable(struct device *dev, bool check_resume)
1331{
1332	spin_lock_irq(&dev->power.lock);
1333
1334	if (dev->power.disable_depth > 0) {
1335		dev->power.disable_depth++;
1336		goto out;
1337	}
1338
1339	/*
1340	 * Wake up the device if there's a resume request pending, because that
1341	 * means there probably is some I/O to process and disabling runtime PM
1342	 * shouldn't prevent the device from processing the I/O.
1343	 */
1344	if (check_resume && dev->power.request_pending
1345	    && dev->power.request == RPM_REQ_RESUME) {
1346		/*
1347		 * Prevent suspends and idle notifications from being carried
1348		 * out after we have woken up the device.
1349		 */
1350		pm_runtime_get_noresume(dev);
1351
1352		rpm_resume(dev, 0);
1353
1354		pm_runtime_put_noidle(dev);
1355	}
1356
1357	/* Update time accounting before disabling PM-runtime. */
1358	update_pm_runtime_accounting(dev);
1359
1360	if (!dev->power.disable_depth++)
1361		__pm_runtime_barrier(dev);
1362
1363 out:
1364	spin_unlock_irq(&dev->power.lock);
1365}
1366EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1367
1368/**
1369 * pm_runtime_enable - Enable runtime PM of a device.
1370 * @dev: Device to handle.
1371 */
1372void pm_runtime_enable(struct device *dev)
1373{
1374	unsigned long flags;
1375
1376	spin_lock_irqsave(&dev->power.lock, flags);
1377
1378	if (dev->power.disable_depth > 0) {
1379		dev->power.disable_depth--;
1380
1381		/* About to enable runtime PM, so set accounting_timestamp to now */
1382		if (!dev->power.disable_depth)
1383			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1384	} else {
1385		dev_warn(dev, "Unbalanced %s!\n", __func__);
1386	}
1387
1388	WARN(!dev->power.disable_depth &&
1389	     dev->power.runtime_status == RPM_SUSPENDED &&
1390	     !dev->power.ignore_children &&
1391	     atomic_read(&dev->power.child_count) > 0,
1392	     "Enabling runtime PM for inactive device (%s) with active children\n",
1393	     dev_name(dev));
1394
1395	spin_unlock_irqrestore(&dev->power.lock, flags);
1396}
1397EXPORT_SYMBOL_GPL(pm_runtime_enable);
1398
1399/**
1400 * pm_runtime_forbid - Block runtime PM of a device.
1401 * @dev: Device to handle.
1402 *
1403 * Increase the device's usage count and clear its power.runtime_auto flag,
1404 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1405 * for it.
1406 */
1407void pm_runtime_forbid(struct device *dev)
1408{
1409	spin_lock_irq(&dev->power.lock);
1410	if (!dev->power.runtime_auto)
1411		goto out;
1412
1413	dev->power.runtime_auto = false;
1414	atomic_inc(&dev->power.usage_count);
1415	rpm_resume(dev, 0);
1416
1417 out:
1418	spin_unlock_irq(&dev->power.lock);
1419}
1420EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1421
1422/**
1423 * pm_runtime_allow - Unblock runtime PM of a device.
1424 * @dev: Device to handle.
1425 *
1426 * Decrease the device's usage count and set its power.runtime_auto flag.
1427 */
1428void pm_runtime_allow(struct device *dev)
1429{
1430	spin_lock_irq(&dev->power.lock);
1431	if (dev->power.runtime_auto)
1432		goto out;
1433
1434	dev->power.runtime_auto = true;
1435	if (atomic_dec_and_test(&dev->power.usage_count))
1436		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1437
1438 out:
1439	spin_unlock_irq(&dev->power.lock);
1440}
1441EXPORT_SYMBOL_GPL(pm_runtime_allow);
1442
1443/**
1444 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1445 * @dev: Device to handle.
1446 *
1447 * Set the power.no_callbacks flag, which tells the PM core that this
1448 * device is power-managed through its parent and has no runtime PM
1449 * callbacks of its own.  The runtime sysfs attributes will be removed.
1450 */
1451void pm_runtime_no_callbacks(struct device *dev)
1452{
1453	spin_lock_irq(&dev->power.lock);
1454	dev->power.no_callbacks = 1;
1455	spin_unlock_irq(&dev->power.lock);
1456	if (device_is_registered(dev))
1457		rpm_sysfs_remove(dev);
1458}
1459EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1460
1461/**
1462 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1463 * @dev: Device to handle
1464 *
1465 * Set the power.irq_safe flag, which tells the PM core that the
1466 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1467 * always be invoked with the spinlock held and interrupts disabled.  It also
1468 * causes the parent's usage counter to be permanently incremented, preventing
1469 * the parent from runtime suspending -- otherwise an irq-safe child might have
1470 * to wait for a non-irq-safe parent.
1471 */
1472void pm_runtime_irq_safe(struct device *dev)
1473{
1474	if (dev->parent)
1475		pm_runtime_get_sync(dev->parent);
1476	spin_lock_irq(&dev->power.lock);
1477	dev->power.irq_safe = 1;
1478	spin_unlock_irq(&dev->power.lock);
1479}
1480EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1481
1482/**
1483 * update_autosuspend - Handle a change to a device's autosuspend settings.
1484 * @dev: Device to handle.
1485 * @old_delay: The former autosuspend_delay value.
1486 * @old_use: The former use_autosuspend value.
1487 *
1488 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1489 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1490 *
1491 * This function must be called under dev->power.lock with interrupts disabled.
1492 */
1493static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1494{
1495	int delay = dev->power.autosuspend_delay;
1496
1497	/* Should runtime suspend be prevented now? */
1498	if (dev->power.use_autosuspend && delay < 0) {
1499
1500		/* If it used to be allowed then prevent it. */
1501		if (!old_use || old_delay >= 0) {
1502			atomic_inc(&dev->power.usage_count);
1503			rpm_resume(dev, 0);
1504		}
1505	}
1506
1507	/* Runtime suspend should be allowed now. */
1508	else {
1509
1510		/* If it used to be prevented then allow it. */
1511		if (old_use && old_delay < 0)
1512			atomic_dec(&dev->power.usage_count);
1513
1514		/* Maybe we can autosuspend now. */
1515		rpm_idle(dev, RPM_AUTO);
1516	}
1517}
1518
1519/**
1520 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1521 * @dev: Device to handle.
1522 * @delay: Value of the new delay in milliseconds.
1523 *
1524 * Set the device's power.autosuspend_delay value.  If it changes to negative
1525 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1526 * changes the other way, allow runtime suspends.
1527 */
1528void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1529{
1530	int old_delay, old_use;
1531
1532	spin_lock_irq(&dev->power.lock);
1533	old_delay = dev->power.autosuspend_delay;
1534	old_use = dev->power.use_autosuspend;
1535	dev->power.autosuspend_delay = delay;
1536	update_autosuspend(dev, old_delay, old_use);
1537	spin_unlock_irq(&dev->power.lock);
1538}
1539EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1540
1541/**
1542 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1543 * @dev: Device to handle.
1544 * @use: New value for use_autosuspend.
1545 *
1546 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1547 * suspends as needed.
1548 */
1549void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1550{
1551	int old_delay, old_use;
1552
1553	spin_lock_irq(&dev->power.lock);
1554	old_delay = dev->power.autosuspend_delay;
1555	old_use = dev->power.use_autosuspend;
1556	dev->power.use_autosuspend = use;
1557	update_autosuspend(dev, old_delay, old_use);
1558	spin_unlock_irq(&dev->power.lock);
1559}
1560EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1561
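/*
 * Illustrative sketch (editorial): the two helpers above are usually
 * combined in probe() to opt a device into autosuspend with a grace
 * period:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 1000);	(1 s of idleness)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * I/O paths then call pm_runtime_mark_last_busy() followed by
 * pm_runtime_put_autosuspend() instead of a plain pm_runtime_put().
 */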
1562/**
1563 * pm_runtime_init - Initialize runtime PM fields in given device object.
1564 * @dev: Device object to initialize.
1565 */
1566void pm_runtime_init(struct device *dev)
1567{
1568	dev->power.runtime_status = RPM_SUSPENDED;
1569	dev->power.idle_notification = false;
1570
1571	dev->power.disable_depth = 1;
1572	atomic_set(&dev->power.usage_count, 0);
1573
1574	dev->power.runtime_error = 0;
1575
1576	atomic_set(&dev->power.child_count, 0);
1577	pm_suspend_ignore_children(dev, false);
1578	dev->power.runtime_auto = true;
1579
1580	dev->power.request_pending = false;
1581	dev->power.request = RPM_REQ_NONE;
1582	dev->power.deferred_resume = false;
1583	INIT_WORK(&dev->power.work, pm_runtime_work);
1584
1585	dev->power.timer_expires = 0;
1586	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1587	dev->power.suspend_timer.function = pm_suspend_timer_fn;
1588
1589	init_waitqueue_head(&dev->power.wait_queue);
1590}
1591
1592/**
1593 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1594 * @dev: Device object to re-initialize.
1595 */
1596void pm_runtime_reinit(struct device *dev)
1597{
1598	if (!pm_runtime_enabled(dev)) {
1599		if (dev->power.runtime_status == RPM_ACTIVE)
1600			pm_runtime_set_suspended(dev);
1601		if (dev->power.irq_safe) {
1602			spin_lock_irq(&dev->power.lock);
1603			dev->power.irq_safe = 0;
1604			spin_unlock_irq(&dev->power.lock);
1605			if (dev->parent)
1606				pm_runtime_put(dev->parent);
1607		}
1608	}
1609}
1610
1611/**
1612 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1613 * @dev: Device object being removed from device hierarchy.
1614 */
1615void pm_runtime_remove(struct device *dev)
1616{
1617	__pm_runtime_disable(dev, false);
1618	pm_runtime_reinit(dev);
1619}
1620
1621/**
1622 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1623 * @dev: Device whose driver is going to be removed.
1624 *
1625 * Check links from this device to any consumers and, if any of them have
1626 * active runtime PM references to the device, drop the device's usage
1627 * counter (as many times as needed).
1628 *
1629 * Links with the DL_FLAG_MANAGED flag unset are ignored.
1630 *
1631 * Since the device is guaranteed to be runtime-active at the point this is
1632 * called, nothing else needs to be done here.
1633 *
1634 * Moreover, this is called after device_links_busy() has returned 'false', so
1635 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1636 * therefore rpm_active can't be manipulated concurrently.
1637 */
1638void pm_runtime_clean_up_links(struct device *dev)
1639{
1640	struct device_link *link;
1641	int idx;
1642
1643	idx = device_links_read_lock();
1644
1645	list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
1646				device_links_read_lock_held()) {
1647		if (!(link->flags & DL_FLAG_MANAGED))
1648			continue;
1649
1650		while (refcount_dec_not_one(&link->rpm_active))
1651			pm_runtime_put_noidle(dev);
1652	}
1653
1654	device_links_read_unlock(idx);
1655}
1656
1657/**
1658 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1659 * @dev: Consumer device.
1660 */
1661void pm_runtime_get_suppliers(struct device *dev)
1662{
1663	struct device_link *link;
1664	int idx;
1665
1666	idx = device_links_read_lock();
1667
1668	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1669				device_links_read_lock_held())
1670		if (link->flags & DL_FLAG_PM_RUNTIME) {
1671			link->supplier_preactivated = true;
1672			refcount_inc(&link->rpm_active);
1673			pm_runtime_get_sync(link->supplier);
1674		}
1675
1676	device_links_read_unlock(idx);
1677}
1678
1679/**
1680 * pm_runtime_put_suppliers - Drop references to supplier devices.
1681 * @dev: Consumer device.
1682 */
1683void pm_runtime_put_suppliers(struct device *dev)
1684{
1685	struct device_link *link;
1686	int idx;
1687
1688	idx = device_links_read_lock();
1689
1690	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1691				device_links_read_lock_held())
1692		if (link->supplier_preactivated) {
1693			link->supplier_preactivated = false;
1694			if (refcount_dec_not_one(&link->rpm_active))
1695				pm_runtime_put(link->supplier);
1696		}
1697
1698	device_links_read_unlock(idx);
1699}
1700
1701void pm_runtime_new_link(struct device *dev)
1702{
1703	spin_lock_irq(&dev->power.lock);
1704	dev->power.links_count++;
1705	spin_unlock_irq(&dev->power.lock);
1706}
1707
1708void pm_runtime_drop_link(struct device *dev)
1709{
1710	spin_lock_irq(&dev->power.lock);
1711	WARN_ON(dev->power.links_count == 0);
1712	dev->power.links_count--;
1713	spin_unlock_irq(&dev->power.lock);
1714}
1715
1716static bool pm_runtime_need_not_resume(struct device *dev)
1717{
1718	return atomic_read(&dev->power.usage_count) <= 1 &&
1719		(atomic_read(&dev->power.child_count) == 0 ||
1720		 dev->power.ignore_children);
1721}
1722
1723/**
1724 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1725 * @dev: Device to suspend.
1726 *
1727 * Disable runtime PM so we safely can check the device's runtime PM status and
1728 * if it is active, invoke its ->runtime_suspend callback to suspend it and
1729 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1730 * usage and children counters don't indicate that the device was in use before
1731 * the system-wide transition under way, decrement its parent's children counter
1732 * (if there is a parent).  Keep runtime PM disabled to preserve the state
1733 * unless we encounter errors.
1734 *
1735 * Typically, this function is invoked from a system suspend callback to make
1736 * sure the device is put into a low-power state.  It should only be used
1737 * during system-wide PM transitions to sleep states.  It assumes that the analogous
1738 * pm_runtime_force_resume() will be used to resume the device.
1739 */
1740int pm_runtime_force_suspend(struct device *dev)
1741{
1742	int (*callback)(struct device *);
1743	int ret;
1744
1745	pm_runtime_disable(dev);
1746	if (pm_runtime_status_suspended(dev))
1747		return 0;
1748
1749	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1750
1751	ret = callback ? callback(dev) : 0;
1752	if (ret)
1753		goto err;
1754
1755	/*
1756	 * If the device can stay in suspend after the system-wide transition
1757	 * to the working state that will follow, drop the children counter of
1758	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1759	 * function will be called again for it in the meantime.
1760	 */
1761	if (pm_runtime_need_not_resume(dev))
1762		pm_runtime_set_suspended(dev);
1763	else
1764		__update_runtime_status(dev, RPM_SUSPENDED);
1765
1766	return 0;
1767
1768err:
1769	pm_runtime_enable(dev);
1770	return ret;
1771}
1772EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1773
1774/**
1775 * pm_runtime_force_resume - Force a device into resume state if needed.
1776 * @dev: Device to resume.
1777 *
1778 * Prior to invoking this function, the caller is expected to have brought the
1779 * device into a low-power state by a call to pm_runtime_force_suspend().  Here
1780 * we reverse those actions and bring the device back to full power, if it is
1781 * expected to be used on system resume.  Otherwise, the resume is deferred to
1782 * be managed via runtime PM.
1783 *
1784 * Typically this function may be invoked from a system resume callback.
1785 */
1786int pm_runtime_force_resume(struct device *dev)
1787{
1788	int (*callback)(struct device *);
1789	int ret = 0;
1790
1791	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1792		goto out;
1793
1794	/*
1795	 * The value of the parent's children counter is correct already, so
1796	 * just update the status of the device.
1797	 */
1798	__update_runtime_status(dev, RPM_ACTIVE);
1799
1800	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1801
1802	ret = callback ? callback(dev) : 0;
1803	if (ret) {
1804		pm_runtime_set_suspended(dev);
1805		goto out;
1806	}
1807
1808	pm_runtime_mark_last_busy(dev);
1809out:
1810	pm_runtime_enable(dev);
1811	return ret;
1812}
1813EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
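/*
 * Illustrative sketch (editorial; the foo_* names are hypothetical): the
 * intended pairing of pm_runtime_force_suspend() and
 * pm_runtime_force_resume() is as the system sleep callbacks of a driver
 * whose runtime PM callbacks already do all the power handling:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */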