v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/wakeup.c - System wakeup events framework
   4 *
   5 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/sched/signal.h>
  12#include <linux/capability.h>
  13#include <linux/export.h>
  14#include <linux/suspend.h>
  15#include <linux/seq_file.h>
  16#include <linux/debugfs.h>
  17#include <linux/pm_wakeirq.h>
  18#include <trace/events/power.h>
  19
  20#include "power.h"
  21
  22#ifndef CONFIG_SUSPEND
  23suspend_state_t pm_suspend_target_state;
  24#define pm_suspend_target_state	(PM_SUSPEND_ON)
  25#endif
  26
  27#define list_for_each_entry_rcu_locked(pos, head, member) \
  28	list_for_each_entry_rcu(pos, head, member, \
  29		srcu_read_lock_held(&wakeup_srcu))
  30/*
  31 * If set, the suspend/hibernate code will abort transitions to a sleep state
  32 * if wakeup events are registered during or immediately before the transition.
  33 */
  34bool events_check_enabled __read_mostly;
  35
  36/* First wakeup IRQ seen by the kernel in the last cycle. */
  37unsigned int pm_wakeup_irq __read_mostly;
  38
  39/* If greater than 0 and the system is suspending, terminate the suspend. */
  40static atomic_t pm_abort_suspend __read_mostly;
  41
  42/*
  43 * Combined counters of registered wakeup events and wakeup events in progress.
  44 * They need to be modified together atomically, so it's better to use one
  45 * atomic variable to hold them both.
  46 */
  47static atomic_t combined_event_count = ATOMIC_INIT(0);
  48
  49#define IN_PROGRESS_BITS	(sizeof(int) * 4)
  50#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
  51
  52static void split_counters(unsigned int *cnt, unsigned int *inpr)
  53{
  54	unsigned int comb = atomic_read(&combined_event_count);
  55
  56	*cnt = (comb >> IN_PROGRESS_BITS);
  57	*inpr = comb & MAX_IN_PROGRESS;
  58}
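
/*
 * Worked example of the packing above (a sketch assuming 32-bit int, i.e.
 * IN_PROGRESS_BITS == 16): with cnt == 3 and inpr == 2 the combined value is
 * (3 << 16) | 2 == 0x00030002.  Adding MAX_IN_PROGRESS (0xffff) gives
 * 0x00040001, i.e. cnt is incremented and inpr decremented in a single atomic
 * operation, which is what wakeup_source_deactivate() relies on below.
 */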
  59
  60/* A preserved old value of the events counter. */
  61static unsigned int saved_count;
  62
  63static DEFINE_RAW_SPINLOCK(events_lock);
  64
  65static void pm_wakeup_timer_fn(struct timer_list *t);
  66
  67static LIST_HEAD(wakeup_sources);
  68
  69static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
  70
  71DEFINE_STATIC_SRCU(wakeup_srcu);
  72
  73static struct wakeup_source deleted_ws = {
  74	.name = "deleted",
  75	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
  76};
  77
  78static DEFINE_IDA(wakeup_ida);
  79
  80/**
  81 * wakeup_source_create - Create a struct wakeup_source object.
  82 * @name: Name of the new wakeup source.
  83 */
  84struct wakeup_source *wakeup_source_create(const char *name)
  85{
  86	struct wakeup_source *ws;
  87	const char *ws_name;
  88	int id;
  89
  90	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
  91	if (!ws)
  92		goto err_ws;
  93
  94	ws_name = kstrdup_const(name, GFP_KERNEL);
  95	if (!ws_name)
  96		goto err_name;
  97	ws->name = ws_name;
  98
  99	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
 100	if (id < 0)
 101		goto err_id;
 102	ws->id = id;
 103
 104	return ws;
 105
 106err_id:
 107	kfree_const(ws->name);
 108err_name:
 109	kfree(ws);
 110err_ws:
 111	return NULL;
 112}
 113EXPORT_SYMBOL_GPL(wakeup_source_create);
 114
 115/*
  116 * Record the statistics of a wakeup_source being deleted into the dummy "deleted" wakeup_source.
 117 */
 118static void wakeup_source_record(struct wakeup_source *ws)
 119{
 120	unsigned long flags;
 121
 122	spin_lock_irqsave(&deleted_ws.lock, flags);
 123
 124	if (ws->event_count) {
 125		deleted_ws.total_time =
 126			ktime_add(deleted_ws.total_time, ws->total_time);
 127		deleted_ws.prevent_sleep_time =
 128			ktime_add(deleted_ws.prevent_sleep_time,
 129				  ws->prevent_sleep_time);
 130		deleted_ws.max_time =
 131			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
 132				deleted_ws.max_time : ws->max_time;
 133		deleted_ws.event_count += ws->event_count;
 134		deleted_ws.active_count += ws->active_count;
 135		deleted_ws.relax_count += ws->relax_count;
 136		deleted_ws.expire_count += ws->expire_count;
 137		deleted_ws.wakeup_count += ws->wakeup_count;
 138	}
 139
 140	spin_unlock_irqrestore(&deleted_ws.lock, flags);
 141}
 142
 143static void wakeup_source_free(struct wakeup_source *ws)
 144{
 145	ida_free(&wakeup_ida, ws->id);
 146	kfree_const(ws->name);
 147	kfree(ws);
 148}
 149
 150/**
 151 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 152 * @ws: Wakeup source to destroy.
 153 *
 154 * Use only for wakeup source objects created with wakeup_source_create().
 155 */
 156void wakeup_source_destroy(struct wakeup_source *ws)
 157{
 158	if (!ws)
 159		return;
 160
 161	__pm_relax(ws);
 162	wakeup_source_record(ws);
 163	wakeup_source_free(ws);
 164}
 165EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 166
 167/**
 168 * wakeup_source_add - Add given object to the list of wakeup sources.
 169 * @ws: Wakeup source object to add to the list.
 170 */
 171void wakeup_source_add(struct wakeup_source *ws)
 172{
 173	unsigned long flags;
 174
 175	if (WARN_ON(!ws))
 176		return;
 177
 178	spin_lock_init(&ws->lock);
 179	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 180	ws->active = false;
 181
 182	raw_spin_lock_irqsave(&events_lock, flags);
 183	list_add_rcu(&ws->entry, &wakeup_sources);
 184	raw_spin_unlock_irqrestore(&events_lock, flags);
 185}
 186EXPORT_SYMBOL_GPL(wakeup_source_add);
 187
 188/**
 189 * wakeup_source_remove - Remove given object from the wakeup sources list.
 190 * @ws: Wakeup source object to remove from the list.
 191 */
 192void wakeup_source_remove(struct wakeup_source *ws)
 193{
 194	unsigned long flags;
 195
 196	if (WARN_ON(!ws))
 197		return;
 198
 199	raw_spin_lock_irqsave(&events_lock, flags);
 200	list_del_rcu(&ws->entry);
 201	raw_spin_unlock_irqrestore(&events_lock, flags);
 202	synchronize_srcu(&wakeup_srcu);
 203
 204	del_timer_sync(&ws->timer);
 205	/*
 206	 * Clear timer.function to make wakeup_source_not_registered() treat
 207	 * this wakeup source as not registered.
 208	 */
 209	ws->timer.function = NULL;
 210}
 211EXPORT_SYMBOL_GPL(wakeup_source_remove);
 212
 213/**
 214 * wakeup_source_register - Create wakeup source and add it to the list.
 215 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 216 * @name: Name of the wakeup source to register.
 217 */
 218struct wakeup_source *wakeup_source_register(struct device *dev,
 219					     const char *name)
 220{
 221	struct wakeup_source *ws;
 222	int ret;
 223
 224	ws = wakeup_source_create(name);
 225	if (ws) {
 226		if (!dev || device_is_registered(dev)) {
 227			ret = wakeup_source_sysfs_add(dev, ws);
 228			if (ret) {
 229				wakeup_source_free(ws);
 230				return NULL;
 231			}
 232		}
 233		wakeup_source_add(ws);
 234	}
 235	return ws;
 236}
 237EXPORT_SYMBOL_GPL(wakeup_source_register);
 238
 239/**
  240 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 241 * @ws: Wakeup source object to unregister.
 242 */
 243void wakeup_source_unregister(struct wakeup_source *ws)
 244{
 245	if (ws) {
 246		wakeup_source_remove(ws);
 247		if (ws->dev)
 248			wakeup_source_sysfs_remove(ws);
 249
 250		wakeup_source_destroy(ws);
 251	}
 252}
 253EXPORT_SYMBOL_GPL(wakeup_source_unregister);
 254
 255/**
 256 * wakeup_sources_read_lock - Lock wakeup source list for read.
 257 *
  258 * Returns an index of the SRCU read-side lock on wakeup_srcu.
 259 * This index must be passed to the matching wakeup_sources_read_unlock().
 260 */
 261int wakeup_sources_read_lock(void)
 262{
 263	return srcu_read_lock(&wakeup_srcu);
 264}
 265EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
 266
 267/**
 268 * wakeup_sources_read_unlock - Unlock wakeup source list.
 269 * @idx: return value from corresponding wakeup_sources_read_lock()
 270 */
 271void wakeup_sources_read_unlock(int idx)
 272{
 273	srcu_read_unlock(&wakeup_srcu, idx);
 274}
 275EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
 276
 277/**
 278 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 279 *
 280 * Returns first object of the list of wakeup sources.
 281 *
  282 * Note that to be safe, the wakeup sources list must be locked by calling
  283 * wakeup_sources_read_lock() before calling this.
 284 */
 285struct wakeup_source *wakeup_sources_walk_start(void)
 286{
 287	struct list_head *ws_head = &wakeup_sources;
 288
 289	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
 290}
 291EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
 292
 293/**
 294 * wakeup_sources_walk_next - Get next wakeup source from the list
 295 * @ws: Previous wakeup source object
 296 *
  297 * Note that to be safe, the wakeup sources list must be locked by calling
  298 * wakeup_sources_read_lock() before calling this.
 299 */
 300struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
 301{
 302	struct list_head *ws_head = &wakeup_sources;
 303
 304	return list_next_or_null_rcu(ws_head, &ws->entry,
 305				struct wakeup_source, entry);
 306}
 307EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
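
/*
 * Illustrative sketch (not part of this file): a caller elsewhere in the
 * kernel can walk the list with the three helpers above.  The function name
 * and the use of pr_info() are only for illustration, and the loop assumes
 * at least one wakeup source is registered.
 */
static void example_print_wakeup_source_names(void)
{
	struct wakeup_source *ws;
	int idx;

	idx = wakeup_sources_read_lock();
	for (ws = wakeup_sources_walk_start(); ws;
	     ws = wakeup_sources_walk_next(ws))
		pr_info("wakeup source: %s\n", ws->name);
	wakeup_sources_read_unlock(idx);
}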
 308
 309/**
 310 * device_wakeup_attach - Attach a wakeup source object to a device object.
 311 * @dev: Device to handle.
 312 * @ws: Wakeup source object to attach to @dev.
 313 *
 314 * This causes @dev to be treated as a wakeup device.
 315 */
 316static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
 317{
 318	spin_lock_irq(&dev->power.lock);
 319	if (dev->power.wakeup) {
 320		spin_unlock_irq(&dev->power.lock);
 321		return -EEXIST;
 322	}
 323	dev->power.wakeup = ws;
 324	if (dev->power.wakeirq)
 325		device_wakeup_attach_irq(dev, dev->power.wakeirq);
 326	spin_unlock_irq(&dev->power.lock);
 327	return 0;
 328}
 329
 330/**
 331 * device_wakeup_enable - Enable given device to be a wakeup source.
 332 * @dev: Device to handle.
 333 *
 334 * Create a wakeup source object, register it and attach it to @dev.
 335 */
 336int device_wakeup_enable(struct device *dev)
 337{
 338	struct wakeup_source *ws;
 339	int ret;
 340
 341	if (!dev || !dev->power.can_wakeup)
 342		return -EINVAL;
 343
 344	if (pm_suspend_target_state != PM_SUSPEND_ON)
 345		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 346
 347	ws = wakeup_source_register(dev, dev_name(dev));
 348	if (!ws)
 349		return -ENOMEM;
 350
 351	ret = device_wakeup_attach(dev, ws);
 352	if (ret)
 353		wakeup_source_unregister(ws);
 354
 355	return ret;
 356}
 357EXPORT_SYMBOL_GPL(device_wakeup_enable);
 358
 359/**
 360 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 361 * @dev: Device to handle
 362 * @wakeirq: Device specific wakeirq entry
 363 *
 364 * Attach a device wakeirq to the wakeup source so the device
 365 * wake IRQ can be configured automatically for suspend and
 366 * resume.
 367 *
 368 * Call under the device's power.lock lock.
 369 */
 370void device_wakeup_attach_irq(struct device *dev,
 371			     struct wake_irq *wakeirq)
 372{
 373	struct wakeup_source *ws;
 374
 375	ws = dev->power.wakeup;
 376	if (!ws)
 377		return;
 378
 379	if (ws->wakeirq)
 380		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
 381
 382	ws->wakeirq = wakeirq;
 383}
 384
 385/**
 386 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 387 * @dev: Device to handle
 388 *
 389 * Removes a device wakeirq from the wakeup source.
 390 *
 391 * Call under the device's power.lock lock.
 392 */
 393void device_wakeup_detach_irq(struct device *dev)
 394{
 395	struct wakeup_source *ws;
 396
 397	ws = dev->power.wakeup;
 398	if (ws)
 399		ws->wakeirq = NULL;
 400}
 401
 402/**
  403 * device_wakeup_arm_wake_irqs - Arm all registered device wake IRQs.
 404 *
 405 * Iterates over the list of device wakeirqs to arm them.
 406 */
 407void device_wakeup_arm_wake_irqs(void)
 408{
 409	struct wakeup_source *ws;
 410	int srcuidx;
 411
 412	srcuidx = srcu_read_lock(&wakeup_srcu);
 413	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 414		dev_pm_arm_wake_irq(ws->wakeirq);
 415	srcu_read_unlock(&wakeup_srcu, srcuidx);
 416}
 417
 418/**
  419 * device_wakeup_disarm_wake_irqs - Disarm all registered device wake IRQs.
 420 *
 421 * Iterates over the list of device wakeirqs to disarm them.
 422 */
 423void device_wakeup_disarm_wake_irqs(void)
 424{
 425	struct wakeup_source *ws;
 426	int srcuidx;
 427
 428	srcuidx = srcu_read_lock(&wakeup_srcu);
 429	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 430		dev_pm_disarm_wake_irq(ws->wakeirq);
 431	srcu_read_unlock(&wakeup_srcu, srcuidx);
 432}
 433
 434/**
 435 * device_wakeup_detach - Detach a device's wakeup source object from it.
 436 * @dev: Device to detach the wakeup source object from.
 437 *
 438 * After it returns, @dev will not be treated as a wakeup device any more.
 439 */
 440static struct wakeup_source *device_wakeup_detach(struct device *dev)
 441{
 442	struct wakeup_source *ws;
 443
 444	spin_lock_irq(&dev->power.lock);
 445	ws = dev->power.wakeup;
 446	dev->power.wakeup = NULL;
 447	spin_unlock_irq(&dev->power.lock);
 448	return ws;
 449}
 450
 451/**
 452 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 453 * @dev: Device to handle.
 454 *
 455 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 456 * object and destroy it.
 457 */
 458int device_wakeup_disable(struct device *dev)
 459{
 460	struct wakeup_source *ws;
 461
 462	if (!dev || !dev->power.can_wakeup)
 463		return -EINVAL;
 464
 465	ws = device_wakeup_detach(dev);
 466	wakeup_source_unregister(ws);
 467	return 0;
 468}
 469EXPORT_SYMBOL_GPL(device_wakeup_disable);
 470
 471/**
 472 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 473 * @dev: Device to handle.
 474 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 475 *
 476 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 477 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 478 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 479 *
 480 * This function may sleep and it can't be called from any context where
 481 * sleeping is not allowed.
 482 */
 483void device_set_wakeup_capable(struct device *dev, bool capable)
 484{
 485	if (!!dev->power.can_wakeup == !!capable)
 486		return;
 487
 488	dev->power.can_wakeup = capable;
 489	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
 490		if (capable) {
 491			int ret = wakeup_sysfs_add(dev);
 492
 493			if (ret)
 494				dev_info(dev, "Wakeup sysfs attributes not added\n");
 495		} else {
 496			wakeup_sysfs_remove(dev);
 497		}
 498	}
 499}
 500EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
 501
 502/**
 503 * device_init_wakeup - Device wakeup initialization.
 504 * @dev: Device to handle.
 505 * @enable: Whether or not to enable @dev as a wakeup device.
 506 *
 507 * By default, most devices should leave wakeup disabled.  The exceptions are
 508 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 509 * possibly network interfaces, etc.  Also, devices that don't generate their
 510 * own wakeup requests but merely forward requests from one bus to another
 511 * (like PCI bridges) should have wakeup enabled by default.
 512 */
 513int device_init_wakeup(struct device *dev, bool enable)
 514{
 515	int ret = 0;
 516
 517	if (!dev)
 518		return -EINVAL;
 519
 520	if (enable) {
 521		device_set_wakeup_capable(dev, true);
 522		ret = device_wakeup_enable(dev);
 523	} else {
 524		device_wakeup_disable(dev);
 525		device_set_wakeup_capable(dev, false);
 526	}
 527
 528	return ret;
 529}
 530EXPORT_SYMBOL_GPL(device_init_wakeup);
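
/*
 * Illustrative sketch (not part of this file): a typical driver probe path,
 * with a hypothetical function name, that makes its device wakeup-capable and
 * enables it in one call.  Userspace can still toggle the setting later
 * through the device's power/wakeup sysfs attribute.
 */
static int example_driver_probe(struct device *dev)
{
	int ret;

	/* Sets power.can_wakeup and registers/attaches a wakeup source. */
	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	/* ... the rest of the driver's probe work ... */
	return 0;
}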
 531
 532/**
 533 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 534 * @dev: Device to handle.
 535 * @enable: enable/disable flag
 536 */
 537int device_set_wakeup_enable(struct device *dev, bool enable)
 538{
 539	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
 540}
 541EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 542
 543/**
 544 * wakeup_source_not_registered - validate the given wakeup source.
 545 * @ws: Wakeup source to be validated.
 546 */
 547static bool wakeup_source_not_registered(struct wakeup_source *ws)
 548{
 549	/*
 550	 * Use timer struct to check if the given source is initialized
 551	 * by wakeup_source_add.
 552	 */
 553	return ws->timer.function != pm_wakeup_timer_fn;
 554}
 555
 556/*
 557 * The functions below use the observation that each wakeup event starts a
 558 * period in which the system should not be suspended.  The moment this period
 559 * will end depends on how the wakeup event is going to be processed after being
 560 * detected and all of the possible cases can be divided into two distinct
 561 * groups.
 562 *
 563 * First, a wakeup event may be detected by the same functional unit that will
 564 * carry out the entire processing of it and possibly will pass it to user space
 565 * for further processing.  In that case the functional unit that has detected
 566 * the event may later "close" the "no suspend" period associated with it
 567 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 568 * pm_relax(), balanced with each other, is supposed to be used in such
 569 * situations.
 570 *
 571 * Second, a wakeup event may be detected by one functional unit and processed
 572 * by another one.  In that case the unit that has detected it cannot really
 573 * "close" the "no suspend" period associated with it, unless it knows in
 574 * advance what's going to happen to the event during processing.  This
 575 * knowledge, however, may not be available to it, so it can simply specify time
 576 * to wait before the system can be suspended and pass it as the second
 577 * argument of pm_wakeup_event().
 578 *
 579 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 580 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 581 * function executed when the timer expires, whichever comes first.
 582 */
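
/*
 * Illustrative sketch (not part of this file) of the two cases described
 * above.  The interrupt handlers and the 100 ms grace period are hypothetical;
 * <linux/interrupt.h> is assumed for the irqreturn_t definitions.
 */

/* Case 1: the detecting code also finishes the processing itself. */
static irqreturn_t example_button_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);	/* open the "no suspend" period */
	/* ... handle the event completely ... */
	pm_relax(dev);		/* close it once the event has been dealt with */
	return IRQ_HANDLED;
}

/* Case 2: the event is handed off, so only a grace period can be given. */
static irqreturn_t example_modem_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_wakeup_event(dev, 100);	/* keep the system awake for ~100 ms */
	return IRQ_HANDLED;
}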
 583
 584/**
 585 * wakeup_source_activate - Mark given wakeup source as active.
 586 * @ws: Wakeup source to handle.
 587 *
 588 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
  589 * core of the event by incrementing the counter of wakeup events being
 590 * processed.
 591 */
 592static void wakeup_source_activate(struct wakeup_source *ws)
 593{
 594	unsigned int cec;
 595
 596	if (WARN_ONCE(wakeup_source_not_registered(ws),
 597			"unregistered wakeup source\n"))
 598		return;
 599
 600	ws->active = true;
 601	ws->active_count++;
 602	ws->last_time = ktime_get();
 603	if (ws->autosleep_enabled)
 604		ws->start_prevent_time = ws->last_time;
 605
 606	/* Increment the counter of events in progress. */
 607	cec = atomic_inc_return(&combined_event_count);
 608
 609	trace_wakeup_source_activate(ws->name, cec);
 610}
 611
 612/**
 613 * wakeup_source_report_event - Report wakeup event using the given source.
 614 * @ws: Wakeup source to report the event for.
 615 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 616 */
 617static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
 618{
 619	ws->event_count++;
 620	/* This is racy, but the counter is approximate anyway. */
 621	if (events_check_enabled)
 622		ws->wakeup_count++;
 623
 624	if (!ws->active)
 625		wakeup_source_activate(ws);
 626
 627	if (hard)
 628		pm_system_wakeup();
 629}
 630
 631/**
 632 * __pm_stay_awake - Notify the PM core of a wakeup event.
 633 * @ws: Wakeup source object associated with the source of the event.
 634 *
 635 * It is safe to call this function from interrupt context.
 636 */
 637void __pm_stay_awake(struct wakeup_source *ws)
 638{
 639	unsigned long flags;
 640
 641	if (!ws)
 642		return;
 643
 644	spin_lock_irqsave(&ws->lock, flags);
 645
 646	wakeup_source_report_event(ws, false);
 647	del_timer(&ws->timer);
 648	ws->timer_expires = 0;
 649
 650	spin_unlock_irqrestore(&ws->lock, flags);
 651}
 652EXPORT_SYMBOL_GPL(__pm_stay_awake);
 653
 654/**
 655 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 656 * @dev: Device the wakeup event is related to.
 657 *
 658 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 659 * __pm_stay_awake for the @dev's wakeup source object.
 660 *
  661 * Call this function after detecting a wakeup event if pm_relax() is going
 662 * to be called directly after processing the event (and possibly passing it to
 663 * user space for further processing).
 664 */
 665void pm_stay_awake(struct device *dev)
 666{
 667	unsigned long flags;
 668
 669	if (!dev)
 670		return;
 671
 672	spin_lock_irqsave(&dev->power.lock, flags);
 673	__pm_stay_awake(dev->power.wakeup);
 674	spin_unlock_irqrestore(&dev->power.lock, flags);
 675}
 676EXPORT_SYMBOL_GPL(pm_stay_awake);
 677
 678#ifdef CONFIG_PM_AUTOSLEEP
 679static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
 680{
 681	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
 682	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
 683}
 684#else
 685static inline void update_prevent_sleep_time(struct wakeup_source *ws,
 686					     ktime_t now) {}
 687#endif
 688
 689/**
 690 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 691 * @ws: Wakeup source to handle.
 692 *
 693 * Update the @ws' statistics and notify the PM core that the wakeup source has
 694 * become inactive by decrementing the counter of wakeup events being processed
 695 * and incrementing the counter of registered wakeup events.
 696 */
 697static void wakeup_source_deactivate(struct wakeup_source *ws)
 698{
 699	unsigned int cnt, inpr, cec;
 700	ktime_t duration;
 701	ktime_t now;
 702
 703	ws->relax_count++;
 704	/*
 705	 * __pm_relax() may be called directly or from a timer function.
 706	 * If it is called directly right after the timer function has been
 707	 * started, but before the timer function calls __pm_relax(), it is
 708	 * possible that __pm_stay_awake() will be called in the meantime and
 709	 * will set ws->active.  Then, ws->active may be cleared immediately
 710	 * by the __pm_relax() called from the timer function, but in such a
 711	 * case ws->relax_count will be different from ws->active_count.
 712	 */
 713	if (ws->relax_count != ws->active_count) {
 714		ws->relax_count--;
 715		return;
 716	}
 717
 718	ws->active = false;
 719
 720	now = ktime_get();
 721	duration = ktime_sub(now, ws->last_time);
 722	ws->total_time = ktime_add(ws->total_time, duration);
 723	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
 724		ws->max_time = duration;
 725
 726	ws->last_time = now;
 727	del_timer(&ws->timer);
 728	ws->timer_expires = 0;
 729
 730	if (ws->autosleep_enabled)
 731		update_prevent_sleep_time(ws, now);
 732
 733	/*
 734	 * Increment the counter of registered wakeup events and decrement the
  735	 * counter of wakeup events in progress simultaneously.
 736	 */
 737	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 738	trace_wakeup_source_deactivate(ws->name, cec);
 739
 740	split_counters(&cnt, &inpr);
 741	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
 742		wake_up(&wakeup_count_wait_queue);
 743}
 744
 745/**
 746 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 747 * @ws: Wakeup source object associated with the source of the event.
 748 *
 749 * Call this function for wakeup events whose processing started with calling
 750 * __pm_stay_awake().
 751 *
 752 * It is safe to call it from interrupt context.
 753 */
 754void __pm_relax(struct wakeup_source *ws)
 755{
 756	unsigned long flags;
 757
 758	if (!ws)
 759		return;
 760
 761	spin_lock_irqsave(&ws->lock, flags);
 762	if (ws->active)
 763		wakeup_source_deactivate(ws);
 764	spin_unlock_irqrestore(&ws->lock, flags);
 765}
 766EXPORT_SYMBOL_GPL(__pm_relax);
 767
 768/**
 769 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 770 * @dev: Device that signaled the event.
 771 *
 772 * Execute __pm_relax() for the @dev's wakeup source object.
 773 */
 774void pm_relax(struct device *dev)
 775{
 776	unsigned long flags;
 777
 778	if (!dev)
 779		return;
 780
 781	spin_lock_irqsave(&dev->power.lock, flags);
 782	__pm_relax(dev->power.wakeup);
 783	spin_unlock_irqrestore(&dev->power.lock, flags);
 784}
 785EXPORT_SYMBOL_GPL(pm_relax);
 786
 787/**
 788 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
  789 * @t: Timer embedded in the wakeup source to handle.
  790 *
  791 * Call wakeup_source_deactivate() for the wakeup source containing @t if it
  792 * is currently active, its timer has not been canceled, and the expiration
  793 * time of the timer is not in the future.
 794 */
 795static void pm_wakeup_timer_fn(struct timer_list *t)
 796{
 797	struct wakeup_source *ws = from_timer(ws, t, timer);
 798	unsigned long flags;
 799
 800	spin_lock_irqsave(&ws->lock, flags);
 801
 802	if (ws->active && ws->timer_expires
 803	    && time_after_eq(jiffies, ws->timer_expires)) {
 804		wakeup_source_deactivate(ws);
 805		ws->expire_count++;
 806	}
 807
 808	spin_unlock_irqrestore(&ws->lock, flags);
 809}
 810
 811/**
 812 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 813 * @ws: Wakeup source object associated with the event source.
 814 * @msec: Anticipated event processing time (in milliseconds).
 815 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 816 *
 817 * Notify the PM core of a wakeup event whose source is @ws that will take
 818 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 819 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
  820 * execute pm_wakeup_timer_fn() in the future.
 821 *
 822 * It is safe to call this function from interrupt context.
 823 */
 824void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 825{
 826	unsigned long flags;
 827	unsigned long expires;
 828
 829	if (!ws)
 830		return;
 831
 832	spin_lock_irqsave(&ws->lock, flags);
 833
 834	wakeup_source_report_event(ws, hard);
 835
 836	if (!msec) {
 837		wakeup_source_deactivate(ws);
 838		goto unlock;
 839	}
 840
 841	expires = jiffies + msecs_to_jiffies(msec);
 842	if (!expires)
 843		expires = 1;
 844
 845	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
 846		mod_timer(&ws->timer, expires);
 847		ws->timer_expires = expires;
 848	}
 849
 850 unlock:
 851	spin_unlock_irqrestore(&ws->lock, flags);
 852}
 853EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
 854
 855/**
 856 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 857 * @dev: Device the wakeup event is related to.
 858 * @msec: Anticipated event processing time (in milliseconds).
 859 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 860 *
 861 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 862 */
 863void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
 864{
 865	unsigned long flags;
 866
 867	if (!dev)
 868		return;
 869
 870	spin_lock_irqsave(&dev->power.lock, flags);
 871	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
 872	spin_unlock_irqrestore(&dev->power.lock, flags);
 873}
 874EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
 875
 876void pm_print_active_wakeup_sources(void)
 877{
 878	struct wakeup_source *ws;
 879	int srcuidx, active = 0;
 880	struct wakeup_source *last_activity_ws = NULL;
 881
 882	srcuidx = srcu_read_lock(&wakeup_srcu);
 883	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 884		if (ws->active) {
 885			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 886			active = 1;
 887		} else if (!active &&
 888			   (!last_activity_ws ||
 889			    ktime_to_ns(ws->last_time) >
 890			    ktime_to_ns(last_activity_ws->last_time))) {
 891			last_activity_ws = ws;
 892		}
 893	}
 894
 895	if (!active && last_activity_ws)
 896		pm_pr_dbg("last active wakeup source: %s\n",
 897			last_activity_ws->name);
 898	srcu_read_unlock(&wakeup_srcu, srcuidx);
 899}
 900EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 901
 902/**
 903 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 904 *
 905 * Compare the current number of registered wakeup events with its preserved
 906 * value from the past and return true if new wakeup events have been registered
 907 * since the old value was stored.  Also return true if the current number of
 908 * wakeup events being processed is different from zero.
 909 */
 910bool pm_wakeup_pending(void)
 911{
 912	unsigned long flags;
 913	bool ret = false;
 914
 915	raw_spin_lock_irqsave(&events_lock, flags);
 916	if (events_check_enabled) {
 917		unsigned int cnt, inpr;
 918
 919		split_counters(&cnt, &inpr);
 920		ret = (cnt != saved_count || inpr > 0);
 921		events_check_enabled = !ret;
 922	}
 923	raw_spin_unlock_irqrestore(&events_lock, flags);
 924
 925	if (ret) {
 926		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 927		pm_print_active_wakeup_sources();
 928	}
 929
 930	return ret || atomic_read(&pm_abort_suspend) > 0;
 931}
 932
 933void pm_system_wakeup(void)
 934{
 935	atomic_inc(&pm_abort_suspend);
 936	s2idle_wake();
 937}
 938EXPORT_SYMBOL_GPL(pm_system_wakeup);
 939
 940void pm_system_cancel_wakeup(void)
 941{
 942	atomic_dec_if_positive(&pm_abort_suspend);
 943}
 944
 945void pm_wakeup_clear(bool reset)
 946{
 947	pm_wakeup_irq = 0;
 948	if (reset)
 949		atomic_set(&pm_abort_suspend, 0);
 950}
 951
 952void pm_system_irq_wakeup(unsigned int irq_number)
 953{
 954	if (pm_wakeup_irq == 0) {
 955		pm_wakeup_irq = irq_number;
 956		pm_system_wakeup();
 957	}
 958}
 959
 960/**
 961 * pm_get_wakeup_count - Read the number of registered wakeup events.
 962 * @count: Address to store the value at.
 963 * @block: Whether or not to block.
 964 *
 965 * Store the number of registered wakeup events at the address in @count.  If
 966 * @block is set, block until the current number of wakeup events being
 967 * processed is zero.
 968 *
 969 * Return 'false' if the current number of wakeup events being processed is
 970 * nonzero.  Otherwise return 'true'.
 971 */
 972bool pm_get_wakeup_count(unsigned int *count, bool block)
 973{
 974	unsigned int cnt, inpr;
 975
 976	if (block) {
 977		DEFINE_WAIT(wait);
 978
 979		for (;;) {
 980			prepare_to_wait(&wakeup_count_wait_queue, &wait,
 981					TASK_INTERRUPTIBLE);
 982			split_counters(&cnt, &inpr);
 983			if (inpr == 0 || signal_pending(current))
 984				break;
 985			pm_print_active_wakeup_sources();
 986			schedule();
 987		}
 988		finish_wait(&wakeup_count_wait_queue, &wait);
 989	}
 990
 991	split_counters(&cnt, &inpr);
 992	*count = cnt;
 993	return !inpr;
 994}
 995
 996/**
 997 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 998 * @count: Value to compare with the current number of registered wakeup events.
 999 *
1000 * If @count is equal to the current number of registered wakeup events and the
1001 * current number of wakeup events being processed is zero, store @count as the
 1002 * old number of registered wakeup events for pm_wakeup_pending(), enable
1003 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1004 * detection and return 'false'.
1005 */
1006bool pm_save_wakeup_count(unsigned int count)
1007{
1008	unsigned int cnt, inpr;
1009	unsigned long flags;
1010
1011	events_check_enabled = false;
1012	raw_spin_lock_irqsave(&events_lock, flags);
1013	split_counters(&cnt, &inpr);
1014	if (cnt == count && inpr == 0) {
1015		saved_count = count;
1016		events_check_enabled = true;
1017	}
1018	raw_spin_unlock_irqrestore(&events_lock, flags);
1019	return events_check_enabled;
1020}
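
/*
 * Illustrative userspace sketch (not kernel code) of the handshake backed by
 * pm_get_wakeup_count() and pm_save_wakeup_count(): read
 * /sys/power/wakeup_count, write the same value back, and only then write
 * /sys/power/state.  If wakeup events are registered in between, the
 * write-back (or the subsequent suspend) fails instead of the events being
 * lost.  Built separately against the C library.
 */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int ok;

	if (!f)
		return -1;
	ok = fputs(val, f) >= 0;
	ok = (fclose(f) == 0) && ok;	/* fclose() flushes; a stale count fails here */
	return ok ? 0 : -1;
}

static int suspend_with_wakeup_count(void)
{
	char count[32];
	FILE *f = fopen("/sys/power/wakeup_count", "r");

	if (!f)
		return -1;
	if (!fgets(count, sizeof(count), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	if (write_sysfs("/sys/power/wakeup_count", count))
		return -1;	/* events registered since the read; retry */

	/* Aborted by the kernel if more wakeup events arrive meanwhile. */
	return write_sysfs("/sys/power/state", "mem");
}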
1021
1022#ifdef CONFIG_PM_AUTOSLEEP
1023/**
1024 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1025 * @set: Whether to set or to clear the autosleep_enabled flags.
1026 */
1027void pm_wakep_autosleep_enabled(bool set)
1028{
1029	struct wakeup_source *ws;
1030	ktime_t now = ktime_get();
1031	int srcuidx;
1032
1033	srcuidx = srcu_read_lock(&wakeup_srcu);
1034	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1035		spin_lock_irq(&ws->lock);
1036		if (ws->autosleep_enabled != set) {
1037			ws->autosleep_enabled = set;
1038			if (ws->active) {
1039				if (set)
1040					ws->start_prevent_time = now;
1041				else
1042					update_prevent_sleep_time(ws, now);
1043			}
1044		}
1045		spin_unlock_irq(&ws->lock);
1046	}
1047	srcu_read_unlock(&wakeup_srcu, srcuidx);
1048}
1049#endif /* CONFIG_PM_AUTOSLEEP */
1050
1051/**
1052 * print_wakeup_source_stats - Print wakeup source statistics information.
1053 * @m: seq_file to print the statistics into.
1054 * @ws: Wakeup source object to print the statistics for.
1055 */
1056static int print_wakeup_source_stats(struct seq_file *m,
1057				     struct wakeup_source *ws)
1058{
1059	unsigned long flags;
1060	ktime_t total_time;
1061	ktime_t max_time;
1062	unsigned long active_count;
1063	ktime_t active_time;
1064	ktime_t prevent_sleep_time;
1065
1066	spin_lock_irqsave(&ws->lock, flags);
1067
1068	total_time = ws->total_time;
1069	max_time = ws->max_time;
1070	prevent_sleep_time = ws->prevent_sleep_time;
1071	active_count = ws->active_count;
1072	if (ws->active) {
1073		ktime_t now = ktime_get();
1074
1075		active_time = ktime_sub(now, ws->last_time);
1076		total_time = ktime_add(total_time, active_time);
1077		if (active_time > max_time)
1078			max_time = active_time;
1079
1080		if (ws->autosleep_enabled)
1081			prevent_sleep_time = ktime_add(prevent_sleep_time,
1082				ktime_sub(now, ws->start_prevent_time));
1083	} else {
1084		active_time = 0;
1085	}
1086
1087	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1088		   ws->name, active_count, ws->event_count,
1089		   ws->wakeup_count, ws->expire_count,
1090		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1091		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1092		   ktime_to_ms(prevent_sleep_time));
1093
1094	spin_unlock_irqrestore(&ws->lock, flags);
1095
1096	return 0;
1097}
1098
1099static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1100					loff_t *pos)
1101{
1102	struct wakeup_source *ws;
1103	loff_t n = *pos;
1104	int *srcuidx = m->private;
1105
1106	if (n == 0) {
1107		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1108			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1109			"last_change\tprevent_suspend_time\n");
1110	}
1111
1112	*srcuidx = srcu_read_lock(&wakeup_srcu);
1113	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1114		if (n-- <= 0)
1115			return ws;
1116	}
1117
1118	return NULL;
1119}
1120
1121static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1122					void *v, loff_t *pos)
1123{
1124	struct wakeup_source *ws = v;
1125	struct wakeup_source *next_ws = NULL;
1126
1127	++(*pos);
1128
1129	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1130		next_ws = ws;
1131		break;
1132	}
1133
1134	if (!next_ws)
1135		print_wakeup_source_stats(m, &deleted_ws);
1136
1137	return next_ws;
1138}
1139
1140static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1141{
1142	int *srcuidx = m->private;
1143
1144	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1145}
1146
1147/**
1148 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1149 * @m: seq_file to print the statistics into.
1150 * @v: wakeup_source of each iteration
1151 */
1152static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1153{
1154	struct wakeup_source *ws = v;
1155
1156	print_wakeup_source_stats(m, ws);
1157
1158	return 0;
1159}
1160
1161static const struct seq_operations wakeup_sources_stats_seq_ops = {
1162	.start = wakeup_sources_stats_seq_start,
1163	.next  = wakeup_sources_stats_seq_next,
1164	.stop  = wakeup_sources_stats_seq_stop,
1165	.show  = wakeup_sources_stats_seq_show,
1166};
1167
1168static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1169{
1170	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1171}
1172
1173static const struct file_operations wakeup_sources_stats_fops = {
1174	.owner = THIS_MODULE,
1175	.open = wakeup_sources_stats_open,
1176	.read = seq_read,
1177	.llseek = seq_lseek,
1178	.release = seq_release_private,
1179};
1180
1181static int __init wakeup_sources_debugfs_init(void)
1182{
1183	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1184			    &wakeup_sources_stats_fops);
1185	return 0;
1186}
1187
1188postcore_initcall(wakeup_sources_debugfs_init);
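
/*
 * With debugfs mounted in the usual place, the statistics table produced by
 * print_wakeup_source_stats() can be read from
 * /sys/kernel/debug/wakeup_sources, e.g. "cat /sys/kernel/debug/wakeup_sources".
 */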
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/wakeup.c - System wakeup events framework
   4 *
   5 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/sched/signal.h>
  12#include <linux/capability.h>
  13#include <linux/export.h>
  14#include <linux/suspend.h>
  15#include <linux/seq_file.h>
  16#include <linux/debugfs.h>
  17#include <linux/pm_wakeirq.h>
  18#include <trace/events/power.h>
  19
  20#include "power.h"
  21
  22#ifndef CONFIG_SUSPEND
  23suspend_state_t pm_suspend_target_state;
  24#define pm_suspend_target_state	(PM_SUSPEND_ON)
  25#endif
  26
  27#define list_for_each_entry_rcu_locked(pos, head, member) \
  28	list_for_each_entry_rcu(pos, head, member, \
  29		srcu_read_lock_held(&wakeup_srcu))
  30/*
  31 * If set, the suspend/hibernate code will abort transitions to a sleep state
  32 * if wakeup events are registered during or immediately before the transition.
  33 */
  34bool events_check_enabled __read_mostly;
  35
  36/* First wakeup IRQ seen by the kernel in the last cycle. */
  37unsigned int pm_wakeup_irq __read_mostly;
  38
  39/* If greater than 0 and the system is suspending, terminate the suspend. */
  40static atomic_t pm_abort_suspend __read_mostly;
  41
  42/*
  43 * Combined counters of registered wakeup events and wakeup events in progress.
  44 * They need to be modified together atomically, so it's better to use one
  45 * atomic variable to hold them both.
  46 */
  47static atomic_t combined_event_count = ATOMIC_INIT(0);
  48
  49#define IN_PROGRESS_BITS	(sizeof(int) * 4)
  50#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
  51
  52static void split_counters(unsigned int *cnt, unsigned int *inpr)
  53{
  54	unsigned int comb = atomic_read(&combined_event_count);
  55
  56	*cnt = (comb >> IN_PROGRESS_BITS);
  57	*inpr = comb & MAX_IN_PROGRESS;
  58}
  59
  60/* A preserved old value of the events counter. */
  61static unsigned int saved_count;
  62
  63static DEFINE_RAW_SPINLOCK(events_lock);
  64
  65static void pm_wakeup_timer_fn(struct timer_list *t);
  66
  67static LIST_HEAD(wakeup_sources);
  68
  69static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
  70
  71DEFINE_STATIC_SRCU(wakeup_srcu);
  72
  73static struct wakeup_source deleted_ws = {
  74	.name = "deleted",
  75	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
  76};
  77
  78static DEFINE_IDA(wakeup_ida);
  79
  80/**
  81 * wakeup_source_create - Create a struct wakeup_source object.
  82 * @name: Name of the new wakeup source.
  83 */
  84struct wakeup_source *wakeup_source_create(const char *name)
  85{
  86	struct wakeup_source *ws;
  87	const char *ws_name;
  88	int id;
  89
  90	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
  91	if (!ws)
  92		goto err_ws;
  93
  94	ws_name = kstrdup_const(name, GFP_KERNEL);
  95	if (!ws_name)
  96		goto err_name;
  97	ws->name = ws_name;
  98
  99	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
 100	if (id < 0)
 101		goto err_id;
 102	ws->id = id;
 103
 104	return ws;
 105
 106err_id:
 107	kfree_const(ws->name);
 108err_name:
 109	kfree(ws);
 110err_ws:
 111	return NULL;
 112}
 113EXPORT_SYMBOL_GPL(wakeup_source_create);
 114
 115/*
 116 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 117 */
 118static void wakeup_source_record(struct wakeup_source *ws)
 119{
 120	unsigned long flags;
 121
 122	spin_lock_irqsave(&deleted_ws.lock, flags);
 123
 124	if (ws->event_count) {
 125		deleted_ws.total_time =
 126			ktime_add(deleted_ws.total_time, ws->total_time);
 127		deleted_ws.prevent_sleep_time =
 128			ktime_add(deleted_ws.prevent_sleep_time,
 129				  ws->prevent_sleep_time);
 130		deleted_ws.max_time =
 131			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
 132				deleted_ws.max_time : ws->max_time;
 133		deleted_ws.event_count += ws->event_count;
 134		deleted_ws.active_count += ws->active_count;
 135		deleted_ws.relax_count += ws->relax_count;
 136		deleted_ws.expire_count += ws->expire_count;
 137		deleted_ws.wakeup_count += ws->wakeup_count;
 138	}
 139
 140	spin_unlock_irqrestore(&deleted_ws.lock, flags);
 141}
 142
 143static void wakeup_source_free(struct wakeup_source *ws)
 144{
 145	ida_free(&wakeup_ida, ws->id);
 146	kfree_const(ws->name);
 147	kfree(ws);
 148}
 149
 150/**
 151 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 152 * @ws: Wakeup source to destroy.
 153 *
 154 * Use only for wakeup source objects created with wakeup_source_create().
 155 */
 156void wakeup_source_destroy(struct wakeup_source *ws)
 157{
 158	if (!ws)
 159		return;
 160
 161	__pm_relax(ws);
 162	wakeup_source_record(ws);
 163	wakeup_source_free(ws);
 164}
 165EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 166
 167/**
 168 * wakeup_source_add - Add given object to the list of wakeup sources.
 169 * @ws: Wakeup source object to add to the list.
 170 */
 171void wakeup_source_add(struct wakeup_source *ws)
 172{
 173	unsigned long flags;
 174
 175	if (WARN_ON(!ws))
 176		return;
 177
 178	spin_lock_init(&ws->lock);
 179	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 180	ws->active = false;
 181
 182	raw_spin_lock_irqsave(&events_lock, flags);
 183	list_add_rcu(&ws->entry, &wakeup_sources);
 184	raw_spin_unlock_irqrestore(&events_lock, flags);
 185}
 186EXPORT_SYMBOL_GPL(wakeup_source_add);
 187
 188/**
 189 * wakeup_source_remove - Remove given object from the wakeup sources list.
 190 * @ws: Wakeup source object to remove from the list.
 191 */
 192void wakeup_source_remove(struct wakeup_source *ws)
 193{
 194	unsigned long flags;
 195
 196	if (WARN_ON(!ws))
 197		return;
 198
 199	raw_spin_lock_irqsave(&events_lock, flags);
 200	list_del_rcu(&ws->entry);
 201	raw_spin_unlock_irqrestore(&events_lock, flags);
 202	synchronize_srcu(&wakeup_srcu);
 203
 204	del_timer_sync(&ws->timer);
 205	/*
 206	 * Clear timer.function to make wakeup_source_not_registered() treat
 207	 * this wakeup source as not registered.
 208	 */
 209	ws->timer.function = NULL;
 210}
 211EXPORT_SYMBOL_GPL(wakeup_source_remove);
 212
 213/**
 214 * wakeup_source_register - Create wakeup source and add it to the list.
 215 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 216 * @name: Name of the wakeup source to register.
 217 */
 218struct wakeup_source *wakeup_source_register(struct device *dev,
 219					     const char *name)
 220{
 221	struct wakeup_source *ws;
 222	int ret;
 223
 224	ws = wakeup_source_create(name);
 225	if (ws) {
 226		if (!dev || device_is_registered(dev)) {
 227			ret = wakeup_source_sysfs_add(dev, ws);
 228			if (ret) {
 229				wakeup_source_free(ws);
 230				return NULL;
 231			}
 232		}
 233		wakeup_source_add(ws);
 234	}
 235	return ws;
 236}
 237EXPORT_SYMBOL_GPL(wakeup_source_register);
 238
 239/**
 240 * wakeup_source_unregister - Remove wakeup source from the list and remove it.
 241 * @ws: Wakeup source object to unregister.
 242 */
 243void wakeup_source_unregister(struct wakeup_source *ws)
 244{
 245	if (ws) {
 246		wakeup_source_remove(ws);
 247		if (ws->dev)
 248			wakeup_source_sysfs_remove(ws);
 249
 250		wakeup_source_destroy(ws);
 251	}
 252}
 253EXPORT_SYMBOL_GPL(wakeup_source_unregister);
 254
 255/**
 256 * wakeup_sources_read_lock - Lock wakeup source list for read.
 257 *
 258 * Returns an index of srcu lock for struct wakeup_srcu.
 259 * This index must be passed to the matching wakeup_sources_read_unlock().
 260 */
 261int wakeup_sources_read_lock(void)
 262{
 263	return srcu_read_lock(&wakeup_srcu);
 264}
 265EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
 266
 267/**
 268 * wakeup_sources_read_unlock - Unlock wakeup source list.
 269 * @idx: return value from corresponding wakeup_sources_read_lock()
 270 */
 271void wakeup_sources_read_unlock(int idx)
 272{
 273	srcu_read_unlock(&wakeup_srcu, idx);
 274}
 275EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
 276
 277/**
 278 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 279 *
 280 * Returns first object of the list of wakeup sources.
 281 *
 282 * Note that to be safe, wakeup sources list needs to be locked by calling
 283 * wakeup_source_read_lock() for this.
 284 */
 285struct wakeup_source *wakeup_sources_walk_start(void)
 286{
 287	struct list_head *ws_head = &wakeup_sources;
 288
 289	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
 290}
 291EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
 292
 293/**
 294 * wakeup_sources_walk_next - Get next wakeup source from the list
 295 * @ws: Previous wakeup source object
 296 *
 297 * Note that to be safe, wakeup sources list needs to be locked by calling
 298 * wakeup_source_read_lock() for this.
 299 */
 300struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
 301{
 302	struct list_head *ws_head = &wakeup_sources;
 303
 304	return list_next_or_null_rcu(ws_head, &ws->entry,
 305				struct wakeup_source, entry);
 306}
 307EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
 308
 309/**
 310 * device_wakeup_attach - Attach a wakeup source object to a device object.
 311 * @dev: Device to handle.
 312 * @ws: Wakeup source object to attach to @dev.
 313 *
 314 * This causes @dev to be treated as a wakeup device.
 315 */
 316static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
 317{
 318	spin_lock_irq(&dev->power.lock);
 319	if (dev->power.wakeup) {
 320		spin_unlock_irq(&dev->power.lock);
 321		return -EEXIST;
 322	}
 323	dev->power.wakeup = ws;
 324	if (dev->power.wakeirq)
 325		device_wakeup_attach_irq(dev, dev->power.wakeirq);
 326	spin_unlock_irq(&dev->power.lock);
 327	return 0;
 328}
 329
 330/**
 331 * device_wakeup_enable - Enable given device to be a wakeup source.
 332 * @dev: Device to handle.
 333 *
 334 * Create a wakeup source object, register it and attach it to @dev.
 335 */
 336int device_wakeup_enable(struct device *dev)
 337{
 338	struct wakeup_source *ws;
 339	int ret;
 340
 341	if (!dev || !dev->power.can_wakeup)
 342		return -EINVAL;
 343
 344	if (pm_suspend_target_state != PM_SUSPEND_ON)
 345		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 346
 347	ws = wakeup_source_register(dev, dev_name(dev));
 348	if (!ws)
 349		return -ENOMEM;
 350
 351	ret = device_wakeup_attach(dev, ws);
 352	if (ret)
 353		wakeup_source_unregister(ws);
 354
 355	return ret;
 356}
 357EXPORT_SYMBOL_GPL(device_wakeup_enable);
 358
 359/**
 360 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 361 * @dev: Device to handle
 362 * @wakeirq: Device specific wakeirq entry
 363 *
 364 * Attach a device wakeirq to the wakeup source so the device
 365 * wake IRQ can be configured automatically for suspend and
 366 * resume.
 367 *
 368 * Call under the device's power.lock lock.
 369 */
 370void device_wakeup_attach_irq(struct device *dev,
 371			     struct wake_irq *wakeirq)
 372{
 373	struct wakeup_source *ws;
 374
 375	ws = dev->power.wakeup;
 376	if (!ws)
 377		return;
 378
 379	if (ws->wakeirq)
 380		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
 381
 382	ws->wakeirq = wakeirq;
 383}
 384
 385/**
 386 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 387 * @dev: Device to handle
 388 *
 389 * Removes a device wakeirq from the wakeup source.
 390 *
 391 * Call under the device's power.lock lock.
 392 */
 393void device_wakeup_detach_irq(struct device *dev)
 394{
 395	struct wakeup_source *ws;
 396
 397	ws = dev->power.wakeup;
 398	if (ws)
 399		ws->wakeirq = NULL;
 400}
 401
 402/**
 403 * device_wakeup_arm_wake_irqs(void)
 404 *
 405 * Itereates over the list of device wakeirqs to arm them.
 406 */
 407void device_wakeup_arm_wake_irqs(void)
 408{
 409	struct wakeup_source *ws;
 410	int srcuidx;
 411
 412	srcuidx = srcu_read_lock(&wakeup_srcu);
 413	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 414		dev_pm_arm_wake_irq(ws->wakeirq);
 415	srcu_read_unlock(&wakeup_srcu, srcuidx);
 416}
 417
 418/**
 419 * device_wakeup_disarm_wake_irqs(void)
 420 *
 421 * Itereates over the list of device wakeirqs to disarm them.
 422 */
 423void device_wakeup_disarm_wake_irqs(void)
 424{
 425	struct wakeup_source *ws;
 426	int srcuidx;
 427
 428	srcuidx = srcu_read_lock(&wakeup_srcu);
 429	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 430		dev_pm_disarm_wake_irq(ws->wakeirq);
 431	srcu_read_unlock(&wakeup_srcu, srcuidx);
 432}
 433
 434/**
 435 * device_wakeup_detach - Detach a device's wakeup source object from it.
 436 * @dev: Device to detach the wakeup source object from.
 437 *
 438 * After it returns, @dev will not be treated as a wakeup device any more.
 439 */
 440static struct wakeup_source *device_wakeup_detach(struct device *dev)
 441{
 442	struct wakeup_source *ws;
 443
 444	spin_lock_irq(&dev->power.lock);
 445	ws = dev->power.wakeup;
 446	dev->power.wakeup = NULL;
 447	spin_unlock_irq(&dev->power.lock);
 448	return ws;
 449}
 450
 451/**
 452 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 453 * @dev: Device to handle.
 454 *
 455 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 456 * object and destroy it.
 457 */
 458int device_wakeup_disable(struct device *dev)
 459{
 460	struct wakeup_source *ws;
 461
 462	if (!dev || !dev->power.can_wakeup)
 463		return -EINVAL;
 464
 465	ws = device_wakeup_detach(dev);
 466	wakeup_source_unregister(ws);
 467	return 0;
 468}
 469EXPORT_SYMBOL_GPL(device_wakeup_disable);
 470
 471/**
 472 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 473 * @dev: Device to handle.
 474 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 475 *
 476 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 477 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 478 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 479 *
 480 * This function may sleep and it can't be called from any context where
 481 * sleeping is not allowed.
 482 */
 483void device_set_wakeup_capable(struct device *dev, bool capable)
 484{
 485	if (!!dev->power.can_wakeup == !!capable)
 486		return;
 487
 488	dev->power.can_wakeup = capable;
 489	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
 490		if (capable) {
 491			int ret = wakeup_sysfs_add(dev);
 492
 493			if (ret)
 494				dev_info(dev, "Wakeup sysfs attributes not added\n");
 495		} else {
 496			wakeup_sysfs_remove(dev);
 497		}
 498	}
 499}
 500EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
 501
 502/**
 503 * device_init_wakeup - Device wakeup initialization.
 504 * @dev: Device to handle.
 505 * @enable: Whether or not to enable @dev as a wakeup device.
 506 *
 507 * By default, most devices should leave wakeup disabled.  The exceptions are
 508 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 509 * possibly network interfaces, etc.  Also, devices that don't generate their
 510 * own wakeup requests but merely forward requests from one bus to another
 511 * (like PCI bridges) should have wakeup enabled by default.
 512 */
 513int device_init_wakeup(struct device *dev, bool enable)
 514{
 515	int ret = 0;
 516
 517	if (!dev)
 518		return -EINVAL;
 519
 520	if (enable) {
 521		device_set_wakeup_capable(dev, true);
 522		ret = device_wakeup_enable(dev);
 523	} else {
 524		device_wakeup_disable(dev);
 525		device_set_wakeup_capable(dev, false);
 526	}
 527
 528	return ret;
 529}
 530EXPORT_SYMBOL_GPL(device_init_wakeup);
 531
 532/**
 533 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 534 * @dev: Device to handle.
 
 535 */
 536int device_set_wakeup_enable(struct device *dev, bool enable)
 537{
 538	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
 539}
 540EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 541
 542/**
 543 * wakeup_source_not_registered - validate the given wakeup source.
 544 * @ws: Wakeup source to be validated.
 545 */
 546static bool wakeup_source_not_registered(struct wakeup_source *ws)
 547{
 548	/*
 549	 * Use timer struct to check if the given source is initialized
 550	 * by wakeup_source_add.
 551	 */
 552	return ws->timer.function != pm_wakeup_timer_fn;
 553}
 554
 555/*
 556 * The functions below use the observation that each wakeup event starts a
 557 * period in which the system should not be suspended.  The moment this period
 558 * will end depends on how the wakeup event is going to be processed after being
 559 * detected and all of the possible cases can be divided into two distinct
 560 * groups.
 561 *
 562 * First, a wakeup event may be detected by the same functional unit that will
 563 * carry out the entire processing of it and possibly will pass it to user space
 564 * for further processing.  In that case the functional unit that has detected
 565 * the event may later "close" the "no suspend" period associated with it
 566 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 567 * pm_relax(), balanced with each other, is supposed to be used in such
 568 * situations.
 569 *
 570 * Second, a wakeup event may be detected by one functional unit and processed
 571 * by another one.  In that case the unit that has detected it cannot really
 572 * "close" the "no suspend" period associated with it, unless it knows in
 573 * advance what's going to happen to the event during processing.  This
 574 * knowledge, however, may not be available to it, so it can simply specify time
 575 * to wait before the system can be suspended and pass it as the second
 576 * argument of pm_wakeup_event().
 577 *
 578 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 579 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 580 * function executed when the timer expires, whichever comes first.
 581 */
 582
 583/**
 584 * wakup_source_activate - Mark given wakeup source as active.
 585 * @ws: Wakeup source to handle.
 586 *
 587 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 588 * core of the event by incrementing the counter of of wakeup events being
 589 * processed.
 590 */
 591static void wakeup_source_activate(struct wakeup_source *ws)
 592{
 593	unsigned int cec;
 594
 595	if (WARN_ONCE(wakeup_source_not_registered(ws),
 596			"unregistered wakeup source\n"))
 597		return;
 598
 599	ws->active = true;
 600	ws->active_count++;
 601	ws->last_time = ktime_get();
 602	if (ws->autosleep_enabled)
 603		ws->start_prevent_time = ws->last_time;
 604
 605	/* Increment the counter of events in progress. */
 606	cec = atomic_inc_return(&combined_event_count);
 607
 608	trace_wakeup_source_activate(ws->name, cec);
 609}
 610
 611/**
 612 * wakeup_source_report_event - Report wakeup event using the given source.
 613 * @ws: Wakeup source to report the event for.
 614 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 615 */
 616static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
 617{
 618	ws->event_count++;
 619	/* This is racy, but the counter is approximate anyway. */
 620	if (events_check_enabled)
 621		ws->wakeup_count++;
 622
 623	if (!ws->active)
 624		wakeup_source_activate(ws);
 625
 626	if (hard)
 627		pm_system_wakeup();
 628}
 629
 630/**
 631 * __pm_stay_awake - Notify the PM core of a wakeup event.
 632 * @ws: Wakeup source object associated with the source of the event.
 633 *
 634 * It is safe to call this function from interrupt context.
 635 */
 636void __pm_stay_awake(struct wakeup_source *ws)
 637{
 638	unsigned long flags;
 639
 640	if (!ws)
 641		return;
 642
 643	spin_lock_irqsave(&ws->lock, flags);
 644
 645	wakeup_source_report_event(ws, false);
 646	del_timer(&ws->timer);
 647	ws->timer_expires = 0;
 648
 649	spin_unlock_irqrestore(&ws->lock, flags);
 650}
 651EXPORT_SYMBOL_GPL(__pm_stay_awake);
 652
 653/**
 654 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 655 * @dev: Device the wakeup event is related to.
 656 *
 657 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 658 * __pm_stay_awake for the @dev's wakeup source object.
 659 *
 660 * Call this function after detecting a wakeup event if pm_relax() is going
 661 * to be called directly after processing the event (and possibly passing it to
 662 * user space for further processing).
 663 */
 664void pm_stay_awake(struct device *dev)
 665{
 666	unsigned long flags;
 667
 668	if (!dev)
 669		return;
 670
 671	spin_lock_irqsave(&dev->power.lock, flags);
 672	__pm_stay_awake(dev->power.wakeup);
 673	spin_unlock_irqrestore(&dev->power.lock, flags);
 674}
 675EXPORT_SYMBOL_GPL(pm_stay_awake);
 676
 677#ifdef CONFIG_PM_AUTOSLEEP
 678static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
 679{
 680	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
 681	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
 682}
 683#else
 684static inline void update_prevent_sleep_time(struct wakeup_source *ws,
 685					     ktime_t now) {}
 686#endif
 687
 688/**
 689 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 690 * @ws: Wakeup source to handle.
 691 *
 692 * Update the @ws' statistics and notify the PM core that the wakeup source has
 693 * become inactive by decrementing the counter of wakeup events being processed
 694 * and incrementing the counter of registered wakeup events.
 695 */
 696static void wakeup_source_deactivate(struct wakeup_source *ws)
 697{
 698	unsigned int cnt, inpr, cec;
 699	ktime_t duration;
 700	ktime_t now;
 701
 702	ws->relax_count++;
 703	/*
 704	 * __pm_relax() may be called directly or from a timer function.
 705	 * If it is called directly right after the timer function has been
 706	 * started, but before the timer function calls __pm_relax(), it is
 707	 * possible that __pm_stay_awake() will be called in the meantime and
 708	 * will set ws->active.  Then, ws->active may be cleared immediately
 709	 * by the __pm_relax() called from the timer function, but in such a
 710	 * case ws->relax_count will be different from ws->active_count.
 711	 */
 712	if (ws->relax_count != ws->active_count) {
 713		ws->relax_count--;
 714		return;
 715	}
 716
 717	ws->active = false;
 718
 719	now = ktime_get();
 720	duration = ktime_sub(now, ws->last_time);
 721	ws->total_time = ktime_add(ws->total_time, duration);
 722	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
 723		ws->max_time = duration;
 724
 725	ws->last_time = now;
 726	del_timer(&ws->timer);
 727	ws->timer_expires = 0;
 728
 729	if (ws->autosleep_enabled)
 730		update_prevent_sleep_time(ws, now);
 731
 732	/*
 733	 * Increment the counter of registered wakeup events and decrement the
 734	 * counter of wakeup events in progress simultaneously.
 735	 */
 736	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 737	trace_wakeup_source_deactivate(ws->name, cec);
 738
 739	split_counters(&cnt, &inpr);
 740	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
 741		wake_up(&wakeup_count_wait_queue);
 742}
 743
 744/**
 745 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 746 * @ws: Wakeup source object associated with the source of the event.
 747 *
 748 * Call this function for wakeup events whose processing started with calling
 749 * __pm_stay_awake().
 750 *
 751 * It is safe to call it from interrupt context.
 752 */
 753void __pm_relax(struct wakeup_source *ws)
 754{
 755	unsigned long flags;
 756
 757	if (!ws)
 758		return;
 759
 760	spin_lock_irqsave(&ws->lock, flags);
 761	if (ws->active)
 762		wakeup_source_deactivate(ws);
 763	spin_unlock_irqrestore(&ws->lock, flags);
 764}
 765EXPORT_SYMBOL_GPL(__pm_relax);
 766
 767/**
 768 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 769 * @dev: Device that signaled the event.
 770 *
 771 * Execute __pm_relax() for the @dev's wakeup source object.
 772 */
 773void pm_relax(struct device *dev)
 774{
 775	unsigned long flags;
 776
 777	if (!dev)
 778		return;
 779
 780	spin_lock_irqsave(&dev->power.lock, flags);
 781	__pm_relax(dev->power.wakeup);
 782	spin_unlock_irqrestore(&dev->power.lock, flags);
 783}
 784EXPORT_SYMBOL_GPL(pm_relax);
 785
 786/**
 787 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 788 * @t: Timer embedded in the wakeup source object associated with the event source.
 789 *
 790 * Call wakeup_source_deactivate() for the wakeup source containing the timer @t
 791 * if it is currently active, its timer has not been canceled and the expiration
 792 * time of the timer is not in the future.
 793 */
 794static void pm_wakeup_timer_fn(struct timer_list *t)
 795{
 796	struct wakeup_source *ws = from_timer(ws, t, timer);
 797	unsigned long flags;
 798
 799	spin_lock_irqsave(&ws->lock, flags);
 800
 801	if (ws->active && ws->timer_expires
 802	    && time_after_eq(jiffies, ws->timer_expires)) {
 803		wakeup_source_deactivate(ws);
 804		ws->expire_count++;
 805	}
 806
 807	spin_unlock_irqrestore(&ws->lock, flags);
 808}
 809
 810/**
 811 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 812 * @ws: Wakeup source object associated with the event source.
 813 * @msec: Anticipated event processing time (in milliseconds).
 814 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 815 *
 816 * Notify the PM core of a wakeup event whose source is @ws that will take
 817 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 818 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 819 * execute pm_wakeup_timer_fn() in the future.
 820 *
 821 * It is safe to call this function from interrupt context.
 822 */
 823void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 824{
 825	unsigned long flags;
 826	unsigned long expires;
 827
 828	if (!ws)
 829		return;
 830
 831	spin_lock_irqsave(&ws->lock, flags);
 832
 833	wakeup_source_report_event(ws, hard);
 834
 835	if (!msec) {
 836		wakeup_source_deactivate(ws);
 837		goto unlock;
 838	}
 839
 840	expires = jiffies + msecs_to_jiffies(msec);
 841	if (!expires)
 842		expires = 1;
 843
 844	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
 845		mod_timer(&ws->timer, expires);
 846		ws->timer_expires = expires;
 847	}
 848
 849 unlock:
 850	spin_unlock_irqrestore(&ws->lock, flags);
 851}
 852EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
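/*
 * A sketch of two common pm_wakeup_ws_event() call patterns, assuming a
 * wakeup source registered elsewhere.  The function names and the 500 ms
 * estimate are illustrative.
 */
#if 0	/* example only, not compiled */
static void example_soft_event(struct wakeup_source *ws)
{
	/* Allow roughly 500 ms for the event to be consumed. */
	pm_wakeup_ws_event(ws, 500, false);
}

static void example_hard_event(struct wakeup_source *ws)
{
	/*
	 * A zero msec records the event and deactivates the source again
	 * right away; hard == true additionally aborts suspends in progress
	 * and wakes the system up from suspend-to-idle.
	 */
	pm_wakeup_ws_event(ws, 0, true);
}
#endif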
 853
 854/**
 855 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 856 * @dev: Device the wakeup event is related to.
 857 * @msec: Anticipated event processing time (in milliseconds).
 858 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 859 *
 860 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 861 */
 862void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
 863{
 864	unsigned long flags;
 865
 866	if (!dev)
 867		return;
 868
 869	spin_lock_irqsave(&dev->power.lock, flags);
 870	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
 871	spin_unlock_irqrestore(&dev->power.lock, flags);
 872}
 873EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
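/*
 * A sketch of reporting a device wakeup event with an anticipated processing
 * time, e.g. from the device's interrupt handler.  The helper name and the
 * 100 ms estimate are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_device_wakeup(struct device *dev)
{
	if (device_may_wakeup(dev))
		pm_wakeup_dev_event(dev, 100, false);
}
#endif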
 874
 875void pm_print_active_wakeup_sources(void)
 876{
 877	struct wakeup_source *ws;
 878	int srcuidx, active = 0;
 879	struct wakeup_source *last_activity_ws = NULL;
 880
 881	srcuidx = srcu_read_lock(&wakeup_srcu);
 882	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 883		if (ws->active) {
 884			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 885			active = 1;
 886		} else if (!active &&
 887			   (!last_activity_ws ||
 888			    ktime_to_ns(ws->last_time) >
 889			    ktime_to_ns(last_activity_ws->last_time))) {
 890			last_activity_ws = ws;
 891		}
 892	}
 893
 894	if (!active && last_activity_ws)
 895		pm_pr_dbg("last active wakeup source: %s\n",
 896			last_activity_ws->name);
 897	srcu_read_unlock(&wakeup_srcu, srcuidx);
 898}
 899EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 900
 901/**
 902 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 903 *
 904 * Compare the current number of registered wakeup events with its preserved
 905 * value from the past and return true if new wakeup events have been registered
 906 * since the old value was stored.  Also return true if the current number of
 907 * wakeup events being processed is nonzero.
 908 */
 909bool pm_wakeup_pending(void)
 910{
 911	unsigned long flags;
 912	bool ret = false;
 913
 914	raw_spin_lock_irqsave(&events_lock, flags);
 915	if (events_check_enabled) {
 916		unsigned int cnt, inpr;
 917
 918		split_counters(&cnt, &inpr);
 919		ret = (cnt != saved_count || inpr > 0);
 920		events_check_enabled = !ret;
 921	}
 922	raw_spin_unlock_irqrestore(&events_lock, flags);
 923
 924	if (ret) {
 925		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 926		pm_print_active_wakeup_sources();
 927	}
 928
 929	return ret || atomic_read(&pm_abort_suspend) > 0;
 930}
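/*
 * A sketch of how a suspend code path might use pm_wakeup_pending() to bail
 * out between the stages of a transition; the surrounding function is
 * hypothetical, not the actual suspend core code.
 */
#if 0	/* example only, not compiled */
static int example_suspend_stage(void)
{
	if (pm_wakeup_pending())
		return -EBUSY;	/* wakeup event(s) pending, abort the transition */
	/* ... carry out the next stage of the transition ... */
	return 0;
}
#endif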
 931
 932void pm_system_wakeup(void)
 933{
 934	atomic_inc(&pm_abort_suspend);
 935	s2idle_wake();
 936}
 937EXPORT_SYMBOL_GPL(pm_system_wakeup);
 938
 939void pm_system_cancel_wakeup(void)
 940{
 941	atomic_dec_if_positive(&pm_abort_suspend);
 942}
 943
 944void pm_wakeup_clear(bool reset)
 945{
 946	pm_wakeup_irq = 0;
 947	if (reset)
 948		atomic_set(&pm_abort_suspend, 0);
 949}
 950
 951void pm_system_irq_wakeup(unsigned int irq_number)
 952{
 953	if (pm_wakeup_irq == 0) {
 954		pm_wakeup_irq = irq_number;
 955		pm_system_wakeup();
 956	}
 957}
 958
 959/**
 960 * pm_get_wakeup_count - Read the number of registered wakeup events.
 961 * @count: Address to store the value at.
 962 * @block: Whether or not to block.
 963 *
 964 * Store the number of registered wakeup events at the address in @count.  If
 965 * @block is set, block until the current number of wakeup events being
 966 * processed is zero.
 967 *
 968 * Return 'false' if the current number of wakeup events being processed is
 969 * nonzero.  Otherwise return 'true'.
 970 */
 971bool pm_get_wakeup_count(unsigned int *count, bool block)
 972{
 973	unsigned int cnt, inpr;
 974
 975	if (block) {
 976		DEFINE_WAIT(wait);
 977
 978		for (;;) {
 979			prepare_to_wait(&wakeup_count_wait_queue, &wait,
 980					TASK_INTERRUPTIBLE);
 981			split_counters(&cnt, &inpr);
 982			if (inpr == 0 || signal_pending(current))
 983				break;
 984			pm_print_active_wakeup_sources();
 985			schedule();
 986		}
 987		finish_wait(&wakeup_count_wait_queue, &wait);
 988	}
 989
 990	split_counters(&cnt, &inpr);
 991	*count = cnt;
 992	return !inpr;
 993}
 994
 995/**
 996 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 997 * @count: Value to compare with the current number of registered wakeup events.
 998 *
 999 * If @count is equal to the current number of registered wakeup events and the
1000 * current number of wakeup events being processed is zero, store @count as the
1001 * old number of registered wakeup events for pm_wakeup_pending(), enable
1002 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1003 * detection and return 'false'.
1004 */
1005bool pm_save_wakeup_count(unsigned int count)
1006{
1007	unsigned int cnt, inpr;
1008	unsigned long flags;
1009
1010	events_check_enabled = false;
1011	raw_spin_lock_irqsave(&events_lock, flags);
1012	split_counters(&cnt, &inpr);
1013	if (cnt == count && inpr == 0) {
1014		saved_count = count;
1015		events_check_enabled = true;
1016	}
1017	raw_spin_unlock_irqrestore(&events_lock, flags);
1018	return events_check_enabled;
1019}
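/*
 * A sketch of the handshake implemented by pm_get_wakeup_count() and
 * pm_save_wakeup_count(), which backs the /sys/power/wakeup_count interface:
 * read the count (blocking until no events are in progress), write it back,
 * and only then start the transition, which pm_wakeup_pending() will abort
 * if new events arrive.  The function below is hypothetical glue, not the
 * actual sysfs code.
 */
#if 0	/* example only, not compiled */
static int example_wakeup_count_handshake(void)
{
	unsigned int cnt;

	if (!pm_get_wakeup_count(&cnt, true))
		return -EBUSY;	/* interrupted while events were in progress */

	/* ... the suspend initiator may do other preparation here ... */

	if (!pm_save_wakeup_count(cnt))
		return -EBUSY;	/* new events were registered, do not suspend */

	return 0;		/* events_check_enabled is now set */
}
#endif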
1020
1021#ifdef CONFIG_PM_AUTOSLEEP
1022/**
1023 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1024 * @set: Whether to set or to clear the autosleep_enabled flags.
1025 */
1026void pm_wakep_autosleep_enabled(bool set)
1027{
1028	struct wakeup_source *ws;
1029	ktime_t now = ktime_get();
1030	int srcuidx;
1031
1032	srcuidx = srcu_read_lock(&wakeup_srcu);
1033	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1034		spin_lock_irq(&ws->lock);
1035		if (ws->autosleep_enabled != set) {
1036			ws->autosleep_enabled = set;
1037			if (ws->active) {
1038				if (set)
1039					ws->start_prevent_time = now;
1040				else
1041					update_prevent_sleep_time(ws, now);
1042			}
1043		}
1044		spin_unlock_irq(&ws->lock);
1045	}
1046	srcu_read_unlock(&wakeup_srcu, srcuidx);
1047}
1048#endif /* CONFIG_PM_AUTOSLEEP */
1049
1050/**
1051 * print_wakeup_source_stats - Print wakeup source statistics information.
1052 * @m: seq_file to print the statistics into.
1053 * @ws: Wakeup source object to print the statistics for.
1054 */
1055static int print_wakeup_source_stats(struct seq_file *m,
1056				     struct wakeup_source *ws)
1057{
1058	unsigned long flags;
1059	ktime_t total_time;
1060	ktime_t max_time;
1061	unsigned long active_count;
1062	ktime_t active_time;
1063	ktime_t prevent_sleep_time;
1064
1065	spin_lock_irqsave(&ws->lock, flags);
1066
1067	total_time = ws->total_time;
1068	max_time = ws->max_time;
1069	prevent_sleep_time = ws->prevent_sleep_time;
1070	active_count = ws->active_count;
1071	if (ws->active) {
1072		ktime_t now = ktime_get();
1073
1074		active_time = ktime_sub(now, ws->last_time);
1075		total_time = ktime_add(total_time, active_time);
1076		if (active_time > max_time)
1077			max_time = active_time;
1078
1079		if (ws->autosleep_enabled)
1080			prevent_sleep_time = ktime_add(prevent_sleep_time,
1081				ktime_sub(now, ws->start_prevent_time));
1082	} else {
1083		active_time = 0;
1084	}
1085
1086	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1087		   ws->name, active_count, ws->event_count,
1088		   ws->wakeup_count, ws->expire_count,
1089		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1090		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1091		   ktime_to_ms(prevent_sleep_time));
1092
1093	spin_unlock_irqrestore(&ws->lock, flags);
1094
1095	return 0;
1096}
1097
1098static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1099					loff_t *pos)
1100{
1101	struct wakeup_source *ws;
1102	loff_t n = *pos;
1103	int *srcuidx = m->private;
1104
1105	if (n == 0) {
1106		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1107			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1108			"last_change\tprevent_suspend_time\n");
1109	}
1110
1111	*srcuidx = srcu_read_lock(&wakeup_srcu);
1112	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1113		if (n-- <= 0)
1114			return ws;
1115	}
1116
1117	return NULL;
1118}
1119
1120static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1121					void *v, loff_t *pos)
1122{
1123	struct wakeup_source *ws = v;
1124	struct wakeup_source *next_ws = NULL;
1125
1126	++(*pos);
1127
1128	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1129		next_ws = ws;
1130		break;
1131	}
1132
1133	if (!next_ws)
1134		print_wakeup_source_stats(m, &deleted_ws);
1135
1136	return next_ws;
1137}
1138
1139static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1140{
1141	int *srcuidx = m->private;
1142
1143	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1144}
1145
1146/**
1147 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1148 * @m: seq_file to print the statistics into.
1149 * @v: Wakeup source object for the current iteration.
1150 */
1151static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1152{
1153	struct wakeup_source *ws = v;
1154
1155	print_wakeup_source_stats(m, ws);
1156
1157	return 0;
1158}
1159
1160static const struct seq_operations wakeup_sources_stats_seq_ops = {
1161	.start = wakeup_sources_stats_seq_start,
1162	.next  = wakeup_sources_stats_seq_next,
1163	.stop  = wakeup_sources_stats_seq_stop,
1164	.show  = wakeup_sources_stats_seq_show,
1165};
1166
1167static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1168{
1169	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1170}
1171
1172static const struct file_operations wakeup_sources_stats_fops = {
1173	.owner = THIS_MODULE,
1174	.open = wakeup_sources_stats_open,
1175	.read = seq_read,
1176	.llseek = seq_lseek,
1177	.release = seq_release_private,
1178};
1179
1180static int __init wakeup_sources_debugfs_init(void)
1181{
1182	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1183			    &wakeup_sources_stats_fops);
1184	return 0;
1185}
1186
1187postcore_initcall(wakeup_sources_debugfs_init);
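/*
 * Note: with debugfs mounted at its usual /sys/kernel/debug location, the
 * statistics produced by print_wakeup_source_stats() can be read from
 * /sys/kernel/debug/wakeup_sources, one line per wakeup source plus the
 * aggregate "deleted" entry.
 */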