v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/wakeup.c - System wakeup events framework
   4 *
   5 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/sched/signal.h>
  12#include <linux/capability.h>
  13#include <linux/export.h>
  14#include <linux/suspend.h>
  15#include <linux/seq_file.h>
  16#include <linux/debugfs.h>
  17#include <linux/pm_wakeirq.h>
  18#include <trace/events/power.h>
  19
  20#include "power.h"
  21
  22#define list_for_each_entry_rcu_locked(pos, head, member) \
  23	list_for_each_entry_rcu(pos, head, member, \
  24		srcu_read_lock_held(&wakeup_srcu))
  25/*
  26 * If set, the suspend/hibernate code will abort transitions to a sleep state
  27 * if wakeup events are registered during or immediately before the transition.
  28 */
  29bool events_check_enabled __read_mostly;
  30
  31/* First wakeup IRQ seen by the kernel in the last cycle. */
  32static unsigned int wakeup_irq[2] __read_mostly;
  33static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
  34
  35/* If greater than 0 and the system is suspending, terminate the suspend. */
  36static atomic_t pm_abort_suspend __read_mostly;
  37
  38/*
  39 * Combined counters of registered wakeup events and wakeup events in progress.
  40 * They need to be modified together atomically, so it's better to use one
  41 * atomic variable to hold them both.
  42 */
  43static atomic_t combined_event_count = ATOMIC_INIT(0);
  44
  45#define IN_PROGRESS_BITS	(sizeof(int) * 4)
  46#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
  47
  48static void split_counters(unsigned int *cnt, unsigned int *inpr)
  49{
  50	unsigned int comb = atomic_read(&combined_event_count);
  51
  52	*cnt = (comb >> IN_PROGRESS_BITS);
  53	*inpr = comb & MAX_IN_PROGRESS;
  54}
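
For reference, the packing above works out as follows when int is 32 bits wide, so that IN_PROGRESS_BITS is 16 and MAX_IN_PROGRESS is 0xffff:

/*
 *   combined = (cnt << 16) | inpr
 *
 *   wakeup_source_activate():    combined += 1       =>  inpr++
 *   wakeup_source_deactivate():  combined += 0xffff  =>  cnt++, inpr--
 *
 * Adding MAX_IN_PROGRESS is the same as adding (1 << 16) and subtracting 1,
 * so both halves change in a single atomic operation, which is why the two
 * counters share one atomic_t.
 */
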
  55
  56/* A preserved old value of the events counter. */
  57static unsigned int saved_count;
  58
  59static DEFINE_RAW_SPINLOCK(events_lock);
  60
  61static void pm_wakeup_timer_fn(struct timer_list *t);
  62
  63static LIST_HEAD(wakeup_sources);
  64
  65static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
  66
  67DEFINE_STATIC_SRCU(wakeup_srcu);
  68
  69static struct wakeup_source deleted_ws = {
  70	.name = "deleted",
  71	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
  72};
  73
  74static DEFINE_IDA(wakeup_ida);
  75
  76/**
  77 * wakeup_source_create - Create a struct wakeup_source object.
  78 * @name: Name of the new wakeup source.
  79 */
  80struct wakeup_source *wakeup_source_create(const char *name)
  81{
  82	struct wakeup_source *ws;
  83	const char *ws_name;
  84	int id;
  85
  86	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
  87	if (!ws)
  88		goto err_ws;
  89
  90	ws_name = kstrdup_const(name, GFP_KERNEL);
  91	if (!ws_name)
  92		goto err_name;
  93	ws->name = ws_name;
  94
  95	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
  96	if (id < 0)
  97		goto err_id;
  98	ws->id = id;
  99
 100	return ws;
 101
 102err_id:
 103	kfree_const(ws->name);
 104err_name:
 105	kfree(ws);
 106err_ws:
 107	return NULL;
 108}
 109EXPORT_SYMBOL_GPL(wakeup_source_create);
 110
 111/*
 112 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 113 */
 114static void wakeup_source_record(struct wakeup_source *ws)
 115{
 116	unsigned long flags;
 117
 118	spin_lock_irqsave(&deleted_ws.lock, flags);
 119
 120	if (ws->event_count) {
 121		deleted_ws.total_time =
 122			ktime_add(deleted_ws.total_time, ws->total_time);
 123		deleted_ws.prevent_sleep_time =
 124			ktime_add(deleted_ws.prevent_sleep_time,
 125				  ws->prevent_sleep_time);
 126		deleted_ws.max_time =
 127			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
 128				deleted_ws.max_time : ws->max_time;
 129		deleted_ws.event_count += ws->event_count;
 130		deleted_ws.active_count += ws->active_count;
 131		deleted_ws.relax_count += ws->relax_count;
 132		deleted_ws.expire_count += ws->expire_count;
 133		deleted_ws.wakeup_count += ws->wakeup_count;
 134	}
 135
 136	spin_unlock_irqrestore(&deleted_ws.lock, flags);
 137}
 138
 139static void wakeup_source_free(struct wakeup_source *ws)
 140{
 141	ida_free(&wakeup_ida, ws->id);
 142	kfree_const(ws->name);
 143	kfree(ws);
 144}
 145
 146/**
 147 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 148 * @ws: Wakeup source to destroy.
 149 *
 150 * Use only for wakeup source objects created with wakeup_source_create().
 151 */
 152void wakeup_source_destroy(struct wakeup_source *ws)
 153{
 154	if (!ws)
 155		return;
 156
 157	__pm_relax(ws);
 158	wakeup_source_record(ws);
 159	wakeup_source_free(ws);
 160}
 161EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 162
 163/**
 164 * wakeup_source_add - Add given object to the list of wakeup sources.
 165 * @ws: Wakeup source object to add to the list.
 166 */
 167void wakeup_source_add(struct wakeup_source *ws)
 168{
 169	unsigned long flags;
 170
 171	if (WARN_ON(!ws))
 172		return;
 173
 174	spin_lock_init(&ws->lock);
 175	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 176	ws->active = false;
 177
 178	raw_spin_lock_irqsave(&events_lock, flags);
 179	list_add_rcu(&ws->entry, &wakeup_sources);
 180	raw_spin_unlock_irqrestore(&events_lock, flags);
 181}
 182EXPORT_SYMBOL_GPL(wakeup_source_add);
 183
 184/**
 185 * wakeup_source_remove - Remove given object from the wakeup sources list.
 186 * @ws: Wakeup source object to remove from the list.
 187 */
 188void wakeup_source_remove(struct wakeup_source *ws)
 189{
 190	unsigned long flags;
 191
 192	if (WARN_ON(!ws))
 193		return;
 194
 195	raw_spin_lock_irqsave(&events_lock, flags);
 196	list_del_rcu(&ws->entry);
 197	raw_spin_unlock_irqrestore(&events_lock, flags);
 198	synchronize_srcu(&wakeup_srcu);
 199
 200	del_timer_sync(&ws->timer);
 201	/*
 202	 * Clear timer.function to make wakeup_source_not_registered() treat
 203	 * this wakeup source as not registered.
 204	 */
 205	ws->timer.function = NULL;
 206}
 207EXPORT_SYMBOL_GPL(wakeup_source_remove);
 208
 209/**
 210 * wakeup_source_register - Create wakeup source and add it to the list.
 211 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 212 * @name: Name of the wakeup source to register.
 213 */
 214struct wakeup_source *wakeup_source_register(struct device *dev,
 215					     const char *name)
 216{
 217	struct wakeup_source *ws;
 218	int ret;
 219
 220	ws = wakeup_source_create(name);
 221	if (ws) {
 222		if (!dev || device_is_registered(dev)) {
 223			ret = wakeup_source_sysfs_add(dev, ws);
 224			if (ret) {
 225				wakeup_source_free(ws);
 226				return NULL;
 227			}
 228		}
 229		wakeup_source_add(ws);
 230	}
 231	return ws;
 232}
 233EXPORT_SYMBOL_GPL(wakeup_source_register);
 234
 235/**
 236 * wakeup_source_unregister - Remove wakeup source from the list and remove it.
 237 * @ws: Wakeup source object to unregister.
 238 */
 239void wakeup_source_unregister(struct wakeup_source *ws)
 240{
 241	if (ws) {
 242		wakeup_source_remove(ws);
 243		if (ws->dev)
 244			wakeup_source_sysfs_remove(ws);
 245
 246		wakeup_source_destroy(ws);
 247	}
 248}
 249EXPORT_SYMBOL_GPL(wakeup_source_unregister);
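
A minimal usage sketch for the create/register/unregister API above; the foo_* names and the "foo-events" string are hypothetical, and <linux/pm_wakeup.h> provides the declarations:

struct foo_priv {
	struct wakeup_source *ws;	/* hypothetical driver-private state */
};

static int foo_setup_wakeup(struct device *dev, struct foo_priv *priv)
{
	/* Create the source, add it to the list (and to sysfs if @dev is registered). */
	priv->ws = wakeup_source_register(dev, "foo-events");
	if (!priv->ws)
		return -ENOMEM;

	return 0;
}

static void foo_teardown_wakeup(struct foo_priv *priv)
{
	/* Deactivate the source, unlink it from the list and free it. */
	wakeup_source_unregister(priv->ws);
	priv->ws = NULL;
}
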
 250
 251/**
 252 * wakeup_sources_read_lock - Lock wakeup source list for read.
 253 *
 254 * Returns an index of srcu lock for struct wakeup_srcu.
 255 * This index must be passed to the matching wakeup_sources_read_unlock().
 256 */
 257int wakeup_sources_read_lock(void)
 258{
 259	return srcu_read_lock(&wakeup_srcu);
 260}
 261EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
 262
 263/**
 264 * wakeup_sources_read_unlock - Unlock wakeup source list.
 265 * @idx: return value from corresponding wakeup_sources_read_lock()
 266 */
 267void wakeup_sources_read_unlock(int idx)
 268{
 269	srcu_read_unlock(&wakeup_srcu, idx);
 270}
 271EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
 272
 273/**
 274 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 275 *
 276 * Returns first object of the list of wakeup sources.
 277 *
 278 * Note that to be safe, wakeup sources list needs to be locked by calling
 279 * wakeup_source_read_lock() for this.
 280 */
 281struct wakeup_source *wakeup_sources_walk_start(void)
 282{
 283	struct list_head *ws_head = &wakeup_sources;
 284
 285	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
 286}
 287EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
 288
 289/**
 290 * wakeup_sources_walk_next - Get next wakeup source from the list
 291 * @ws: Previous wakeup source object
 292 *
 293 * Note that to be safe, wakeup sources list needs to be locked by calling
 294 * wakeup_source_read_lock() for this.
 295 */
 296struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
 297{
 298	struct list_head *ws_head = &wakeup_sources;
 299
 300	return list_next_or_null_rcu(ws_head, &ws->entry,
 301				struct wakeup_source, entry);
 302}
 303EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
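
A sketch of walking the list with the four helpers above; the whole walk must sit between the read_lock/read_unlock pair, and the pr_info() is purely illustrative:

static void foo_dump_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int idx;

	idx = wakeup_sources_read_lock();
	/*
	 * walk_start() does not return NULL for an empty list, so this
	 * assumes at least one wakeup source has been registered.
	 */
	for (ws = wakeup_sources_walk_start(); ws;
	     ws = wakeup_sources_walk_next(ws))
		pr_info("wakeup source '%s' active=%d\n", ws->name, ws->active);
	wakeup_sources_read_unlock(idx);
}
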
 304
 305/**
 306 * device_wakeup_attach - Attach a wakeup source object to a device object.
 307 * @dev: Device to handle.
 308 * @ws: Wakeup source object to attach to @dev.
 309 *
 310 * This causes @dev to be treated as a wakeup device.
 311 */
 312static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
 313{
 314	spin_lock_irq(&dev->power.lock);
 315	if (dev->power.wakeup) {
 316		spin_unlock_irq(&dev->power.lock);
 317		return -EEXIST;
 318	}
 319	dev->power.wakeup = ws;
 320	if (dev->power.wakeirq)
 321		device_wakeup_attach_irq(dev, dev->power.wakeirq);
 322	spin_unlock_irq(&dev->power.lock);
 323	return 0;
 324}
 325
 326/**
 327 * device_wakeup_enable - Enable given device to be a wakeup source.
 328 * @dev: Device to handle.
 329 *
 330 * Create a wakeup source object, register it and attach it to @dev.
 331 */
 332int device_wakeup_enable(struct device *dev)
 333{
 334	struct wakeup_source *ws;
 335	int ret;
 336
 337	if (!dev || !dev->power.can_wakeup)
 338		return -EINVAL;
 339
 340	if (pm_suspend_target_state != PM_SUSPEND_ON)
 341		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 342
 343	ws = wakeup_source_register(dev, dev_name(dev));
 344	if (!ws)
 345		return -ENOMEM;
 346
 347	ret = device_wakeup_attach(dev, ws);
 348	if (ret)
 349		wakeup_source_unregister(ws);
 350
 351	return ret;
 352}
 353EXPORT_SYMBOL_GPL(device_wakeup_enable);
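
A hedged probe-time sketch: the device is marked wakeup-capable first (device_set_wakeup_capable() appears later in this file) and then device_wakeup_enable() registers and attaches its wakeup source; the foo_probe() name is hypothetical:

static int foo_probe(struct device *dev)
{
	int ret;

	/* Tell the PM core the hardware can wake the system ... */
	device_set_wakeup_capable(dev, true);

	/* ... then create a wakeup source named after the device and attach it. */
	ret = device_wakeup_enable(dev);
	if (ret)
		dev_warn(dev, "cannot enable wakeup: %d\n", ret);

	return 0;
}
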
 354
 355/**
 356 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 357 * @dev: Device to handle
 358 * @wakeirq: Device specific wakeirq entry
 359 *
 360 * Attach a device wakeirq to the wakeup source so the device
 361 * wake IRQ can be configured automatically for suspend and
 362 * resume.
 363 *
 364 * Call under the device's power.lock lock.
 365 */
 366void device_wakeup_attach_irq(struct device *dev,
 367			     struct wake_irq *wakeirq)
 368{
 369	struct wakeup_source *ws;
 370
 371	ws = dev->power.wakeup;
 372	if (!ws)
 373		return;
 374
 375	if (ws->wakeirq)
 376		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
 377
 378	ws->wakeirq = wakeirq;
 379}
 380
 381/**
 382 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 383 * @dev: Device to handle
 384 *
 385 * Removes a device wakeirq from the wakeup source.
 386 *
 387 * Call under the device's power.lock lock.
 388 */
 389void device_wakeup_detach_irq(struct device *dev)
 390{
 391	struct wakeup_source *ws;
 392
 393	ws = dev->power.wakeup;
 394	if (ws)
 395		ws->wakeirq = NULL;
 396}
 397
 398/**
 399 * device_wakeup_arm_wake_irqs -
 400 *
 401 * Iterates over the list of device wakeirqs to arm them.
 402 */
 403void device_wakeup_arm_wake_irqs(void)
 404{
 405	struct wakeup_source *ws;
 406	int srcuidx;
 407
 408	srcuidx = srcu_read_lock(&wakeup_srcu);
 409	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 410		dev_pm_arm_wake_irq(ws->wakeirq);
 411	srcu_read_unlock(&wakeup_srcu, srcuidx);
 412}
 413
 414/**
 415 * device_wakeup_disarm_wake_irqs -
 416 *
 417 * Iterates over the list of device wakeirqs to disarm them.
 418 */
 419void device_wakeup_disarm_wake_irqs(void)
 420{
 421	struct wakeup_source *ws;
 422	int srcuidx;
 423
 424	srcuidx = srcu_read_lock(&wakeup_srcu);
 425	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 426		dev_pm_disarm_wake_irq(ws->wakeirq);
 427	srcu_read_unlock(&wakeup_srcu, srcuidx);
 428}
 429
 430/**
 431 * device_wakeup_detach - Detach a device's wakeup source object from it.
 432 * @dev: Device to detach the wakeup source object from.
 433 *
 434 * After it returns, @dev will not be treated as a wakeup device any more.
 435 */
 436static struct wakeup_source *device_wakeup_detach(struct device *dev)
 437{
 438	struct wakeup_source *ws;
 439
 440	spin_lock_irq(&dev->power.lock);
 441	ws = dev->power.wakeup;
 442	dev->power.wakeup = NULL;
 443	spin_unlock_irq(&dev->power.lock);
 444	return ws;
 445}
 446
 447/**
 448 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 449 * @dev: Device to handle.
 450 *
 451 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 452 * object and destroy it.
 453 */
 454void device_wakeup_disable(struct device *dev)
 455{
 456	struct wakeup_source *ws;
 457
 458	if (!dev || !dev->power.can_wakeup)
 459		return;
 460
 461	ws = device_wakeup_detach(dev);
 462	wakeup_source_unregister(ws);
 463}
 464EXPORT_SYMBOL_GPL(device_wakeup_disable);
 465
 466/**
 467 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 468 * @dev: Device to handle.
 469 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 470 *
 471 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 472 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 473 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 474 *
 475 * This function may sleep and it can't be called from any context where
 476 * sleeping is not allowed.
 477 */
 478void device_set_wakeup_capable(struct device *dev, bool capable)
 479{
 480	if (!!dev->power.can_wakeup == !!capable)
 481		return;
 482
 483	dev->power.can_wakeup = capable;
 484	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
 485		if (capable) {
 486			int ret = wakeup_sysfs_add(dev);
 487
 488			if (ret)
 489				dev_info(dev, "Wakeup sysfs attributes not added\n");
 490		} else {
 491			wakeup_sysfs_remove(dev);
 492		}
 493	}
 494}
 495EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
 496
 497/**
 498 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 499 * @dev: Device to handle.
 500 * @enable: enable/disable flag
 501 */
 502int device_set_wakeup_enable(struct device *dev, bool enable)
 503{
 504	if (enable)
 505		return device_wakeup_enable(dev);
 506
 507	device_wakeup_disable(dev);
 508	return 0;
 509}
 510EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 511
 512/**
 513 * wakeup_source_not_registered - validate the given wakeup source.
 514 * @ws: Wakeup source to be validated.
 515 */
 516static bool wakeup_source_not_registered(struct wakeup_source *ws)
 517{
 518	/*
 519	 * Use timer struct to check if the given source is initialized
 520	 * by wakeup_source_add.
 521	 */
 522	return ws->timer.function != pm_wakeup_timer_fn;
 523}
 524
 525/*
 526 * The functions below use the observation that each wakeup event starts a
 527 * period in which the system should not be suspended.  The moment this period
 528 * will end depends on how the wakeup event is going to be processed after being
 529 * detected and all of the possible cases can be divided into two distinct
 530 * groups.
 531 *
 532 * First, a wakeup event may be detected by the same functional unit that will
 533 * carry out the entire processing of it and possibly will pass it to user space
 534 * for further processing.  In that case the functional unit that has detected
 535 * the event may later "close" the "no suspend" period associated with it
 536 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 537 * pm_relax(), balanced with each other, is supposed to be used in such
 538 * situations.
 539 *
 540 * Second, a wakeup event may be detected by one functional unit and processed
 541 * by another one.  In that case the unit that has detected it cannot really
 542 * "close" the "no suspend" period associated with it, unless it knows in
 543 * advance what's going to happen to the event during processing.  This
 544 * knowledge, however, may not be available to it, so it can simply specify time
 545 * to wait before the system can be suspended and pass it as the second
 546 * argument of pm_wakeup_event().
 547 *
 548 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 549 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 550 * function executed when the timer expires, whichever comes first.
 551 */
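
A sketch of the two cases just described, with hypothetical handlers; foo_handle_event() and bar_queue_event() are stand-ins, and pm_wakeup_event() is the <linux/pm_wakeup.h> wrapper that ends up in pm_wakeup_dev_event() below:

/* Case 1: the same code detects and fully processes the event. */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);		/* open the "no suspend" period */
	foo_handle_event(dev);		/* hypothetical in-place processing */
	pm_relax(dev);			/* close it again */

	return IRQ_HANDLED;
}

/* Case 2: the event is handed off; give the consumer 100 ms to run. */
static irqreturn_t bar_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_wakeup_event(dev, 100);	/* timed "no suspend" period */
	bar_queue_event(dev);		/* hypothetical hand-off */

	return IRQ_HANDLED;
}
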
 552
 553/**
 554 * wakeup_source_activate - Mark given wakeup source as active.
 555 * @ws: Wakeup source to handle.
 556 *
 557 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 558 * core of the event by incrementing the counter of the wakeup events being
 559 * processed.
 560 */
 561static void wakeup_source_activate(struct wakeup_source *ws)
 562{
 563	unsigned int cec;
 564
 565	if (WARN_ONCE(wakeup_source_not_registered(ws),
 566			"unregistered wakeup source\n"))
 567		return;
 568
 569	ws->active = true;
 570	ws->active_count++;
 571	ws->last_time = ktime_get();
 572	if (ws->autosleep_enabled)
 573		ws->start_prevent_time = ws->last_time;
 574
 575	/* Increment the counter of events in progress. */
 576	cec = atomic_inc_return(&combined_event_count);
 577
 578	trace_wakeup_source_activate(ws->name, cec);
 579}
 580
 581/**
 582 * wakeup_source_report_event - Report wakeup event using the given source.
 583 * @ws: Wakeup source to report the event for.
 584 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 585 */
 586static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
 587{
 588	ws->event_count++;
 589	/* This is racy, but the counter is approximate anyway. */
 590	if (events_check_enabled)
 591		ws->wakeup_count++;
 592
 593	if (!ws->active)
 594		wakeup_source_activate(ws);
 595
 596	if (hard)
 597		pm_system_wakeup();
 598}
 599
 600/**
 601 * __pm_stay_awake - Notify the PM core of a wakeup event.
 602 * @ws: Wakeup source object associated with the source of the event.
 603 *
 604 * It is safe to call this function from interrupt context.
 605 */
 606void __pm_stay_awake(struct wakeup_source *ws)
 607{
 608	unsigned long flags;
 609
 610	if (!ws)
 611		return;
 612
 613	spin_lock_irqsave(&ws->lock, flags);
 614
 615	wakeup_source_report_event(ws, false);
 616	del_timer(&ws->timer);
 617	ws->timer_expires = 0;
 618
 619	spin_unlock_irqrestore(&ws->lock, flags);
 620}
 621EXPORT_SYMBOL_GPL(__pm_stay_awake);
 622
 623/**
 624 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 625 * @dev: Device the wakeup event is related to.
 626 *
 627 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 628 * __pm_stay_awake for the @dev's wakeup source object.
 629 *
 630 * Call this function after detecting of a wakeup event if pm_relax() is going
 631 * to be called directly after processing the event (and possibly passing it to
 632 * user space for further processing).
 633 */
 634void pm_stay_awake(struct device *dev)
 635{
 636	unsigned long flags;
 637
 638	if (!dev)
 639		return;
 640
 641	spin_lock_irqsave(&dev->power.lock, flags);
 642	__pm_stay_awake(dev->power.wakeup);
 643	spin_unlock_irqrestore(&dev->power.lock, flags);
 644}
 645EXPORT_SYMBOL_GPL(pm_stay_awake);
 646
 647#ifdef CONFIG_PM_AUTOSLEEP
 648static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
 649{
 650	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
 651	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
 652}
 653#else
 654static inline void update_prevent_sleep_time(struct wakeup_source *ws,
 655					     ktime_t now) {}
 656#endif
 657
 658/**
 659 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 660 * @ws: Wakeup source to handle.
 661 *
 662 * Update the @ws' statistics and notify the PM core that the wakeup source has
 663 * become inactive by decrementing the counter of wakeup events being processed
 664 * and incrementing the counter of registered wakeup events.
 665 */
 666static void wakeup_source_deactivate(struct wakeup_source *ws)
 667{
 668	unsigned int cnt, inpr, cec;
 669	ktime_t duration;
 670	ktime_t now;
 671
 672	ws->relax_count++;
 673	/*
 674	 * __pm_relax() may be called directly or from a timer function.
 675	 * If it is called directly right after the timer function has been
 676	 * started, but before the timer function calls __pm_relax(), it is
 677	 * possible that __pm_stay_awake() will be called in the meantime and
 678	 * will set ws->active.  Then, ws->active may be cleared immediately
 679	 * by the __pm_relax() called from the timer function, but in such a
 680	 * case ws->relax_count will be different from ws->active_count.
 681	 */
 682	if (ws->relax_count != ws->active_count) {
 683		ws->relax_count--;
 684		return;
 685	}
 686
 687	ws->active = false;
 688
 689	now = ktime_get();
 690	duration = ktime_sub(now, ws->last_time);
 691	ws->total_time = ktime_add(ws->total_time, duration);
 692	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
 693		ws->max_time = duration;
 694
 695	ws->last_time = now;
 696	del_timer(&ws->timer);
 697	ws->timer_expires = 0;
 698
 699	if (ws->autosleep_enabled)
 700		update_prevent_sleep_time(ws, now);
 701
 702	/*
 703	 * Increment the counter of registered wakeup events and decrement the
 704	 * counter of wakeup events in progress simultaneously.
 705	 */
 706	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 707	trace_wakeup_source_deactivate(ws->name, cec);
 708
 709	split_counters(&cnt, &inpr);
 710	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
 711		wake_up(&wakeup_count_wait_queue);
 712}
 713
 714/**
 715 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 716 * @ws: Wakeup source object associated with the source of the event.
 717 *
 718 * Call this function for wakeup events whose processing started with calling
 719 * __pm_stay_awake().
 720 *
 721 * It is safe to call it from interrupt context.
 722 */
 723void __pm_relax(struct wakeup_source *ws)
 724{
 725	unsigned long flags;
 726
 727	if (!ws)
 728		return;
 729
 730	spin_lock_irqsave(&ws->lock, flags);
 731	if (ws->active)
 732		wakeup_source_deactivate(ws);
 733	spin_unlock_irqrestore(&ws->lock, flags);
 734}
 735EXPORT_SYMBOL_GPL(__pm_relax);
 736
 737/**
 738 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 739 * @dev: Device that signaled the event.
 740 *
 741 * Execute __pm_relax() for the @dev's wakeup source object.
 742 */
 743void pm_relax(struct device *dev)
 744{
 745	unsigned long flags;
 746
 747	if (!dev)
 748		return;
 749
 750	spin_lock_irqsave(&dev->power.lock, flags);
 751	__pm_relax(dev->power.wakeup);
 752	spin_unlock_irqrestore(&dev->power.lock, flags);
 753}
 754EXPORT_SYMBOL_GPL(pm_relax);
 755
 756/**
 757 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 758 * @t: timer list
 759 *
 760 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 761 * in @data if it is currently active and its timer has not been canceled and
 762 * the expiration time of the timer is not in future.
 763 */
 764static void pm_wakeup_timer_fn(struct timer_list *t)
 765{
 766	struct wakeup_source *ws = from_timer(ws, t, timer);
 767	unsigned long flags;
 768
 769	spin_lock_irqsave(&ws->lock, flags);
 770
 771	if (ws->active && ws->timer_expires
 772	    && time_after_eq(jiffies, ws->timer_expires)) {
 773		wakeup_source_deactivate(ws);
 774		ws->expire_count++;
 775	}
 776
 777	spin_unlock_irqrestore(&ws->lock, flags);
 778}
 779
 780/**
 781 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 782 * @ws: Wakeup source object associated with the event source.
 783 * @msec: Anticipated event processing time (in milliseconds).
 784 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 785 *
 786 * Notify the PM core of a wakeup event whose source is @ws that will take
 787 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 788 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 789 * execute pm_wakeup_timer_fn() in future.
 790 *
 791 * It is safe to call this function from interrupt context.
 792 */
 793void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 794{
 795	unsigned long flags;
 796	unsigned long expires;
 797
 798	if (!ws)
 799		return;
 800
 801	spin_lock_irqsave(&ws->lock, flags);
 802
 803	wakeup_source_report_event(ws, hard);
 804
 805	if (!msec) {
 806		wakeup_source_deactivate(ws);
 807		goto unlock;
 808	}
 809
 810	expires = jiffies + msecs_to_jiffies(msec);
 811	if (!expires)
 812		expires = 1;
 813
 814	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
 815		mod_timer(&ws->timer, expires);
 816		ws->timer_expires = expires;
 817	}
 818
 819 unlock:
 820	spin_unlock_irqrestore(&ws->lock, flags);
 821}
 822EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
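
A short sketch of the @msec == 0 plus @hard combination; @ws is assumed to come from wakeup_source_register():

static void foo_report_critical_event(struct wakeup_source *ws)
{
	/*
	 * msec == 0: the event is counted and the source is deactivated
	 * again right away.  hard == true: pm_system_wakeup() is called
	 * too, so a suspend in progress (or suspend-to-idle) is aborted.
	 */
	pm_wakeup_ws_event(ws, 0, true);
}
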
 823
 824/**
 825 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 826 * @dev: Device the wakeup event is related to.
 827 * @msec: Anticipated event processing time (in milliseconds).
 828 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 829 *
 830 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 831 */
 832void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
 833{
 834	unsigned long flags;
 835
 836	if (!dev)
 837		return;
 838
 839	spin_lock_irqsave(&dev->power.lock, flags);
 840	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
 841	spin_unlock_irqrestore(&dev->power.lock, flags);
 842}
 843EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
 844
 845void pm_print_active_wakeup_sources(void)
 846{
 847	struct wakeup_source *ws;
 848	int srcuidx, active = 0;
 849	struct wakeup_source *last_activity_ws = NULL;
 850
 851	srcuidx = srcu_read_lock(&wakeup_srcu);
 852	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 853		if (ws->active) {
 854			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 855			active = 1;
 856		} else if (!active &&
 857			   (!last_activity_ws ||
 858			    ktime_to_ns(ws->last_time) >
 859			    ktime_to_ns(last_activity_ws->last_time))) {
 860			last_activity_ws = ws;
 861		}
 862	}
 863
 864	if (!active && last_activity_ws)
 865		pm_pr_dbg("last active wakeup source: %s\n",
 866			last_activity_ws->name);
 867	srcu_read_unlock(&wakeup_srcu, srcuidx);
 868}
 869EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 870
 871/**
 872 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 873 *
 874 * Compare the current number of registered wakeup events with its preserved
 875 * value from the past and return true if new wakeup events have been registered
 876 * since the old value was stored.  Also return true if the current number of
 877 * wakeup events being processed is different from zero.
 878 */
 879bool pm_wakeup_pending(void)
 880{
 881	unsigned long flags;
 882	bool ret = false;
 883
 884	raw_spin_lock_irqsave(&events_lock, flags);
 885	if (events_check_enabled) {
 886		unsigned int cnt, inpr;
 887
 888		split_counters(&cnt, &inpr);
 889		ret = (cnt != saved_count || inpr > 0);
 890		events_check_enabled = !ret;
 891	}
 892	raw_spin_unlock_irqrestore(&events_lock, flags);
 893
 894	if (ret) {
 895		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 896		pm_print_active_wakeup_sources();
 897	}
 898
 899	return ret || atomic_read(&pm_abort_suspend) > 0;
 900}
 901EXPORT_SYMBOL_GPL(pm_wakeup_pending);
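
A hedged sketch of a suspend-side caller; roughly, the suspend core performs this kind of check at several points and backs out of the transition with -EBUSY:

static int foo_suspend_step(void)
{
	/* New wakeup events since pm_save_wakeup_count()?  Then back out. */
	if (pm_wakeup_pending())
		return -EBUSY;

	return 0;
}
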
 902
 903void pm_system_wakeup(void)
 904{
 905	atomic_inc(&pm_abort_suspend);
 906	s2idle_wake();
 907}
 908EXPORT_SYMBOL_GPL(pm_system_wakeup);
 909
 910void pm_system_cancel_wakeup(void)
 911{
 912	atomic_dec_if_positive(&pm_abort_suspend);
 913}
 914
 915void pm_wakeup_clear(unsigned int irq_number)
 916{
 917	raw_spin_lock_irq(&wakeup_irq_lock);
 918
 919	if (irq_number && wakeup_irq[0] == irq_number)
 920		wakeup_irq[0] = wakeup_irq[1];
 921	else
 922		wakeup_irq[0] = 0;
 923
 924	wakeup_irq[1] = 0;
 925
 926	raw_spin_unlock_irq(&wakeup_irq_lock);
 927
 928	if (!irq_number)
 929		atomic_set(&pm_abort_suspend, 0);
 930}
 931
 932void pm_system_irq_wakeup(unsigned int irq_number)
 933{
 934	unsigned long flags;
 935
 936	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
 937
 938	if (wakeup_irq[0] == 0)
 939		wakeup_irq[0] = irq_number;
 940	else if (wakeup_irq[1] == 0)
 941		wakeup_irq[1] = irq_number;
 942	else
 943		irq_number = 0;
 944
 945	pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
 946
 947	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
 948
 949	if (irq_number)
 950		pm_system_wakeup();
 951}
 952
 953unsigned int pm_wakeup_irq(void)
 954{
 955	return wakeup_irq[0];
 956}
 957
 958/**
 959 * pm_get_wakeup_count - Read the number of registered wakeup events.
 960 * @count: Address to store the value at.
 961 * @block: Whether or not to block.
 962 *
 963 * Store the number of registered wakeup events at the address in @count.  If
 964 * @block is set, block until the current number of wakeup events being
 965 * processed is zero.
 966 *
 967 * Return 'false' if the current number of wakeup events being processed is
 968 * nonzero.  Otherwise return 'true'.
 969 */
 970bool pm_get_wakeup_count(unsigned int *count, bool block)
 971{
 972	unsigned int cnt, inpr;
 973
 974	if (block) {
 975		DEFINE_WAIT(wait);
 976
 977		for (;;) {
 978			prepare_to_wait(&wakeup_count_wait_queue, &wait,
 979					TASK_INTERRUPTIBLE);
 980			split_counters(&cnt, &inpr);
 981			if (inpr == 0 || signal_pending(current))
 982				break;
 983			pm_print_active_wakeup_sources();
 984			schedule();
 985		}
 986		finish_wait(&wakeup_count_wait_queue, &wait);
 987	}
 988
 989	split_counters(&cnt, &inpr);
 990	*count = cnt;
 991	return !inpr;
 992}
 993
 994/**
 995 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 996 * @count: Value to compare with the current number of registered wakeup events.
 997 *
 998 * If @count is equal to the current number of registered wakeup events and the
 999 * current number of wakeup events being processed is zero, store @count as the
1000 * old number of registered wakeup events for pm_check_wakeup_events(), enable
1001 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1002 * detection and return 'false'.
1003 */
1004bool pm_save_wakeup_count(unsigned int count)
1005{
1006	unsigned int cnt, inpr;
1007	unsigned long flags;
1008
1009	events_check_enabled = false;
1010	raw_spin_lock_irqsave(&events_lock, flags);
1011	split_counters(&cnt, &inpr);
1012	if (cnt == count && inpr == 0) {
1013		saved_count = count;
1014		events_check_enabled = true;
1015	}
1016	raw_spin_unlock_irqrestore(&events_lock, flags);
1017	return events_check_enabled;
1018}
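
These two functions back the /sys/power/wakeup_count interface.  A hedged user-space sketch of the intended handshake (read the count, write it back, then request suspend; paths are the standard sysfs ones and error handling is trimmed):

#include <fcntl.h>
#include <unistd.h>

static int suspend_with_wakeup_count(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/power/wakeup_count", O_RDWR);
	if (fd < 0)
		return -1;

	/* May block until no wakeup events are in progress. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n <= 0) {
		close(fd);
		return -1;
	}

	/* Writing the same count back arms the wakeup-event check ... */
	if (write(fd, buf, n) < 0) {
		close(fd);	/* a wakeup event raced with us; retry later */
		return -1;
	}
	close(fd);

	/* ... so this suspend request aborts if new events arrive. */
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, "mem", 3);
	close(fd);

	return 0;
}
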
1019
1020#ifdef CONFIG_PM_AUTOSLEEP
1021/**
1022 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1023 * @set: Whether to set or to clear the autosleep_enabled flags.
1024 */
1025void pm_wakep_autosleep_enabled(bool set)
1026{
1027	struct wakeup_source *ws;
1028	ktime_t now = ktime_get();
1029	int srcuidx;
1030
1031	srcuidx = srcu_read_lock(&wakeup_srcu);
1032	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1033		spin_lock_irq(&ws->lock);
1034		if (ws->autosleep_enabled != set) {
1035			ws->autosleep_enabled = set;
1036			if (ws->active) {
1037				if (set)
1038					ws->start_prevent_time = now;
1039				else
1040					update_prevent_sleep_time(ws, now);
1041			}
1042		}
1043		spin_unlock_irq(&ws->lock);
1044	}
1045	srcu_read_unlock(&wakeup_srcu, srcuidx);
1046}
1047#endif /* CONFIG_PM_AUTOSLEEP */
1048
1049/**
1050 * print_wakeup_source_stats - Print wakeup source statistics information.
1051 * @m: seq_file to print the statistics into.
1052 * @ws: Wakeup source object to print the statistics for.
1053 */
1054static int print_wakeup_source_stats(struct seq_file *m,
1055				     struct wakeup_source *ws)
1056{
1057	unsigned long flags;
1058	ktime_t total_time;
1059	ktime_t max_time;
1060	unsigned long active_count;
1061	ktime_t active_time;
1062	ktime_t prevent_sleep_time;
1063
1064	spin_lock_irqsave(&ws->lock, flags);
1065
1066	total_time = ws->total_time;
1067	max_time = ws->max_time;
1068	prevent_sleep_time = ws->prevent_sleep_time;
1069	active_count = ws->active_count;
1070	if (ws->active) {
1071		ktime_t now = ktime_get();
1072
1073		active_time = ktime_sub(now, ws->last_time);
1074		total_time = ktime_add(total_time, active_time);
1075		if (active_time > max_time)
1076			max_time = active_time;
1077
1078		if (ws->autosleep_enabled)
1079			prevent_sleep_time = ktime_add(prevent_sleep_time,
1080				ktime_sub(now, ws->start_prevent_time));
1081	} else {
1082		active_time = 0;
1083	}
1084
1085	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1086		   ws->name, active_count, ws->event_count,
1087		   ws->wakeup_count, ws->expire_count,
1088		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1089		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1090		   ktime_to_ms(prevent_sleep_time));
1091
1092	spin_unlock_irqrestore(&ws->lock, flags);
1093
1094	return 0;
1095}
1096
1097static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1098					loff_t *pos)
1099{
1100	struct wakeup_source *ws;
1101	loff_t n = *pos;
1102	int *srcuidx = m->private;
1103
1104	if (n == 0) {
1105		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1106			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1107			"last_change\tprevent_suspend_time\n");
1108	}
1109
1110	*srcuidx = srcu_read_lock(&wakeup_srcu);
1111	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1112		if (n-- <= 0)
1113			return ws;
1114	}
1115
1116	return NULL;
1117}
1118
1119static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1120					void *v, loff_t *pos)
1121{
1122	struct wakeup_source *ws = v;
1123	struct wakeup_source *next_ws = NULL;
1124
1125	++(*pos);
1126
1127	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1128		next_ws = ws;
1129		break;
1130	}
1131
1132	if (!next_ws)
1133		print_wakeup_source_stats(m, &deleted_ws);
1134
1135	return next_ws;
1136}
1137
1138static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1139{
1140	int *srcuidx = m->private;
1141
1142	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1143}
1144
1145/**
1146 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1147 * @m: seq_file to print the statistics into.
1148 * @v: wakeup_source of each iteration
1149 */
1150static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1151{
1152	struct wakeup_source *ws = v;
1153
1154	print_wakeup_source_stats(m, ws);
1155
1156	return 0;
1157}
1158
1159static const struct seq_operations wakeup_sources_stats_seq_ops = {
1160	.start = wakeup_sources_stats_seq_start,
1161	.next  = wakeup_sources_stats_seq_next,
1162	.stop  = wakeup_sources_stats_seq_stop,
1163	.show  = wakeup_sources_stats_seq_show,
1164};
1165
1166static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1167{
1168	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1169}
1170
1171static const struct file_operations wakeup_sources_stats_fops = {
1172	.owner = THIS_MODULE,
1173	.open = wakeup_sources_stats_open,
1174	.read = seq_read,
1175	.llseek = seq_lseek,
1176	.release = seq_release_private,
1177};
1178
1179static int __init wakeup_sources_debugfs_init(void)
1180{
1181	debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
1182			    &wakeup_sources_stats_fops);
1183	return 0;
1184}
1185
1186postcore_initcall(wakeup_sources_debugfs_init);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/wakeup.c - System wakeup events framework
   4 *
   5 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/sched/signal.h>
  12#include <linux/capability.h>
  13#include <linux/export.h>
  14#include <linux/suspend.h>
  15#include <linux/seq_file.h>
  16#include <linux/debugfs.h>
  17#include <linux/pm_wakeirq.h>
  18#include <trace/events/power.h>
  19
  20#include "power.h"
  21
  22#ifndef CONFIG_SUSPEND
  23suspend_state_t pm_suspend_target_state;
  24#define pm_suspend_target_state	(PM_SUSPEND_ON)
  25#endif
  26
  27#define list_for_each_entry_rcu_locked(pos, head, member) \
  28	list_for_each_entry_rcu(pos, head, member, \
  29		srcu_read_lock_held(&wakeup_srcu))
  30/*
  31 * If set, the suspend/hibernate code will abort transitions to a sleep state
  32 * if wakeup events are registered during or immediately before the transition.
  33 */
  34bool events_check_enabled __read_mostly;
  35
  36/* First wakeup IRQ seen by the kernel in the last cycle. */
  37unsigned int pm_wakeup_irq __read_mostly;
  38
  39/* If greater than 0 and the system is suspending, terminate the suspend. */
  40static atomic_t pm_abort_suspend __read_mostly;
  41
  42/*
  43 * Combined counters of registered wakeup events and wakeup events in progress.
  44 * They need to be modified together atomically, so it's better to use one
  45 * atomic variable to hold them both.
  46 */
  47static atomic_t combined_event_count = ATOMIC_INIT(0);
  48
  49#define IN_PROGRESS_BITS	(sizeof(int) * 4)
  50#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
  51
  52static void split_counters(unsigned int *cnt, unsigned int *inpr)
  53{
  54	unsigned int comb = atomic_read(&combined_event_count);
  55
  56	*cnt = (comb >> IN_PROGRESS_BITS);
  57	*inpr = comb & MAX_IN_PROGRESS;
  58}
  59
  60/* A preserved old value of the events counter. */
  61static unsigned int saved_count;
  62
  63static DEFINE_RAW_SPINLOCK(events_lock);
  64
  65static void pm_wakeup_timer_fn(struct timer_list *t);
  66
  67static LIST_HEAD(wakeup_sources);
  68
  69static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
  70
  71DEFINE_STATIC_SRCU(wakeup_srcu);
  72
  73static struct wakeup_source deleted_ws = {
  74	.name = "deleted",
  75	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
  76};
  77
  78static DEFINE_IDA(wakeup_ida);
  79
  80/**
  81 * wakeup_source_create - Create a struct wakeup_source object.
  82 * @name: Name of the new wakeup source.
  83 */
  84struct wakeup_source *wakeup_source_create(const char *name)
  85{
  86	struct wakeup_source *ws;
  87	const char *ws_name;
  88	int id;
  89
  90	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
  91	if (!ws)
  92		goto err_ws;
  93
  94	ws_name = kstrdup_const(name, GFP_KERNEL);
  95	if (!ws_name)
  96		goto err_name;
  97	ws->name = ws_name;
  98
  99	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
 100	if (id < 0)
 101		goto err_id;
 102	ws->id = id;
 103
 104	return ws;
 105
 106err_id:
 107	kfree_const(ws->name);
 108err_name:
 109	kfree(ws);
 110err_ws:
 111	return NULL;
 112}
 113EXPORT_SYMBOL_GPL(wakeup_source_create);
 114
 115/*
 116 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 117 */
 118static void wakeup_source_record(struct wakeup_source *ws)
 119{
 120	unsigned long flags;
 121
 122	spin_lock_irqsave(&deleted_ws.lock, flags);
 123
 124	if (ws->event_count) {
 125		deleted_ws.total_time =
 126			ktime_add(deleted_ws.total_time, ws->total_time);
 127		deleted_ws.prevent_sleep_time =
 128			ktime_add(deleted_ws.prevent_sleep_time,
 129				  ws->prevent_sleep_time);
 130		deleted_ws.max_time =
 131			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
 132				deleted_ws.max_time : ws->max_time;
 133		deleted_ws.event_count += ws->event_count;
 134		deleted_ws.active_count += ws->active_count;
 135		deleted_ws.relax_count += ws->relax_count;
 136		deleted_ws.expire_count += ws->expire_count;
 137		deleted_ws.wakeup_count += ws->wakeup_count;
 138	}
 139
 140	spin_unlock_irqrestore(&deleted_ws.lock, flags);
 141}
 142
 143static void wakeup_source_free(struct wakeup_source *ws)
 144{
 145	ida_free(&wakeup_ida, ws->id);
 146	kfree_const(ws->name);
 147	kfree(ws);
 148}
 149
 150/**
 151 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 152 * @ws: Wakeup source to destroy.
 153 *
 154 * Use only for wakeup source objects created with wakeup_source_create().
 155 */
 156void wakeup_source_destroy(struct wakeup_source *ws)
 157{
 158	if (!ws)
 159		return;
 160
 161	__pm_relax(ws);
 162	wakeup_source_record(ws);
 163	wakeup_source_free(ws);
 164}
 165EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 166
 167/**
 168 * wakeup_source_add - Add given object to the list of wakeup sources.
 169 * @ws: Wakeup source object to add to the list.
 170 */
 171void wakeup_source_add(struct wakeup_source *ws)
 172{
 173	unsigned long flags;
 174
 175	if (WARN_ON(!ws))
 176		return;
 177
 178	spin_lock_init(&ws->lock);
 179	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 180	ws->active = false;
 181
 182	raw_spin_lock_irqsave(&events_lock, flags);
 183	list_add_rcu(&ws->entry, &wakeup_sources);
 184	raw_spin_unlock_irqrestore(&events_lock, flags);
 185}
 186EXPORT_SYMBOL_GPL(wakeup_source_add);
 187
 188/**
 189 * wakeup_source_remove - Remove given object from the wakeup sources list.
 190 * @ws: Wakeup source object to remove from the list.
 191 */
 192void wakeup_source_remove(struct wakeup_source *ws)
 193{
 194	unsigned long flags;
 195
 196	if (WARN_ON(!ws))
 197		return;
 198
 199	raw_spin_lock_irqsave(&events_lock, flags);
 200	list_del_rcu(&ws->entry);
 201	raw_spin_unlock_irqrestore(&events_lock, flags);
 202	synchronize_srcu(&wakeup_srcu);
 203
 204	del_timer_sync(&ws->timer);
 205	/*
 206	 * Clear timer.function to make wakeup_source_not_registered() treat
 207	 * this wakeup source as not registered.
 208	 */
 209	ws->timer.function = NULL;
 210}
 211EXPORT_SYMBOL_GPL(wakeup_source_remove);
 212
 213/**
 214 * wakeup_source_register - Create wakeup source and add it to the list.
 215 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 216 * @name: Name of the wakeup source to register.
 217 */
 218struct wakeup_source *wakeup_source_register(struct device *dev,
 219					     const char *name)
 220{
 221	struct wakeup_source *ws;
 222	int ret;
 223
 224	ws = wakeup_source_create(name);
 225	if (ws) {
 226		if (!dev || device_is_registered(dev)) {
 227			ret = wakeup_source_sysfs_add(dev, ws);
 228			if (ret) {
 229				wakeup_source_free(ws);
 230				return NULL;
 231			}
 232		}
 233		wakeup_source_add(ws);
 234	}
 235	return ws;
 236}
 237EXPORT_SYMBOL_GPL(wakeup_source_register);
 238
 239/**
 240 * wakeup_source_unregister - Remove wakeup source from the list and remove it.
 241 * @ws: Wakeup source object to unregister.
 242 */
 243void wakeup_source_unregister(struct wakeup_source *ws)
 244{
 245	if (ws) {
 246		wakeup_source_remove(ws);
 247		if (ws->dev)
 248			wakeup_source_sysfs_remove(ws);
 249
 250		wakeup_source_destroy(ws);
 251	}
 252}
 253EXPORT_SYMBOL_GPL(wakeup_source_unregister);
 254
 255/**
 256 * wakeup_sources_read_lock - Lock wakeup source list for read.
 257 *
 258 * Returns an index of srcu lock for struct wakeup_srcu.
 259 * This index must be passed to the matching wakeup_sources_read_unlock().
 260 */
 261int wakeup_sources_read_lock(void)
 262{
 263	return srcu_read_lock(&wakeup_srcu);
 264}
 265EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
 266
 267/**
 268 * wakeup_sources_read_unlock - Unlock wakeup source list.
 269 * @idx: return value from corresponding wakeup_sources_read_lock()
 270 */
 271void wakeup_sources_read_unlock(int idx)
 272{
 273	srcu_read_unlock(&wakeup_srcu, idx);
 274}
 275EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
 276
 277/**
 278 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 279 *
 280 * Returns first object of the list of wakeup sources.
 281 *
 282 * Note that to be safe, wakeup sources list needs to be locked by calling
 283 * wakeup_source_read_lock() for this.
 284 */
 285struct wakeup_source *wakeup_sources_walk_start(void)
 286{
 287	struct list_head *ws_head = &wakeup_sources;
 288
 289	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
 290}
 291EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
 292
 293/**
 294 * wakeup_sources_walk_next - Get next wakeup source from the list
 295 * @ws: Previous wakeup source object
 296 *
 297 * Note that to be safe, wakeup sources list needs to be locked by calling
 298 * wakeup_source_read_lock() for this.
 299 */
 300struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
 301{
 302	struct list_head *ws_head = &wakeup_sources;
 303
 304	return list_next_or_null_rcu(ws_head, &ws->entry,
 305				struct wakeup_source, entry);
 306}
 307EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
 308
 309/**
 310 * device_wakeup_attach - Attach a wakeup source object to a device object.
 311 * @dev: Device to handle.
 312 * @ws: Wakeup source object to attach to @dev.
 313 *
 314 * This causes @dev to be treated as a wakeup device.
 315 */
 316static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
 317{
 318	spin_lock_irq(&dev->power.lock);
 319	if (dev->power.wakeup) {
 320		spin_unlock_irq(&dev->power.lock);
 321		return -EEXIST;
 322	}
 323	dev->power.wakeup = ws;
 324	if (dev->power.wakeirq)
 325		device_wakeup_attach_irq(dev, dev->power.wakeirq);
 326	spin_unlock_irq(&dev->power.lock);
 327	return 0;
 328}
 329
 330/**
 331 * device_wakeup_enable - Enable given device to be a wakeup source.
 332 * @dev: Device to handle.
 333 *
 334 * Create a wakeup source object, register it and attach it to @dev.
 335 */
 336int device_wakeup_enable(struct device *dev)
 337{
 338	struct wakeup_source *ws;
 339	int ret;
 340
 341	if (!dev || !dev->power.can_wakeup)
 342		return -EINVAL;
 343
 344	if (pm_suspend_target_state != PM_SUSPEND_ON)
 345		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 346
 347	ws = wakeup_source_register(dev, dev_name(dev));
 348	if (!ws)
 349		return -ENOMEM;
 350
 351	ret = device_wakeup_attach(dev, ws);
 352	if (ret)
 353		wakeup_source_unregister(ws);
 354
 355	return ret;
 356}
 357EXPORT_SYMBOL_GPL(device_wakeup_enable);
 358
 359/**
 360 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 361 * @dev: Device to handle
 362 * @wakeirq: Device specific wakeirq entry
 363 *
 364 * Attach a device wakeirq to the wakeup source so the device
 365 * wake IRQ can be configured automatically for suspend and
 366 * resume.
 367 *
 368 * Call under the device's power.lock lock.
 369 */
 370void device_wakeup_attach_irq(struct device *dev,
 371			     struct wake_irq *wakeirq)
 372{
 373	struct wakeup_source *ws;
 374
 375	ws = dev->power.wakeup;
 376	if (!ws)
 377		return;
 378
 379	if (ws->wakeirq)
 380		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
 381
 382	ws->wakeirq = wakeirq;
 383}
 384
 385/**
 386 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 387 * @dev: Device to handle
 388 *
 389 * Removes a device wakeirq from the wakeup source.
 390 *
 391 * Call under the device's power.lock lock.
 392 */
 393void device_wakeup_detach_irq(struct device *dev)
 394{
 395	struct wakeup_source *ws;
 396
 397	ws = dev->power.wakeup;
 398	if (ws)
 399		ws->wakeirq = NULL;
 400}
 401
 402/**
 403 * device_wakeup_arm_wake_irqs -
 404 *
 405 * Iterates over the list of device wakeirqs to arm them.
 406 */
 407void device_wakeup_arm_wake_irqs(void)
 408{
 409	struct wakeup_source *ws;
 410	int srcuidx;
 411
 412	srcuidx = srcu_read_lock(&wakeup_srcu);
 413	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 414		dev_pm_arm_wake_irq(ws->wakeirq);
 415	srcu_read_unlock(&wakeup_srcu, srcuidx);
 416}
 417
 418/**
 419 * device_wakeup_disarm_wake_irqs -
 420 *
 421 * Iterates over the list of device wakeirqs to disarm them.
 422 */
 423void device_wakeup_disarm_wake_irqs(void)
 424{
 425	struct wakeup_source *ws;
 426	int srcuidx;
 427
 428	srcuidx = srcu_read_lock(&wakeup_srcu);
 429	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 430		dev_pm_disarm_wake_irq(ws->wakeirq);
 431	srcu_read_unlock(&wakeup_srcu, srcuidx);
 432}
 433
 434/**
 435 * device_wakeup_detach - Detach a device's wakeup source object from it.
 436 * @dev: Device to detach the wakeup source object from.
 437 *
 438 * After it returns, @dev will not be treated as a wakeup device any more.
 439 */
 440static struct wakeup_source *device_wakeup_detach(struct device *dev)
 441{
 442	struct wakeup_source *ws;
 443
 444	spin_lock_irq(&dev->power.lock);
 445	ws = dev->power.wakeup;
 446	dev->power.wakeup = NULL;
 447	spin_unlock_irq(&dev->power.lock);
 448	return ws;
 449}
 450
 451/**
 452 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 453 * @dev: Device to handle.
 454 *
 455 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 456 * object and destroy it.
 457 */
 458int device_wakeup_disable(struct device *dev)
 459{
 460	struct wakeup_source *ws;
 461
 462	if (!dev || !dev->power.can_wakeup)
 463		return -EINVAL;
 464
 465	ws = device_wakeup_detach(dev);
 466	wakeup_source_unregister(ws);
 467	return 0;
 468}
 469EXPORT_SYMBOL_GPL(device_wakeup_disable);
 470
 471/**
 472 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 473 * @dev: Device to handle.
 474 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 475 *
 476 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 477 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 478 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 479 *
 480 * This function may sleep and it can't be called from any context where
 481 * sleeping is not allowed.
 482 */
 483void device_set_wakeup_capable(struct device *dev, bool capable)
 484{
 485	if (!!dev->power.can_wakeup == !!capable)
 486		return;
 487
 488	dev->power.can_wakeup = capable;
 489	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
 490		if (capable) {
 491			int ret = wakeup_sysfs_add(dev);
 492
 493			if (ret)
 494				dev_info(dev, "Wakeup sysfs attributes not added\n");
 495		} else {
 496			wakeup_sysfs_remove(dev);
 497		}
 498	}
 499}
 500EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
 501
 502/**
 503 * device_init_wakeup - Device wakeup initialization.
 504 * @dev: Device to handle.
 505 * @enable: Whether or not to enable @dev as a wakeup device.
 506 *
 507 * By default, most devices should leave wakeup disabled.  The exceptions are
 508 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 509 * possibly network interfaces, etc.  Also, devices that don't generate their
 510 * own wakeup requests but merely forward requests from one bus to another
 511 * (like PCI bridges) should have wakeup enabled by default.
 512 */
 513int device_init_wakeup(struct device *dev, bool enable)
 514{
 515	int ret = 0;
 516
 517	if (!dev)
 518		return -EINVAL;
 519
 520	if (enable) {
 521		device_set_wakeup_capable(dev, true);
 522		ret = device_wakeup_enable(dev);
 523	} else {
 524		device_wakeup_disable(dev);
 525		device_set_wakeup_capable(dev, false);
 526	}
 527
 528	return ret;
 529}
 530EXPORT_SYMBOL_GPL(device_init_wakeup);
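
A typical probe/remove pairing for this helper; the baz_* callbacks are hypothetical:

static int baz_probe(struct device *dev)
{
	/* Mark the device wakeup-capable and enable wakeup in one call. */
	return device_init_wakeup(dev, true);
}

static void baz_remove(struct device *dev)
{
	/* Disable wakeup and clear the capability flag again. */
	device_init_wakeup(dev, false);
}
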
 531
 532/**
 533 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 534 * @dev: Device to handle.
 535 * @enable: enable/disable flag
 536 */
 537int device_set_wakeup_enable(struct device *dev, bool enable)
 538{
 539	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
 540}
 541EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 542
 543/**
 544 * wakeup_source_not_registered - validate the given wakeup source.
 545 * @ws: Wakeup source to be validated.
 546 */
 547static bool wakeup_source_not_registered(struct wakeup_source *ws)
 548{
 549	/*
 550	 * Use timer struct to check if the given source is initialized
 551	 * by wakeup_source_add.
 552	 */
 553	return ws->timer.function != pm_wakeup_timer_fn;
 554}
 555
 556/*
 557 * The functions below use the observation that each wakeup event starts a
 558 * period in which the system should not be suspended.  The moment this period
 559 * will end depends on how the wakeup event is going to be processed after being
 560 * detected and all of the possible cases can be divided into two distinct
 561 * groups.
 562 *
 563 * First, a wakeup event may be detected by the same functional unit that will
 564 * carry out the entire processing of it and possibly will pass it to user space
 565 * for further processing.  In that case the functional unit that has detected
 566 * the event may later "close" the "no suspend" period associated with it
 567 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 568 * pm_relax(), balanced with each other, is supposed to be used in such
 569 * situations.
 570 *
 571 * Second, a wakeup event may be detected by one functional unit and processed
 572 * by another one.  In that case the unit that has detected it cannot really
 573 * "close" the "no suspend" period associated with it, unless it knows in
 574 * advance what's going to happen to the event during processing.  This
 575 * knowledge, however, may not be available to it, so it can simply specify time
 576 * to wait before the system can be suspended and pass it as the second
 577 * argument of pm_wakeup_event().
 578 *
 579 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 580 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 581 * function executed when the timer expires, whichever comes first.
 582 */
 583
 584/**
 585 * wakeup_source_activate - Mark given wakeup source as active.
 586 * @ws: Wakeup source to handle.
 587 *
 588 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
  589 * core of the event by incrementing the counter of wakeup events being
 590 * processed.
 591 */
 592static void wakeup_source_activate(struct wakeup_source *ws)
 593{
 594	unsigned int cec;
 595
 596	if (WARN_ONCE(wakeup_source_not_registered(ws),
 597			"unregistered wakeup source\n"))
 598		return;
 599
 600	ws->active = true;
 601	ws->active_count++;
 602	ws->last_time = ktime_get();
 603	if (ws->autosleep_enabled)
 604		ws->start_prevent_time = ws->last_time;
 605
 606	/* Increment the counter of events in progress. */
 607	cec = atomic_inc_return(&combined_event_count);
 608
 609	trace_wakeup_source_activate(ws->name, cec);
 610}
 611
 612/**
 613 * wakeup_source_report_event - Report wakeup event using the given source.
 614 * @ws: Wakeup source to report the event for.
 615 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 616 */
 617static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
 618{
 619	ws->event_count++;
 620	/* This is racy, but the counter is approximate anyway. */
 621	if (events_check_enabled)
 622		ws->wakeup_count++;
 623
 624	if (!ws->active)
 625		wakeup_source_activate(ws);
 626
 627	if (hard)
 628		pm_system_wakeup();
 629}
 630
 631/**
 632 * __pm_stay_awake - Notify the PM core of a wakeup event.
 633 * @ws: Wakeup source object associated with the source of the event.
 634 *
 635 * It is safe to call this function from interrupt context.
 636 */
 637void __pm_stay_awake(struct wakeup_source *ws)
 638{
 639	unsigned long flags;
 640
 641	if (!ws)
 642		return;
 643
 644	spin_lock_irqsave(&ws->lock, flags);
 645
 646	wakeup_source_report_event(ws, false);
 647	del_timer(&ws->timer);
 648	ws->timer_expires = 0;
 649
 650	spin_unlock_irqrestore(&ws->lock, flags);
 651}
 652EXPORT_SYMBOL_GPL(__pm_stay_awake);
 653
 654/**
 655 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 656 * @dev: Device the wakeup event is related to.
 657 *
 658 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 659 * __pm_stay_awake for the @dev's wakeup source object.
 660 *
 661 * Call this function after detecting of a wakeup event if pm_relax() is going
 662 * to be called directly after processing the event (and possibly passing it to
 663 * user space for further processing).
 664 */
 665void pm_stay_awake(struct device *dev)
 666{
 667	unsigned long flags;
 668
 669	if (!dev)
 670		return;
 671
 672	spin_lock_irqsave(&dev->power.lock, flags);
 673	__pm_stay_awake(dev->power.wakeup);
 674	spin_unlock_irqrestore(&dev->power.lock, flags);
 675}
 676EXPORT_SYMBOL_GPL(pm_stay_awake);
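/*
 * Illustrative sketch (hypothetical driver code): pm_stay_awake() only has an
 * effect if the device has a wakeup source attached, so a driver typically
 * enables wakeup first, for example with device_init_wakeup().
 *
 *	// at probe time:
 *	device_init_wakeup(&pdev->dev, true);	// attaches dev->power.wakeup
 *
 *	// when an event is detected:
 *	pm_stay_awake(&pdev->dev);
 *	// ... process the event ...
 *	pm_relax(&pdev->dev);
 */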
 677
 678#ifdef CONFIG_PM_AUTOSLEEP
 679static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
 680{
 681	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
 682	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
 683}
 684#else
 685static inline void update_prevent_sleep_time(struct wakeup_source *ws,
 686					     ktime_t now) {}
 687#endif
 688
 689/**
 690 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 691 * @ws: Wakeup source to handle.
 692 *
 693 * Update the @ws' statistics and notify the PM core that the wakeup source has
 694 * become inactive by decrementing the counter of wakeup events being processed
 695 * and incrementing the counter of registered wakeup events.
 696 */
 697static void wakeup_source_deactivate(struct wakeup_source *ws)
 698{
 699	unsigned int cnt, inpr, cec;
 700	ktime_t duration;
 701	ktime_t now;
 702
 703	ws->relax_count++;
 704	/*
 705	 * __pm_relax() may be called directly or from a timer function.
 706	 * If it is called directly right after the timer function has been
 707	 * started, but before the timer function calls __pm_relax(), it is
 708	 * possible that __pm_stay_awake() will be called in the meantime and
 709	 * will set ws->active.  Then, ws->active may be cleared immediately
 710	 * by the __pm_relax() called from the timer function, but in such a
 711	 * case ws->relax_count will be different from ws->active_count.
 712	 */
 713	if (ws->relax_count != ws->active_count) {
 714		ws->relax_count--;
 715		return;
 716	}
 717
 718	ws->active = false;
 719
 720	now = ktime_get();
 721	duration = ktime_sub(now, ws->last_time);
 722	ws->total_time = ktime_add(ws->total_time, duration);
 723	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
 724		ws->max_time = duration;
 725
 726	ws->last_time = now;
 727	del_timer(&ws->timer);
 728	ws->timer_expires = 0;
 729
 730	if (ws->autosleep_enabled)
 731		update_prevent_sleep_time(ws, now);
 732
 733	/*
 734	 * Increment the counter of registered wakeup events and decrement the
 735	 * counter of wakeup events in progress simultaneously.
 736	 */
 737	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 738	trace_wakeup_source_deactivate(ws->name, cec);
 739
 740	split_counters(&cnt, &inpr);
 741	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
 742		wake_up(&wakeup_count_wait_queue);
 743}
 744
 745/**
 746 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 747 * @ws: Wakeup source object associated with the source of the event.
 748 *
 749 * Call this function for wakeup events whose processing started with calling
 750 * __pm_stay_awake().
 751 *
 752 * It is safe to call it from interrupt context.
 753 */
 754void __pm_relax(struct wakeup_source *ws)
 755{
 756	unsigned long flags;
 757
 758	if (!ws)
 759		return;
 760
 761	spin_lock_irqsave(&ws->lock, flags);
 762	if (ws->active)
 763		wakeup_source_deactivate(ws);
 764	spin_unlock_irqrestore(&ws->lock, flags);
 765}
 766EXPORT_SYMBOL_GPL(__pm_relax);
 767
 768/**
 769 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 770 * @dev: Device that signaled the event.
 771 *
 772 * Execute __pm_relax() for the @dev's wakeup source object.
 773 */
 774void pm_relax(struct device *dev)
 775{
 776	unsigned long flags;
 777
 778	if (!dev)
 779		return;
 780
 781	spin_lock_irqsave(&dev->power.lock, flags);
 782	__pm_relax(dev->power.wakeup);
 783	spin_unlock_irqrestore(&dev->power.lock, flags);
 784}
 785EXPORT_SYMBOL_GPL(pm_relax);
 786
 787/**
 788 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 789 * @t: Timer used to look up the wakeup source to handle.
 790 *
 791 * Call wakeup_source_deactivate() for the wakeup source associated with @t if
 792 * it is currently active, its timer has not been canceled and the expiration
 793 * time of the timer is not in the future.
 794 */
 795static void pm_wakeup_timer_fn(struct timer_list *t)
 796{
 797	struct wakeup_source *ws = from_timer(ws, t, timer);
 798	unsigned long flags;
 799
 800	spin_lock_irqsave(&ws->lock, flags);
 801
 802	if (ws->active && ws->timer_expires
 803	    && time_after_eq(jiffies, ws->timer_expires)) {
 804		wakeup_source_deactivate(ws);
 805		ws->expire_count++;
 806	}
 807
 808	spin_unlock_irqrestore(&ws->lock, flags);
 809}
 810
 811/**
 812 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 813 * @ws: Wakeup source object associated with the event source.
 814 * @msec: Anticipated event processing time (in milliseconds).
 815 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 816 *
 817 * Notify the PM core of a wakeup event whose source is @ws that will take
 818 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 819 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 820 * execute pm_wakeup_timer_fn() in the future.
 821 *
 822 * It is safe to call this function from interrupt context.
 823 */
 824void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 825{
 826	unsigned long flags;
 827	unsigned long expires;
 828
 829	if (!ws)
 830		return;
 831
 832	spin_lock_irqsave(&ws->lock, flags);
 833
 834	wakeup_source_report_event(ws, hard);
 835
 836	if (!msec) {
 837		wakeup_source_deactivate(ws);
 838		goto unlock;
 839	}
 840
 841	expires = jiffies + msecs_to_jiffies(msec);
 842	if (!expires)
 843		expires = 1;
 844
 845	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
 846		mod_timer(&ws->timer, expires);
 847		ws->timer_expires = expires;
 848	}
 849
 850 unlock:
 851	spin_unlock_irqrestore(&ws->lock, flags);
 852}
 853EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
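/*
 * Illustrative sketch (hypothetical calls): reporting an event on a wakeup
 * source the caller owns.  Passing @msec == 0 records the event and ends the
 * "no suspend" period immediately; a nonzero @msec keeps the system awake for
 * roughly that long unless __pm_relax() is called earlier.
 *
 *	pm_wakeup_ws_event(ws, 0, false);	// just record the event
 *	pm_wakeup_ws_event(ws, 500, false);	// stay awake for ~500 ms
 *	pm_wakeup_ws_event(ws, 0, true);	// also abort a suspend in progress
 */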
 854
 855/**
 856 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 857 * @dev: Device the wakeup event is related to.
 858 * @msec: Anticipated event processing time (in milliseconds).
 859 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 860 *
 861 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 862 */
 863void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
 864{
 865	unsigned long flags;
 866
 867	if (!dev)
 868		return;
 869
 870	spin_lock_irqsave(&dev->power.lock, flags);
 871	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
 872	spin_unlock_irqrestore(&dev->power.lock, flags);
 873}
 874EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
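/*
 * Illustrative sketch (hypothetical interrupt handler): a driver that hands
 * the event off to another layer can give that layer a processing budget
 * here; the handler name and the 200 ms value are made up for the example.
 *
 *	static irqreturn_t baz_irq_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		// Keep the system awake for ~200 ms and, because @hard is set,
 *		// abort a suspend in progress or wake up from suspend-to-idle.
 *		pm_wakeup_dev_event(dev, 200, true);
 *		return IRQ_HANDLED;
 *	}
 */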
 875
 876void pm_print_active_wakeup_sources(void)
 877{
 878	struct wakeup_source *ws;
 879	int srcuidx, active = 0;
 880	struct wakeup_source *last_activity_ws = NULL;
 881
 882	srcuidx = srcu_read_lock(&wakeup_srcu);
 883	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 884		if (ws->active) {
 885			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 886			active = 1;
 887		} else if (!active &&
 888			   (!last_activity_ws ||
 889			    ktime_to_ns(ws->last_time) >
 890			    ktime_to_ns(last_activity_ws->last_time))) {
 891			last_activity_ws = ws;
 892		}
 893	}
 894
 895	if (!active && last_activity_ws)
 896		pm_pr_dbg("last active wakeup source: %s\n",
 897			last_activity_ws->name);
 898	srcu_read_unlock(&wakeup_srcu, srcuidx);
 899}
 900EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 901
 902/**
 903 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 904 *
 905 * Compare the current number of registered wakeup events with its preserved
 906 * value from the past and return true if new wakeup events have been registered
 907 * since the old value was stored.  Also return true if the current number of
 908 * wakeup events being processed is different from zero.
 909 */
 910bool pm_wakeup_pending(void)
 911{
 912	unsigned long flags;
 913	bool ret = false;
 914
 915	raw_spin_lock_irqsave(&events_lock, flags);
 916	if (events_check_enabled) {
 917		unsigned int cnt, inpr;
 918
 919		split_counters(&cnt, &inpr);
 920		ret = (cnt != saved_count || inpr > 0);
 921		events_check_enabled = !ret;
 922	}
 923	raw_spin_unlock_irqrestore(&events_lock, flags);
 924
 925	if (ret) {
 926		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 927		pm_print_active_wakeup_sources();
 928	}
 929
 930	return ret || atomic_read(&pm_abort_suspend) > 0;
 931}
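/*
 * Illustrative sketch (simplified, not the actual suspend core code): callers
 * in the suspend path use this as a late "point of no return" check and back
 * out if a wakeup event has shown up in the meantime.  The helper names below
 * are placeholders.
 *
 *	error = freeze_and_quiesce_everything();	// placeholder step
 *	if (!error && pm_wakeup_pending())
 *		error = -EBUSY;				// abort the transition
 *	if (error)
 *		undo_and_resume();			// placeholder step
 */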
 932
 933void pm_system_wakeup(void)
 934{
 935	atomic_inc(&pm_abort_suspend);
 936	s2idle_wake();
 937}
 938EXPORT_SYMBOL_GPL(pm_system_wakeup);
 939
 940void pm_system_cancel_wakeup(void)
 941{
 942	atomic_dec_if_positive(&pm_abort_suspend);
 943}
 944
 945void pm_wakeup_clear(bool reset)
 946{
 947	pm_wakeup_irq = 0;
 948	if (reset)
 949		atomic_set(&pm_abort_suspend, 0);
 950}
 951
 952void pm_system_irq_wakeup(unsigned int irq_number)
 953{
 954	if (pm_wakeup_irq == 0) {
 955		pm_wakeup_irq = irq_number;
 956		pm_system_wakeup();
 957	}
 958}
 959
 960/**
 961 * pm_get_wakeup_count - Read the number of registered wakeup events.
 962 * @count: Address to store the value at.
 963 * @block: Whether or not to block.
 964 *
 965 * Store the number of registered wakeup events at the address in @count.  If
 966 * @block is set, block until the current number of wakeup events being
 967 * processed is zero.
 968 *
 969 * Return 'false' if the current number of wakeup events being processed is
 970 * nonzero.  Otherwise return 'true'.
 971 */
 972bool pm_get_wakeup_count(unsigned int *count, bool block)
 973{
 974	unsigned int cnt, inpr;
 975
 976	if (block) {
 977		DEFINE_WAIT(wait);
 978
 979		for (;;) {
 980			prepare_to_wait(&wakeup_count_wait_queue, &wait,
 981					TASK_INTERRUPTIBLE);
 982			split_counters(&cnt, &inpr);
 983			if (inpr == 0 || signal_pending(current))
 984				break;
 985			pm_print_active_wakeup_sources();
 986			schedule();
 987		}
 988		finish_wait(&wakeup_count_wait_queue, &wait);
 989	}
 990
 991	split_counters(&cnt, &inpr);
 992	*count = cnt;
 993	return !inpr;
 994}
 995
 996/**
 997 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 998 * @count: Value to compare with the current number of registered wakeup events.
 999 *
1000 * If @count is equal to the current number of registered wakeup events and the
1001 * current number of wakeup events being processed is zero, store @count as the
1002 * old number of registered wakeup events for pm_wakeup_pending(), enable
1003 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1004 * detection and return 'false'.
1005 */
1006bool pm_save_wakeup_count(unsigned int count)
1007{
1008	unsigned int cnt, inpr;
1009	unsigned long flags;
1010
1011	events_check_enabled = false;
1012	raw_spin_lock_irqsave(&events_lock, flags);
1013	split_counters(&cnt, &inpr);
1014	if (cnt == count && inpr == 0) {
1015		saved_count = count;
1016		events_check_enabled = true;
1017	}
1018	raw_spin_unlock_irqrestore(&events_lock, flags);
1019	return events_check_enabled;
1020}
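/*
 * Illustrative sketch (hypothetical user space code): the two functions above
 * back the /sys/power/wakeup_count interface, which allows the system to be
 * suspended without racing against wakeup events.  Error handling is omitted
 * for brevity.
 *
 *	char buf[32];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *
 *	read(fd, buf, sizeof(buf));	// may block while events are in progress
 *	lseek(fd, 0, SEEK_SET);
 *	write(fd, buf, strlen(buf));	// fails if new events were registered
 *	close(fd);
 *
 *	fd = open("/sys/power/state", O_WRONLY);
 *	write(fd, "mem", 3);		// suspend will abort on new events
 *	close(fd);
 */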
1021
1022#ifdef CONFIG_PM_AUTOSLEEP
1023/**
1024 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1025 * @set: Whether to set or to clear the autosleep_enabled flags.
1026 */
1027void pm_wakep_autosleep_enabled(bool set)
1028{
1029	struct wakeup_source *ws;
1030	ktime_t now = ktime_get();
1031	int srcuidx;
1032
1033	srcuidx = srcu_read_lock(&wakeup_srcu);
1034	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1035		spin_lock_irq(&ws->lock);
1036		if (ws->autosleep_enabled != set) {
1037			ws->autosleep_enabled = set;
1038			if (ws->active) {
1039				if (set)
1040					ws->start_prevent_time = now;
1041				else
1042					update_prevent_sleep_time(ws, now);
1043			}
1044		}
1045		spin_unlock_irq(&ws->lock);
1046	}
1047	srcu_read_unlock(&wakeup_srcu, srcuidx);
1048}
1049#endif /* CONFIG_PM_AUTOSLEEP */
1050
1051/**
1052 * print_wakeup_source_stats - Print wakeup source statistics information.
1053 * @m: seq_file to print the statistics into.
1054 * @ws: Wakeup source object to print the statistics for.
1055 */
1056static int print_wakeup_source_stats(struct seq_file *m,
1057				     struct wakeup_source *ws)
1058{
1059	unsigned long flags;
1060	ktime_t total_time;
1061	ktime_t max_time;
1062	unsigned long active_count;
1063	ktime_t active_time;
1064	ktime_t prevent_sleep_time;
1065
1066	spin_lock_irqsave(&ws->lock, flags);
1067
1068	total_time = ws->total_time;
1069	max_time = ws->max_time;
1070	prevent_sleep_time = ws->prevent_sleep_time;
1071	active_count = ws->active_count;
1072	if (ws->active) {
1073		ktime_t now = ktime_get();
1074
1075		active_time = ktime_sub(now, ws->last_time);
1076		total_time = ktime_add(total_time, active_time);
1077		if (active_time > max_time)
1078			max_time = active_time;
1079
1080		if (ws->autosleep_enabled)
1081			prevent_sleep_time = ktime_add(prevent_sleep_time,
1082				ktime_sub(now, ws->start_prevent_time));
1083	} else {
1084		active_time = 0;
1085	}
1086
1087	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1088		   ws->name, active_count, ws->event_count,
1089		   ws->wakeup_count, ws->expire_count,
1090		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1091		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1092		   ktime_to_ms(prevent_sleep_time));
1093
1094	spin_unlock_irqrestore(&ws->lock, flags);
1095
1096	return 0;
1097}
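/*
 * The function above produces one row of /sys/kernel/debug/wakeup_sources,
 * under the header emitted by wakeup_sources_stats_seq_start() below.
 * Hypothetical sample output (values invented for illustration, trailing
 * columns elided):
 *
 *	name		active_count	event_count	wakeup_count	...
 *	eventpoll	3		3		0		...
 *	deleted		0		0		0		...
 */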
1098
1099static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1100					loff_t *pos)
1101{
1102	struct wakeup_source *ws;
1103	loff_t n = *pos;
1104	int *srcuidx = m->private;
1105
1106	if (n == 0) {
1107		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1108			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1109			"last_change\tprevent_suspend_time\n");
1110	}
1111
1112	*srcuidx = srcu_read_lock(&wakeup_srcu);
1113	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1114		if (n-- <= 0)
1115			return ws;
1116	}
1117
1118	return NULL;
1119}
1120
1121static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1122					void *v, loff_t *pos)
1123{
1124	struct wakeup_source *ws = v;
1125	struct wakeup_source *next_ws = NULL;
1126
1127	++(*pos);
1128
1129	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1130		next_ws = ws;
1131		break;
1132	}
1133
1134	if (!next_ws)
1135		print_wakeup_source_stats(m, &deleted_ws);
1136
1137	return next_ws;
1138}
1139
1140static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1141{
1142	int *srcuidx = m->private;
1143
1144	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1145}
1146
1147/**
1148 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1149 * @m: seq_file to print the statistics into.
1150 * @v: wakeup_source of each iteration
1151 */
1152static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1153{
1154	struct wakeup_source *ws = v;
1155
1156	print_wakeup_source_stats(m, ws);
1157
1158	return 0;
1159}
1160
1161static const struct seq_operations wakeup_sources_stats_seq_ops = {
1162	.start = wakeup_sources_stats_seq_start,
1163	.next  = wakeup_sources_stats_seq_next,
1164	.stop  = wakeup_sources_stats_seq_stop,
1165	.show  = wakeup_sources_stats_seq_show,
1166};
1167
1168static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1169{
1170	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1171}
1172
1173static const struct file_operations wakeup_sources_stats_fops = {
1174	.owner = THIS_MODULE,
1175	.open = wakeup_sources_stats_open,
1176	.read = seq_read,
1177	.llseek = seq_lseek,
1178	.release = seq_release_private,
1179};
1180
1181static int __init wakeup_sources_debugfs_init(void)
1182{
1183	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1184			    &wakeup_sources_stats_fops);
1185	return 0;
1186}
1187
1188postcore_initcall(wakeup_sources_debugfs_init);