v3.1
  1/*
  2 * drivers/base/power/wakeup.c - System wakeup events framework
  3 *
  4 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  5 *
  6 * This file is released under the GPLv2.
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/sched.h>
 12#include <linux/capability.h>
 13#include <linux/suspend.h>
 14#include <linux/seq_file.h>
 15#include <linux/debugfs.h>
 16
 17#include "power.h"
 18
 19#define TIMEOUT		100
 20
 21/*
 22 * If set, the suspend/hibernate code will abort transitions to a sleep state
 23 * if wakeup events are registered during or immediately before the transition.
 24 */
 25bool events_check_enabled;
 26
 27/*
 28 * Combined counters of registered wakeup events and wakeup events in progress.
 29 * They need to be modified together atomically, so it's better to use one
 30 * atomic variable to hold them both.
 31 */
 32static atomic_t combined_event_count = ATOMIC_INIT(0);
 33
 34#define IN_PROGRESS_BITS	(sizeof(int) * 4)
 35#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
 36
 37static void split_counters(unsigned int *cnt, unsigned int *inpr)
 38{
 39	unsigned int comb = atomic_read(&combined_event_count);
 40
 41	*cnt = (comb >> IN_PROGRESS_BITS);
 42	*inpr = comb & MAX_IN_PROGRESS;
 43}
 44
 45/* A preserved old value of the events counter. */
 46static unsigned int saved_count;
 47
 48static DEFINE_SPINLOCK(events_lock);
 49
 50static void pm_wakeup_timer_fn(unsigned long data);
 51
 52static LIST_HEAD(wakeup_sources);
 53
 54/**
 55 * wakeup_source_create - Create a struct wakeup_source object.
 56 * @name: Name of the new wakeup source.
 57 */
 58struct wakeup_source *wakeup_source_create(const char *name)
 59{
 60	struct wakeup_source *ws;
 61
 62	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 63	if (!ws)
 64		return NULL;
 65
 66	spin_lock_init(&ws->lock);
 67	if (name)
 68		ws->name = kstrdup(name, GFP_KERNEL);
 69
 70	return ws;
 71}
 72EXPORT_SYMBOL_GPL(wakeup_source_create);
 73
 74/**
 75 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 76 * @ws: Wakeup source to destroy.
 77 */
 78void wakeup_source_destroy(struct wakeup_source *ws)
 79{
 80	if (!ws)
 81		return;
 82
 83	spin_lock_irq(&ws->lock);
 84	while (ws->active) {
 85		spin_unlock_irq(&ws->lock);
 86
 87		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
 88
 89		spin_lock_irq(&ws->lock);
 90	}
 91	spin_unlock_irq(&ws->lock);
 92
 93	kfree(ws->name);
 94	kfree(ws);
 95}
 96EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 97
 98/**
 99 * wakeup_source_add - Add given object to the list of wakeup sources.
100 * @ws: Wakeup source object to add to the list.
101 */
102void wakeup_source_add(struct wakeup_source *ws)
103{
104	if (WARN_ON(!ws))
105		return;
106
107	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
108	ws->active = false;
109
110	spin_lock_irq(&events_lock);
111	list_add_rcu(&ws->entry, &wakeup_sources);
112	spin_unlock_irq(&events_lock);
113}
114EXPORT_SYMBOL_GPL(wakeup_source_add);
115
116/**
117 * wakeup_source_remove - Remove given object from the wakeup sources list.
118 * @ws: Wakeup source object to remove from the list.
119 */
120void wakeup_source_remove(struct wakeup_source *ws)
121{
122	if (WARN_ON(!ws))
123		return;
124
125	spin_lock_irq(&events_lock);
126	list_del_rcu(&ws->entry);
127	spin_unlock_irq(&events_lock);
128	synchronize_rcu();
129}
130EXPORT_SYMBOL_GPL(wakeup_source_remove);
131
132/**
133 * wakeup_source_register - Create wakeup source and add it to the list.
134 * @name: Name of the wakeup source to register.
135 */
136struct wakeup_source *wakeup_source_register(const char *name)
137{
138	struct wakeup_source *ws;
139
140	ws = wakeup_source_create(name);
141	if (ws)
142		wakeup_source_add(ws);
143
144	return ws;
145}
146EXPORT_SYMBOL_GPL(wakeup_source_register);
147
148/**
149 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
150 * @ws: Wakeup source object to unregister.
151 */
152void wakeup_source_unregister(struct wakeup_source *ws)
153{
154	wakeup_source_remove(ws);
155	wakeup_source_destroy(ws);
156}
157EXPORT_SYMBOL_GPL(wakeup_source_unregister);
158
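/*
 * Illustrative sketch, not part of this file: the dynamic wakeup source
 * lifecycle provided by the helpers above.  The "foo_events" name and the
 * foo_setup()/foo_teardown() wrappers are hypothetical.
 */
static struct wakeup_source *foo_ws;

static int foo_setup(void)
{
	foo_ws = wakeup_source_register("foo_events");
	return foo_ws ? 0 : -ENOMEM;
}

static void foo_teardown(void)
{
	/* Blocks until foo_ws is inactive, then frees it and its name. */
	wakeup_source_unregister(foo_ws);
}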
159/**
160 * device_wakeup_attach - Attach a wakeup source object to a device object.
161 * @dev: Device to handle.
162 * @ws: Wakeup source object to attach to @dev.
163 *
164 * This causes @dev to be treated as a wakeup device.
165 */
166static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
167{
168	spin_lock_irq(&dev->power.lock);
169	if (dev->power.wakeup) {
170		spin_unlock_irq(&dev->power.lock);
171		return -EEXIST;
172	}
173	dev->power.wakeup = ws;
174	spin_unlock_irq(&dev->power.lock);
175	return 0;
176}
177
178/**
179 * device_wakeup_enable - Enable given device to be a wakeup source.
180 * @dev: Device to handle.
181 *
182 * Create a wakeup source object, register it and attach it to @dev.
183 */
184int device_wakeup_enable(struct device *dev)
185{
186	struct wakeup_source *ws;
187	int ret;
188
189	if (!dev || !dev->power.can_wakeup)
190		return -EINVAL;
191
192	ws = wakeup_source_register(dev_name(dev));
193	if (!ws)
194		return -ENOMEM;
195
196	ret = device_wakeup_attach(dev, ws);
197	if (ret)
198		wakeup_source_unregister(ws);
199
200	return ret;
201}
202EXPORT_SYMBOL_GPL(device_wakeup_enable);
203
204/**
205 * device_wakeup_detach - Detach a device's wakeup source object from it.
206 * @dev: Device to detach the wakeup source object from.
207 *
208 * After it returns, @dev will not be treated as a wakeup device any more.
209 */
210static struct wakeup_source *device_wakeup_detach(struct device *dev)
211{
212	struct wakeup_source *ws;
213
214	spin_lock_irq(&dev->power.lock);
215	ws = dev->power.wakeup;
216	dev->power.wakeup = NULL;
217	spin_unlock_irq(&dev->power.lock);
218	return ws;
219}
220
221/**
222 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
223 * @dev: Device to handle.
224 *
225 * Detach the @dev's wakeup source object from it, unregister this wakeup source
226 * object and destroy it.
227 */
228int device_wakeup_disable(struct device *dev)
229{
230	struct wakeup_source *ws;
231
232	if (!dev || !dev->power.can_wakeup)
233		return -EINVAL;
234
235	ws = device_wakeup_detach(dev);
236	if (ws)
237		wakeup_source_unregister(ws);
238
239	return 0;
240}
241EXPORT_SYMBOL_GPL(device_wakeup_disable);
242
243/**
244 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
245 * @dev: Device to handle.
246 * @capable: Whether or not @dev is capable of waking up the system from sleep.
247 *
248 * If @capable is set, set the @dev's power.can_wakeup flag and add its
249 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
250 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
251 *
252 * This function may sleep and it can't be called from any context where
253 * sleeping is not allowed.
254 */
255void device_set_wakeup_capable(struct device *dev, bool capable)
256{
257	if (!!dev->power.can_wakeup == !!capable)
258		return;
259
260	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
261		if (capable) {
262			if (wakeup_sysfs_add(dev))
263				return;
264		} else {
265			wakeup_sysfs_remove(dev);
266		}
267	}
268	dev->power.can_wakeup = capable;
269}
270EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
271
272/**
273 * device_init_wakeup - Device wakeup initialization.
274 * @dev: Device to handle.
275 * @enable: Whether or not to enable @dev as a wakeup device.
276 *
277 * By default, most devices should leave wakeup disabled.  The exceptions are
278 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
279 * possibly network interfaces, etc.
280 */
281int device_init_wakeup(struct device *dev, bool enable)
282{
283	int ret = 0;
284
285	if (enable) {
286		device_set_wakeup_capable(dev, true);
287		ret = device_wakeup_enable(dev);
288	} else {
289		device_set_wakeup_capable(dev, false);
290	}
291
292	return ret;
293}
294EXPORT_SYMBOL_GPL(device_init_wakeup);
295
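/*
 * Illustrative sketch, not part of this file: how a wakeup-capable driver
 * would typically use device_init_wakeup() above.  The foo_probe()/
 * foo_remove() callbacks and the platform_device usage are hypothetical.
 */
static int foo_probe(struct platform_device *pdev)
{
	/* ... set up the device ... */

	/* Mark the device wakeup-capable and enable it as a wakeup source. */
	return device_init_wakeup(&pdev->dev, true);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Drop the wakeup source and the wakeup-related sysfs attributes. */
	device_init_wakeup(&pdev->dev, false);
	return 0;
}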
296/**
297 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
298 * @dev: Device to handle.
299 */
300int device_set_wakeup_enable(struct device *dev, bool enable)
301{
302	if (!dev || !dev->power.can_wakeup)
303		return -EINVAL;
304
305	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
306}
307EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
308
309/*
310 * The functions below use the observation that each wakeup event starts a
311 * period in which the system should not be suspended.  The moment this period
312 * will end depends on how the wakeup event is going to be processed after being
313 * detected and all of the possible cases can be divided into two distinct
314 * groups.
315 *
316 * First, a wakeup event may be detected by the same functional unit that will
317 * carry out the entire processing of it and possibly will pass it to user space
318 * for further processing.  In that case the functional unit that has detected
319 * the event may later "close" the "no suspend" period associated with it
320 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
321 * pm_relax(), balanced with each other, is supposed to be used in such
322 * situations.
323 *
324 * Second, a wakeup event may be detected by one functional unit and processed
325 * by another one.  In that case the unit that has detected it cannot really
326 * "close" the "no suspend" period associated with it, unless it knows in
327 * advance what's going to happen to the event during processing.  This
328 * knowledge, however, may not be available to it, so it can simply specify time
329 * to wait before the system can be suspended and pass it as the second
330 * argument of pm_wakeup_event().
331 *
332 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
333 * "no suspend" period will be ended either by the pm_relax(), or by the timer
334 * function executed when the timer expires, whichever comes first.
335 */
336
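/*
 * Illustrative sketch, not part of this file: the two patterns described in
 * the comment above, as they might look in a driver's interrupt handlers.
 * struct foo, foo->dev and the 100 ms estimate are hypothetical.
 */

/* First pattern: the same unit detects and fully processes the event. */
static irqreturn_t foo_irq_full(int irq, void *data)
{
	struct foo *foo = data;

	pm_stay_awake(foo->dev);
	/* ... handle the event completely ... */
	pm_relax(foo->dev);
	return IRQ_HANDLED;
}

/* Second pattern: the event is handed off, so only a timeout is known. */
static irqreturn_t foo_irq_handoff(int irq, void *data)
{
	struct foo *foo = data;

	/* Keep the system awake for roughly 100 ms while others process it. */
	pm_wakeup_event(foo->dev, 100);
	return IRQ_HANDLED;
}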
337/**
338 * wakeup_source_activate - Mark given wakeup source as active.
339 * @ws: Wakeup source to handle.
340 *
341 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
342 * core of the event by incrementing the counter of wakeup events being
343 * processed.
344 */
345static void wakeup_source_activate(struct wakeup_source *ws)
346{
347	ws->active = true;
348	ws->active_count++;
349	ws->timer_expires = jiffies;
350	ws->last_time = ktime_get();
351
352	/* Increment the counter of events in progress. */
353	atomic_inc(&combined_event_count);
354}
355
356/**
357 * __pm_stay_awake - Notify the PM core of a wakeup event.
358 * @ws: Wakeup source object associated with the source of the event.
359 *
360 * It is safe to call this function from interrupt context.
361 */
362void __pm_stay_awake(struct wakeup_source *ws)
363{
364	unsigned long flags;
365
366	if (!ws)
367		return;
368
369	spin_lock_irqsave(&ws->lock, flags);
370	ws->event_count++;
371	if (!ws->active)
372		wakeup_source_activate(ws);
373	spin_unlock_irqrestore(&ws->lock, flags);
374}
375EXPORT_SYMBOL_GPL(__pm_stay_awake);
376
377/**
378 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
379 * @dev: Device the wakeup event is related to.
380 *
381 * Notify the PM core of a wakeup event (signaled by @dev) by calling
382 * __pm_stay_awake for the @dev's wakeup source object.
383 *
384 * Call this function after detecting a wakeup event if pm_relax() is going
385 * to be called directly after processing the event (and possibly passing it to
386 * user space for further processing).
387 */
388void pm_stay_awake(struct device *dev)
389{
390	unsigned long flags;
391
392	if (!dev)
393		return;
394
395	spin_lock_irqsave(&dev->power.lock, flags);
396	__pm_stay_awake(dev->power.wakeup);
397	spin_unlock_irqrestore(&dev->power.lock, flags);
398}
399EXPORT_SYMBOL_GPL(pm_stay_awake);
400
401/**
402 * wakeup_source_deactivate - Mark given wakeup source as inactive.
403 * @ws: Wakeup source to handle.
404 *
405 * Update the @ws' statistics and notify the PM core that the wakeup source has
406 * become inactive by decrementing the counter of wakeup events being processed
407 * and incrementing the counter of registered wakeup events.
408 */
409static void wakeup_source_deactivate(struct wakeup_source *ws)
410{
411	ktime_t duration;
412	ktime_t now;
413
414	ws->relax_count++;
415	/*
416	 * __pm_relax() may be called directly or from a timer function.
417	 * If it is called directly right after the timer function has been
418	 * started, but before the timer function calls __pm_relax(), it is
419	 * possible that __pm_stay_awake() will be called in the meantime and
420	 * will set ws->active.  Then, ws->active may be cleared immediately
421	 * by the __pm_relax() called from the timer function, but in such a
422	 * case ws->relax_count will be different from ws->active_count.
423	 */
424	if (ws->relax_count != ws->active_count) {
425		ws->relax_count--;
426		return;
427	}
428
429	ws->active = false;
430
431	now = ktime_get();
432	duration = ktime_sub(now, ws->last_time);
433	ws->total_time = ktime_add(ws->total_time, duration);
434	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
435		ws->max_time = duration;
436
437	del_timer(&ws->timer);
438
439	/*
440	 * Increment the counter of registered wakeup events and decrement the
441 * counter of wakeup events in progress simultaneously.
442	 */
443	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
444}
445
446/**
447 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
448 * @ws: Wakeup source object associated with the source of the event.
449 *
450 * Call this function for wakeup events whose processing started with calling
451 * __pm_stay_awake().
452 *
453 * It is safe to call it from interrupt context.
454 */
455void __pm_relax(struct wakeup_source *ws)
456{
457	unsigned long flags;
458
459	if (!ws)
460		return;
461
462	spin_lock_irqsave(&ws->lock, flags);
463	if (ws->active)
464		wakeup_source_deactivate(ws);
465	spin_unlock_irqrestore(&ws->lock, flags);
466}
467EXPORT_SYMBOL_GPL(__pm_relax);
468
469/**
470 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
471 * @dev: Device that signaled the event.
472 *
473 * Execute __pm_relax() for the @dev's wakeup source object.
474 */
475void pm_relax(struct device *dev)
476{
477	unsigned long flags;
478
479	if (!dev)
480		return;
481
482	spin_lock_irqsave(&dev->power.lock, flags);
483	__pm_relax(dev->power.wakeup);
484	spin_unlock_irqrestore(&dev->power.lock, flags);
485}
486EXPORT_SYMBOL_GPL(pm_relax);
487
488/**
489 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
490 * @data: Address of the wakeup source object associated with the event source.
491 *
492 * Call __pm_relax() for the wakeup source whose address is stored in @data.
493 */
494static void pm_wakeup_timer_fn(unsigned long data)
495{
496	__pm_relax((struct wakeup_source *)data);
497}
498
499/**
500 * __pm_wakeup_event - Notify the PM core of a wakeup event.
501 * @ws: Wakeup source object associated with the event source.
502 * @msec: Anticipated event processing time (in milliseconds).
503 *
504 * Notify the PM core of a wakeup event whose source is @ws that will take
505 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
506 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
507 * execute pm_wakeup_timer_fn() in the future.
508 *
509 * It is safe to call this function from interrupt context.
510 */
511void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
512{
513	unsigned long flags;
514	unsigned long expires;
515
516	if (!ws)
517		return;
518
519	spin_lock_irqsave(&ws->lock, flags);
520
521	ws->event_count++;
522	if (!ws->active)
523		wakeup_source_activate(ws);
524
525	if (!msec) {
526		wakeup_source_deactivate(ws);
527		goto unlock;
528	}
529
530	expires = jiffies + msecs_to_jiffies(msec);
531	if (!expires)
532		expires = 1;
533
534	if (time_after(expires, ws->timer_expires)) {
535		mod_timer(&ws->timer, expires);
536		ws->timer_expires = expires;
537	}
538
539 unlock:
540	spin_unlock_irqrestore(&ws->lock, flags);
541}
542EXPORT_SYMBOL_GPL(__pm_wakeup_event);
543
544
545/**
546 * pm_wakeup_event - Notify the PM core of a wakeup event.
547 * @dev: Device the wakeup event is related to.
548 * @msec: Anticipated event processing time (in milliseconds).
549 *
550 * Call __pm_wakeup_event() for the @dev's wakeup source object.
551 */
552void pm_wakeup_event(struct device *dev, unsigned int msec)
553{
554	unsigned long flags;
555
556	if (!dev)
557		return;
558
559	spin_lock_irqsave(&dev->power.lock, flags);
560	__pm_wakeup_event(dev->power.wakeup, msec);
561	spin_unlock_irqrestore(&dev->power.lock, flags);
562}
563EXPORT_SYMBOL_GPL(pm_wakeup_event);
564
565/**
566 * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
567 */
568static void pm_wakeup_update_hit_counts(void)
569{
570	unsigned long flags;
571	struct wakeup_source *ws;
572
573	rcu_read_lock();
574	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
575		spin_lock_irqsave(&ws->lock, flags);
576		if (ws->active)
577			ws->hit_count++;
578		spin_unlock_irqrestore(&ws->lock, flags);
579	}
580	rcu_read_unlock();
581}
582
583/**
584 * pm_wakeup_pending - Check if power transition in progress should be aborted.
585 *
586 * Compare the current number of registered wakeup events with its preserved
587 * value from the past and return true if new wakeup events have been registered
588 * since the old value was stored.  Also return true if the current number of
589 * wakeup events being processed is different from zero.
590 */
591bool pm_wakeup_pending(void)
592{
593	unsigned long flags;
594	bool ret = false;
595
596	spin_lock_irqsave(&events_lock, flags);
597	if (events_check_enabled) {
598		unsigned int cnt, inpr;
599
600		split_counters(&cnt, &inpr);
601		ret = (cnt != saved_count || inpr > 0);
602		events_check_enabled = !ret;
603	}
604	spin_unlock_irqrestore(&events_lock, flags);
605	if (ret)
606		pm_wakeup_update_hit_counts();
607	return ret;
608}
609
610/**
611 * pm_get_wakeup_count - Read the number of registered wakeup events.
612 * @count: Address to store the value at.
613 *
614 * Store the number of registered wakeup events at the address in @count.  Block
615 * if the current number of wakeup events being processed is nonzero.
616 *
617 * Return 'false' if the wait for the number of wakeup events being processed to
618 * drop down to zero has been interrupted by a signal (and the current number
619 * of wakeup events being processed is still nonzero).  Otherwise return 'true'.
620 */
621bool pm_get_wakeup_count(unsigned int *count)
622{
623	unsigned int cnt, inpr;
624
625	for (;;) {
626		split_counters(&cnt, &inpr);
627		if (inpr == 0 || signal_pending(current))
628			break;
629		pm_wakeup_update_hit_counts();
630		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
631	}
632
633	split_counters(&cnt, &inpr);
634	*count = cnt;
635	return !inpr;
636}
637
638/**
639 * pm_save_wakeup_count - Save the current number of registered wakeup events.
640 * @count: Value to compare with the current number of registered wakeup events.
641 *
642 * If @count is equal to the current number of registered wakeup events and the
643 * current number of wakeup events being processed is zero, store @count as the
644 * old number of registered wakeup events for pm_wakeup_pending(), enable
645 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
646 * detection and return 'false'.
647 */
648bool pm_save_wakeup_count(unsigned int count)
649{
650	unsigned int cnt, inpr;
651
652	events_check_enabled = false;
653	spin_lock_irq(&events_lock);
654	split_counters(&cnt, &inpr);
655	if (cnt == count && inpr == 0) {
656		saved_count = count;
657		events_check_enabled = true;
658	}
659	spin_unlock_irq(&events_lock);
660	if (!events_check_enabled)
661		pm_wakeup_update_hit_counts();
662	return events_check_enabled;
663}
664
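/*
 * Illustrative sketch, not part of this file: pm_get_wakeup_count() and
 * pm_save_wakeup_count() back the /sys/power/wakeup_count interface.  A
 * user space suspend helper uses it roughly as below (error handling
 * trimmed); if writing the count back fails, wakeup events were registered
 * in the meantime and the suspend attempt should be retried.
 */
#include <stdio.h>

static int try_to_suspend(void)
{
	unsigned int count;
	FILE *f;

	/* Reading calls pm_get_wakeup_count(). */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f || fscanf(f, "%u", &count) != 1)
		return -1;
	fclose(f);

	/* Writing the value back calls pm_save_wakeup_count(). */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f)
		return -1;
	fprintf(f, "%u", count);
	if (fclose(f) != 0)
		return -1;	/* events occurred since the read, retry */

	/* No new events: it is now safe to request suspend-to-RAM. */
	f = fopen("/sys/power/state", "w");
	if (!f)
		return -1;
	fprintf(f, "mem");
	return fclose(f) == 0 ? 0 : -1;
}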
665static struct dentry *wakeup_sources_stats_dentry;
666
667/**
668 * print_wakeup_source_stats - Print wakeup source statistics information.
669 * @m: seq_file to print the statistics into.
670 * @ws: Wakeup source object to print the statistics for.
671 */
672static int print_wakeup_source_stats(struct seq_file *m,
673				     struct wakeup_source *ws)
674{
675	unsigned long flags;
676	ktime_t total_time;
677	ktime_t max_time;
678	unsigned long active_count;
679	ktime_t active_time;
680	int ret;
681
682	spin_lock_irqsave(&ws->lock, flags);
683
684	total_time = ws->total_time;
685	max_time = ws->max_time;
686	active_count = ws->active_count;
687	if (ws->active) {
688		active_time = ktime_sub(ktime_get(), ws->last_time);
689		total_time = ktime_add(total_time, active_time);
690		if (active_time.tv64 > max_time.tv64)
691			max_time = active_time;
692	} else {
693		active_time = ktime_set(0, 0);
694	}
695
696	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
697			"%lld\t\t%lld\t\t%lld\t\t%lld\n",
698			ws->name, active_count, ws->event_count, ws->hit_count,
699			ktime_to_ms(active_time), ktime_to_ms(total_time),
700			ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
701
702	spin_unlock_irqrestore(&ws->lock, flags);
703
704	return ret;
705}
706
707/**
708 * wakeup_sources_stats_show - Print wakeup sources statistics information.
709 * @m: seq_file to print the statistics into.
710 */
711static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
712{
713	struct wakeup_source *ws;
714
715	seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
716		"active_since\ttotal_time\tmax_time\tlast_change\n");
717
718	rcu_read_lock();
719	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
720		print_wakeup_source_stats(m, ws);
721	rcu_read_unlock();
722
723	return 0;
724}
725
726static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
727{
728	return single_open(file, wakeup_sources_stats_show, NULL);
729}
730
731static const struct file_operations wakeup_sources_stats_fops = {
732	.owner = THIS_MODULE,
733	.open = wakeup_sources_stats_open,
734	.read = seq_read,
735	.llseek = seq_lseek,
736	.release = single_release,
737};
738
739static int __init wakeup_sources_debugfs_init(void)
740{
741	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
742			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
743	return 0;
744}
745
746postcore_initcall(wakeup_sources_debugfs_init);
v3.15
  1/*
  2 * drivers/base/power/wakeup.c - System wakeup events framework
  3 *
  4 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  5 *
  6 * This file is released under the GPLv2.
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/sched.h>
 12#include <linux/capability.h>
 13#include <linux/export.h>
 14#include <linux/suspend.h>
 15#include <linux/seq_file.h>
 16#include <linux/debugfs.h>
 17#include <trace/events/power.h>
 18
 19#include "power.h"
 20
 21/*
 22 * If set, the suspend/hibernate code will abort transitions to a sleep state
 23 * if wakeup events are registered during or immediately before the transition.
 24 */
 25bool events_check_enabled __read_mostly;
 26
 27/*
 28 * Combined counters of registered wakeup events and wakeup events in progress.
 29 * They need to be modified together atomically, so it's better to use one
 30 * atomic variable to hold them both.
 31 */
 32static atomic_t combined_event_count = ATOMIC_INIT(0);
 33
 34#define IN_PROGRESS_BITS	(sizeof(int) * 4)
 35#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
 36
 37static void split_counters(unsigned int *cnt, unsigned int *inpr)
 38{
 39	unsigned int comb = atomic_read(&combined_event_count);
 40
 41	*cnt = (comb >> IN_PROGRESS_BITS);
 42	*inpr = comb & MAX_IN_PROGRESS;
 43}
 44
 45/* A preserved old value of the events counter. */
 46static unsigned int saved_count;
 47
 48static DEFINE_SPINLOCK(events_lock);
 49
 50static void pm_wakeup_timer_fn(unsigned long data);
 51
 52static LIST_HEAD(wakeup_sources);
 53
 54static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
 55
 56/**
 57 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
 58 * @ws: Wakeup source to prepare.
 59 * @name: Pointer to the name of the new wakeup source.
 60 *
 61 * Callers must ensure that the @name string won't be freed when @ws is still in
 62 * use.
 63 */
 64void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
 65{
 66	if (ws) {
 67		memset(ws, 0, sizeof(*ws));
 68		ws->name = name;
 69	}
 70}
 71EXPORT_SYMBOL_GPL(wakeup_source_prepare);
 72
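/*
 * Illustrative sketch, not part of this file: using wakeup_source_prepare()
 * for a wakeup_source object embedded in a driver structure rather than one
 * allocated with wakeup_source_create().  struct foo and the init/exit
 * helpers are hypothetical; the name string must remain valid for as long
 * as the wakeup source is in use, so a string literal is used here.
 */
struct foo {
	struct wakeup_source ws;
	/* ... */
};

static void foo_wakeup_init(struct foo *foo)
{
	wakeup_source_prepare(&foo->ws, "foo");
	wakeup_source_add(&foo->ws);
}

static void foo_wakeup_exit(struct foo *foo)
{
	/* Not wakeup_source_destroy(): that would kfree() the static name. */
	wakeup_source_remove(&foo->ws);
	wakeup_source_drop(&foo->ws);
}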
 73/**
 74 * wakeup_source_create - Create a struct wakeup_source object.
 75 * @name: Name of the new wakeup source.
 76 */
 77struct wakeup_source *wakeup_source_create(const char *name)
 78{
 79	struct wakeup_source *ws;
 80
 81	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
 82	if (!ws)
 83		return NULL;
 84
 85	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
 86	return ws;
 87}
 88EXPORT_SYMBOL_GPL(wakeup_source_create);
 89
 90/**
 91 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
 92 * @ws: Wakeup source to prepare for destruction.
 93 *
 94 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
 95 * be run in parallel with this function for the same wakeup source object.
 96 */
 97void wakeup_source_drop(struct wakeup_source *ws)
 98{
 99	if (!ws)
100		return;
101
102	del_timer_sync(&ws->timer);
103	__pm_relax(ws);
104}
105EXPORT_SYMBOL_GPL(wakeup_source_drop);
106
107/**
108 * wakeup_source_destroy - Destroy a struct wakeup_source object.
109 * @ws: Wakeup source to destroy.
110 *
111 * Use only for wakeup source objects created with wakeup_source_create().
112 */
113void wakeup_source_destroy(struct wakeup_source *ws)
114{
115	if (!ws)
116		return;
117
118	wakeup_source_drop(ws);
119	kfree(ws->name);
120	kfree(ws);
121}
122EXPORT_SYMBOL_GPL(wakeup_source_destroy);
123
124/**
125 * wakeup_source_add - Add given object to the list of wakeup sources.
126 * @ws: Wakeup source object to add to the list.
127 */
128void wakeup_source_add(struct wakeup_source *ws)
129{
130	unsigned long flags;
131
132	if (WARN_ON(!ws))
133		return;
134
135	spin_lock_init(&ws->lock);
136	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
137	ws->active = false;
138	ws->last_time = ktime_get();
139
140	spin_lock_irqsave(&events_lock, flags);
141	list_add_rcu(&ws->entry, &wakeup_sources);
142	spin_unlock_irqrestore(&events_lock, flags);
143}
144EXPORT_SYMBOL_GPL(wakeup_source_add);
145
146/**
147 * wakeup_source_remove - Remove given object from the wakeup sources list.
148 * @ws: Wakeup source object to remove from the list.
149 */
150void wakeup_source_remove(struct wakeup_source *ws)
151{
152	unsigned long flags;
153
154	if (WARN_ON(!ws))
155		return;
156
157	spin_lock_irqsave(&events_lock, flags);
158	list_del_rcu(&ws->entry);
159	spin_unlock_irqrestore(&events_lock, flags);
160	synchronize_rcu();
161}
162EXPORT_SYMBOL_GPL(wakeup_source_remove);
163
164/**
165 * wakeup_source_register - Create wakeup source and add it to the list.
166 * @name: Name of the wakeup source to register.
167 */
168struct wakeup_source *wakeup_source_register(const char *name)
169{
170	struct wakeup_source *ws;
171
172	ws = wakeup_source_create(name);
173	if (ws)
174		wakeup_source_add(ws);
175
176	return ws;
177}
178EXPORT_SYMBOL_GPL(wakeup_source_register);
179
180/**
181 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
182 * @ws: Wakeup source object to unregister.
183 */
184void wakeup_source_unregister(struct wakeup_source *ws)
185{
186	if (ws) {
187		wakeup_source_remove(ws);
188		wakeup_source_destroy(ws);
189	}
190}
191EXPORT_SYMBOL_GPL(wakeup_source_unregister);
192
193/**
194 * device_wakeup_attach - Attach a wakeup source object to a device object.
195 * @dev: Device to handle.
196 * @ws: Wakeup source object to attach to @dev.
197 *
198 * This causes @dev to be treated as a wakeup device.
199 */
200static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
201{
202	spin_lock_irq(&dev->power.lock);
203	if (dev->power.wakeup) {
204		spin_unlock_irq(&dev->power.lock);
205		return -EEXIST;
206	}
207	dev->power.wakeup = ws;
208	spin_unlock_irq(&dev->power.lock);
209	return 0;
210}
211
212/**
213 * device_wakeup_enable - Enable given device to be a wakeup source.
214 * @dev: Device to handle.
215 *
216 * Create a wakeup source object, register it and attach it to @dev.
217 */
218int device_wakeup_enable(struct device *dev)
219{
220	struct wakeup_source *ws;
221	int ret;
222
223	if (!dev || !dev->power.can_wakeup)
224		return -EINVAL;
225
226	ws = wakeup_source_register(dev_name(dev));
227	if (!ws)
228		return -ENOMEM;
229
230	ret = device_wakeup_attach(dev, ws);
231	if (ret)
232		wakeup_source_unregister(ws);
233
234	return ret;
235}
236EXPORT_SYMBOL_GPL(device_wakeup_enable);
237
238/**
239 * device_wakeup_detach - Detach a device's wakeup source object from it.
240 * @dev: Device to detach the wakeup source object from.
241 *
242 * After it returns, @dev will not be treated as a wakeup device any more.
243 */
244static struct wakeup_source *device_wakeup_detach(struct device *dev)
245{
246	struct wakeup_source *ws;
247
248	spin_lock_irq(&dev->power.lock);
249	ws = dev->power.wakeup;
250	dev->power.wakeup = NULL;
251	spin_unlock_irq(&dev->power.lock);
252	return ws;
253}
254
255/**
256 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
257 * @dev: Device to handle.
258 *
259 * Detach the @dev's wakeup source object from it, unregister this wakeup source
260 * object and destroy it.
261 */
262int device_wakeup_disable(struct device *dev)
263{
264	struct wakeup_source *ws;
265
266	if (!dev || !dev->power.can_wakeup)
267		return -EINVAL;
268
269	ws = device_wakeup_detach(dev);
270	if (ws)
271		wakeup_source_unregister(ws);
272
273	return 0;
274}
275EXPORT_SYMBOL_GPL(device_wakeup_disable);
276
277/**
278 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
279 * @dev: Device to handle.
280 * @capable: Whether or not @dev is capable of waking up the system from sleep.
281 *
282 * If @capable is set, set the @dev's power.can_wakeup flag and add its
283 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
284 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
285 *
286 * This function may sleep and it can't be called from any context where
287 * sleeping is not allowed.
288 */
289void device_set_wakeup_capable(struct device *dev, bool capable)
290{
291	if (!!dev->power.can_wakeup == !!capable)
292		return;
293
294	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
295		if (capable) {
296			if (wakeup_sysfs_add(dev))
297				return;
298		} else {
299			wakeup_sysfs_remove(dev);
300		}
301	}
302	dev->power.can_wakeup = capable;
303}
304EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
305
306/**
307 * device_init_wakeup - Device wakeup initialization.
308 * @dev: Device to handle.
309 * @enable: Whether or not to enable @dev as a wakeup device.
310 *
311 * By default, most devices should leave wakeup disabled.  The exceptions are
312 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
313 * possibly network interfaces, etc.  Also, devices that don't generate their
314 * own wakeup requests but merely forward requests from one bus to another
315 * (like PCI bridges) should have wakeup enabled by default.
316 */
317int device_init_wakeup(struct device *dev, bool enable)
318{
319	int ret = 0;
320
321	if (enable) {
322		device_set_wakeup_capable(dev, true);
323		ret = device_wakeup_enable(dev);
324	} else {
325		device_set_wakeup_capable(dev, false);
326	}
327
328	return ret;
329}
330EXPORT_SYMBOL_GPL(device_init_wakeup);
331
332/**
333 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
334 * @dev: Device to handle.
335 */
336int device_set_wakeup_enable(struct device *dev, bool enable)
337{
338	if (!dev || !dev->power.can_wakeup)
339		return -EINVAL;
340
341	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
342}
343EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
344
345/*
346 * The functions below use the observation that each wakeup event starts a
347 * period in which the system should not be suspended.  The moment this period
348 * will end depends on how the wakeup event is going to be processed after being
349 * detected and all of the possible cases can be divided into two distinct
350 * groups.
351 *
352 * First, a wakeup event may be detected by the same functional unit that will
353 * carry out the entire processing of it and possibly will pass it to user space
354 * for further processing.  In that case the functional unit that has detected
355 * the event may later "close" the "no suspend" period associated with it
356 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
357 * pm_relax(), balanced with each other, is supposed to be used in such
358 * situations.
359 *
360 * Second, a wakeup event may be detected by one functional unit and processed
361 * by another one.  In that case the unit that has detected it cannot really
362 * "close" the "no suspend" period associated with it, unless it knows in
363 * advance what's going to happen to the event during processing.  This
364 * knowledge, however, may not be available to it, so it can simply specify time
365 * to wait before the system can be suspended and pass it as the second
366 * argument of pm_wakeup_event().
367 *
368 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
369 * "no suspend" period will be ended either by the pm_relax(), or by the timer
370 * function executed when the timer expires, whichever comes first.
371 */
372
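/*
 * Illustrative sketch, not part of this file: the hand-off case from the
 * comment above, including the optional pm_relax() after pm_wakeup_event().
 * struct foo, its work item and the 500 ms upper bound are hypothetical.
 */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo *foo = data;

	/*
	 * foo_work_fn() will finish the event; allow up to 500 ms before
	 * the system may be suspended again in case it never runs.
	 */
	pm_wakeup_event(foo->dev, 500);
	schedule_work(&foo->work);
	return IRQ_HANDLED;
}

static void foo_work_fn(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, work);

	/* ... process the event ... */

	/* End the "no suspend" period early; otherwise the timer ends it. */
	pm_relax(foo->dev);
}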
373/**
374 * wakeup_source_activate - Mark given wakeup source as active.
375 * @ws: Wakeup source to handle.
376 *
377 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
378 * core of the event by incrementing the counter of wakeup events being
379 * processed.
380 */
381static void wakeup_source_activate(struct wakeup_source *ws)
382{
383	unsigned int cec;
384
385	/*
386	 * active wakeup source should bring the system
387	 * out of PM_SUSPEND_FREEZE state
388	 */
389	freeze_wake();
390
391	ws->active = true;
392	ws->active_count++;
393	ws->last_time = ktime_get();
394	if (ws->autosleep_enabled)
395		ws->start_prevent_time = ws->last_time;
396
397	/* Increment the counter of events in progress. */
398	cec = atomic_inc_return(&combined_event_count);
399
400	trace_wakeup_source_activate(ws->name, cec);
401}
402
403/**
404 * wakeup_source_report_event - Report wakeup event using the given source.
405 * @ws: Wakeup source to report the event for.
406 */
407static void wakeup_source_report_event(struct wakeup_source *ws)
408{
409	ws->event_count++;
410	/* This is racy, but the counter is approximate anyway. */
411	if (events_check_enabled)
412		ws->wakeup_count++;
413
414	if (!ws->active)
415		wakeup_source_activate(ws);
416}
417
418/**
419 * __pm_stay_awake - Notify the PM core of a wakeup event.
420 * @ws: Wakeup source object associated with the source of the event.
421 *
422 * It is safe to call this function from interrupt context.
423 */
424void __pm_stay_awake(struct wakeup_source *ws)
425{
426	unsigned long flags;
427
428	if (!ws)
429		return;
430
431	spin_lock_irqsave(&ws->lock, flags);
432
433	wakeup_source_report_event(ws);
434	del_timer(&ws->timer);
435	ws->timer_expires = 0;
436
437	spin_unlock_irqrestore(&ws->lock, flags);
438}
439EXPORT_SYMBOL_GPL(__pm_stay_awake);
440
441/**
442 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
443 * @dev: Device the wakeup event is related to.
444 *
445 * Notify the PM core of a wakeup event (signaled by @dev) by calling
446 * __pm_stay_awake for the @dev's wakeup source object.
447 *
448 * Call this function after detecting a wakeup event if pm_relax() is going
449 * to be called directly after processing the event (and possibly passing it to
450 * user space for further processing).
451 */
452void pm_stay_awake(struct device *dev)
453{
454	unsigned long flags;
455
456	if (!dev)
457		return;
458
459	spin_lock_irqsave(&dev->power.lock, flags);
460	__pm_stay_awake(dev->power.wakeup);
461	spin_unlock_irqrestore(&dev->power.lock, flags);
462}
463EXPORT_SYMBOL_GPL(pm_stay_awake);
464
465#ifdef CONFIG_PM_AUTOSLEEP
466static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
467{
468	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
469	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
470}
471#else
472static inline void update_prevent_sleep_time(struct wakeup_source *ws,
473					     ktime_t now) {}
474#endif
475
476/**
477 * wakeup_source_deactivate - Mark given wakeup source as inactive.
478 * @ws: Wakeup source to handle.
479 *
480 * Update the @ws' statistics and notify the PM core that the wakeup source has
481 * become inactive by decrementing the counter of wakeup events being processed
482 * and incrementing the counter of registered wakeup events.
483 */
484static void wakeup_source_deactivate(struct wakeup_source *ws)
485{
486	unsigned int cnt, inpr, cec;
487	ktime_t duration;
488	ktime_t now;
489
490	ws->relax_count++;
491	/*
492	 * __pm_relax() may be called directly or from a timer function.
493	 * If it is called directly right after the timer function has been
494	 * started, but before the timer function calls __pm_relax(), it is
495	 * possible that __pm_stay_awake() will be called in the meantime and
496	 * will set ws->active.  Then, ws->active may be cleared immediately
497	 * by the __pm_relax() called from the timer function, but in such a
498	 * case ws->relax_count will be different from ws->active_count.
499	 */
500	if (ws->relax_count != ws->active_count) {
501		ws->relax_count--;
502		return;
503	}
504
505	ws->active = false;
506
507	now = ktime_get();
508	duration = ktime_sub(now, ws->last_time);
509	ws->total_time = ktime_add(ws->total_time, duration);
510	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
511		ws->max_time = duration;
512
513	ws->last_time = now;
514	del_timer(&ws->timer);
515	ws->timer_expires = 0;
516
517	if (ws->autosleep_enabled)
518		update_prevent_sleep_time(ws, now);
519
520	/*
521	 * Increment the counter of registered wakeup events and decrement the
522 * counter of wakeup events in progress simultaneously.
523	 */
524	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
525	trace_wakeup_source_deactivate(ws->name, cec);
526
527	split_counters(&cnt, &inpr);
528	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
529		wake_up(&wakeup_count_wait_queue);
530}
531
532/**
533 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
534 * @ws: Wakeup source object associated with the source of the event.
535 *
536 * Call this function for wakeup events whose processing started with calling
537 * __pm_stay_awake().
538 *
539 * It is safe to call it from interrupt context.
540 */
541void __pm_relax(struct wakeup_source *ws)
542{
543	unsigned long flags;
544
545	if (!ws)
546		return;
547
548	spin_lock_irqsave(&ws->lock, flags);
549	if (ws->active)
550		wakeup_source_deactivate(ws);
551	spin_unlock_irqrestore(&ws->lock, flags);
552}
553EXPORT_SYMBOL_GPL(__pm_relax);
554
555/**
556 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
557 * @dev: Device that signaled the event.
558 *
559 * Execute __pm_relax() for the @dev's wakeup source object.
560 */
561void pm_relax(struct device *dev)
562{
563	unsigned long flags;
564
565	if (!dev)
566		return;
567
568	spin_lock_irqsave(&dev->power.lock, flags);
569	__pm_relax(dev->power.wakeup);
570	spin_unlock_irqrestore(&dev->power.lock, flags);
571}
572EXPORT_SYMBOL_GPL(pm_relax);
573
574/**
575 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
576 * @data: Address of the wakeup source object associated with the event source.
577 *
578 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
579 * in @data if it is currently active and its timer has not been canceled and
580 * the expiration time of the timer is not in the future.
581 */
582static void pm_wakeup_timer_fn(unsigned long data)
583{
584	struct wakeup_source *ws = (struct wakeup_source *)data;
585	unsigned long flags;
586
587	spin_lock_irqsave(&ws->lock, flags);
588
589	if (ws->active && ws->timer_expires
590	    && time_after_eq(jiffies, ws->timer_expires)) {
591		wakeup_source_deactivate(ws);
592		ws->expire_count++;
593	}
594
595	spin_unlock_irqrestore(&ws->lock, flags);
596}
597
598/**
599 * __pm_wakeup_event - Notify the PM core of a wakeup event.
600 * @ws: Wakeup source object associated with the event source.
601 * @msec: Anticipated event processing time (in milliseconds).
602 *
603 * Notify the PM core of a wakeup event whose source is @ws that will take
604 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
605 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
606 * execute pm_wakeup_timer_fn() in the future.
607 *
608 * It is safe to call this function from interrupt context.
609 */
610void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
611{
612	unsigned long flags;
613	unsigned long expires;
614
615	if (!ws)
616		return;
617
618	spin_lock_irqsave(&ws->lock, flags);
619
620	wakeup_source_report_event(ws);
621
622	if (!msec) {
623		wakeup_source_deactivate(ws);
624		goto unlock;
625	}
626
627	expires = jiffies + msecs_to_jiffies(msec);
628	if (!expires)
629		expires = 1;
630
631	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
632		mod_timer(&ws->timer, expires);
633		ws->timer_expires = expires;
634	}
635
636 unlock:
637	spin_unlock_irqrestore(&ws->lock, flags);
638}
639EXPORT_SYMBOL_GPL(__pm_wakeup_event);
640
641
642/**
643 * pm_wakeup_event - Notify the PM core of a wakeup event.
644 * @dev: Device the wakeup event is related to.
645 * @msec: Anticipated event processing time (in milliseconds).
646 *
647 * Call __pm_wakeup_event() for the @dev's wakeup source object.
648 */
649void pm_wakeup_event(struct device *dev, unsigned int msec)
650{
651	unsigned long flags;
652
653	if (!dev)
654		return;
655
656	spin_lock_irqsave(&dev->power.lock, flags);
657	__pm_wakeup_event(dev->power.wakeup, msec);
658	spin_unlock_irqrestore(&dev->power.lock, flags);
659}
660EXPORT_SYMBOL_GPL(pm_wakeup_event);
661
662void pm_print_active_wakeup_sources(void)
663{
664	struct wakeup_source *ws;
665	int active = 0;
666	struct wakeup_source *last_activity_ws = NULL;
667
668	rcu_read_lock();
669	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
670		if (ws->active) {
671			pr_info("active wakeup source: %s\n", ws->name);
672			active = 1;
673		} else if (!active &&
674			   (!last_activity_ws ||
675			    ktime_to_ns(ws->last_time) >
676			    ktime_to_ns(last_activity_ws->last_time))) {
677			last_activity_ws = ws;
678		}
679	}
680
681	if (!active && last_activity_ws)
682		pr_info("last active wakeup source: %s\n",
683			last_activity_ws->name);
684	rcu_read_unlock();
685}
686EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
687
688/**
689 * pm_wakeup_pending - Check if power transition in progress should be aborted.
690 *
691 * Compare the current number of registered wakeup events with its preserved
692 * value from the past and return true if new wakeup events have been registered
693 * since the old value was stored.  Also return true if the current number of
694 * wakeup events being processed is different from zero.
695 */
696bool pm_wakeup_pending(void)
697{
698	unsigned long flags;
699	bool ret = false;
700
701	spin_lock_irqsave(&events_lock, flags);
702	if (events_check_enabled) {
703		unsigned int cnt, inpr;
704
705		split_counters(&cnt, &inpr);
706		ret = (cnt != saved_count || inpr > 0);
707		events_check_enabled = !ret;
708	}
709	spin_unlock_irqrestore(&events_lock, flags);
710
711	if (ret) {
712		pr_info("PM: Wakeup pending, aborting suspend\n");
713		pm_print_active_wakeup_sources();
714	}
715
716	return ret;
717}
718
719/**
720 * pm_get_wakeup_count - Read the number of registered wakeup events.
721 * @count: Address to store the value at.
722 * @block: Whether or not to block.
723 *
724 * Store the number of registered wakeup events at the address in @count.  If
725 * @block is set, block until the current number of wakeup events being
726 * processed is zero.
727 *
728 * Return 'false' if the current number of wakeup events being processed is
729 * nonzero.  Otherwise return 'true'.
730 */
731bool pm_get_wakeup_count(unsigned int *count, bool block)
732{
733	unsigned int cnt, inpr;
734
735	if (block) {
736		DEFINE_WAIT(wait);
737
738		for (;;) {
739			prepare_to_wait(&wakeup_count_wait_queue, &wait,
740					TASK_INTERRUPTIBLE);
741			split_counters(&cnt, &inpr);
742			if (inpr == 0 || signal_pending(current))
743				break;
744
745			schedule();
746		}
747		finish_wait(&wakeup_count_wait_queue, &wait);
748	}
749
750	split_counters(&cnt, &inpr);
751	*count = cnt;
752	return !inpr;
753}
754
755/**
756 * pm_save_wakeup_count - Save the current number of registered wakeup events.
757 * @count: Value to compare with the current number of registered wakeup events.
758 *
759 * If @count is equal to the current number of registered wakeup events and the
760 * current number of wakeup events being processed is zero, store @count as the
761 * old number of registered wakeup events for pm_wakeup_pending(), enable
762 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
763 * detection and return 'false'.
764 */
765bool pm_save_wakeup_count(unsigned int count)
766{
767	unsigned int cnt, inpr;
768	unsigned long flags;
769
770	events_check_enabled = false;
771	spin_lock_irqsave(&events_lock, flags);
772	split_counters(&cnt, &inpr);
773	if (cnt == count && inpr == 0) {
774		saved_count = count;
775		events_check_enabled = true;
776	}
777	spin_unlock_irqrestore(&events_lock, flags);
778	return events_check_enabled;
779}
780
781#ifdef CONFIG_PM_AUTOSLEEP
782/**
783 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
784 * @set: Whether to set or to clear the autosleep_enabled flags.
785 */
786void pm_wakep_autosleep_enabled(bool set)
787{
788	struct wakeup_source *ws;
789	ktime_t now = ktime_get();
790
791	rcu_read_lock();
792	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
793		spin_lock_irq(&ws->lock);
794		if (ws->autosleep_enabled != set) {
795			ws->autosleep_enabled = set;
796			if (ws->active) {
797				if (set)
798					ws->start_prevent_time = now;
799				else
800					update_prevent_sleep_time(ws, now);
801			}
802		}
803		spin_unlock_irq(&ws->lock);
804	}
805	rcu_read_unlock();
806}
807#endif /* CONFIG_PM_AUTOSLEEP */
808
809static struct dentry *wakeup_sources_stats_dentry;
810
811/**
812 * print_wakeup_source_stats - Print wakeup source statistics information.
813 * @m: seq_file to print the statistics into.
814 * @ws: Wakeup source object to print the statistics for.
815 */
816static int print_wakeup_source_stats(struct seq_file *m,
817				     struct wakeup_source *ws)
818{
819	unsigned long flags;
820	ktime_t total_time;
821	ktime_t max_time;
822	unsigned long active_count;
823	ktime_t active_time;
824	ktime_t prevent_sleep_time;
825	int ret;
826
827	spin_lock_irqsave(&ws->lock, flags);
828
829	total_time = ws->total_time;
830	max_time = ws->max_time;
831	prevent_sleep_time = ws->prevent_sleep_time;
832	active_count = ws->active_count;
833	if (ws->active) {
834		ktime_t now = ktime_get();
835
836		active_time = ktime_sub(now, ws->last_time);
837		total_time = ktime_add(total_time, active_time);
838		if (active_time.tv64 > max_time.tv64)
839			max_time = active_time;
840
841		if (ws->autosleep_enabled)
842			prevent_sleep_time = ktime_add(prevent_sleep_time,
843				ktime_sub(now, ws->start_prevent_time));
844	} else {
845		active_time = ktime_set(0, 0);
846	}
847
848	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
849			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
850			ws->name, active_count, ws->event_count,
851			ws->wakeup_count, ws->expire_count,
852			ktime_to_ms(active_time), ktime_to_ms(total_time),
853			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
854			ktime_to_ms(prevent_sleep_time));
855
856	spin_unlock_irqrestore(&ws->lock, flags);
857
858	return ret;
859}
860
861/**
862 * wakeup_sources_stats_show - Print wakeup sources statistics information.
863 * @m: seq_file to print the statistics into.
864 */
865static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
866{
867	struct wakeup_source *ws;
868
869	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
870		"expire_count\tactive_since\ttotal_time\tmax_time\t"
871		"last_change\tprevent_suspend_time\n");
872
873	rcu_read_lock();
874	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
875		print_wakeup_source_stats(m, ws);
876	rcu_read_unlock();
877
878	return 0;
879}
880
881static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
882{
883	return single_open(file, wakeup_sources_stats_show, NULL);
884}
885
886static const struct file_operations wakeup_sources_stats_fops = {
887	.owner = THIS_MODULE,
888	.open = wakeup_sources_stats_open,
889	.read = seq_read,
890	.llseek = seq_lseek,
891	.release = single_release,
892};
893
894static int __init wakeup_sources_debugfs_init(void)
895{
896	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
897			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
898	return 0;
899}
900
901postcore_initcall(wakeup_sources_debugfs_init);