kernel/power/wakelock.c as of v3.15:
/*
 * kernel/power/wakelock.c
 *
 * User space wakeup sources support.
 *
 * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This code is based on the analogous interface allowing user space to
 * manipulate wakelocks on Android.
 */

#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

#include "power.h"

static DEFINE_MUTEX(wakelocks_lock);

struct wakelock {
	char			*name;
	struct rb_node		node;
	struct wakeup_source	ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head	lru;
#endif
};

static struct rb_root wakelocks_tree = RB_ROOT;

ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
	struct rb_node *node;
	struct wakelock *wl;
	char *str = buf;
	char *end = buf + PAGE_SIZE;

	mutex_lock(&wakelocks_lock);

	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
		wl = rb_entry(node, struct wakelock, node);
		if (wl->ws.active == show_active)
			str += scnprintf(str, end - str, "%s ", wl->name);
	}
	if (str > buf)
		str--;

	str += scnprintf(str, end - str, "\n");

	mutex_unlock(&wakelocks_lock);
	return (str - buf);
}

#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;

static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */

#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX	100
#define WL_GC_TIME_SEC	300

static LIST_HEAD(wakelocks_lru_list);
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

static void wakelocks_gc(void)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws.lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
		active = wl->ws.active;
		spin_unlock_irq(&wl->ws.lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			wakeup_source_remove(&wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */

static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws.name = wl->name;
	wakeup_source_add(&wl->ws);
	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}

int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(&wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(&wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}

int pm_wake_unlock(const char *buf)
{
	struct wakelock *wl;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	len = strlen(buf);
	if (!len)
		return -EINVAL;

	if (buf[len-1] == '\n')
		len--;

	if (!len)
		return -EINVAL;

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, false);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	__pm_relax(&wl->ws);

	wakelocks_lru_most_recent(wl);
	wakelocks_gc();

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
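The v4.6 version that follows exposes the same interface; the main change is that garbage collection of stale wakelocks no longer runs synchronously from pm_wake_unlock(). Instead, wakelocks_gc() only bumps a counter and schedules wakelock_work, and the new __wakelocks_gc() work function performs the LRU scan in process context while holding wakelocks_lock (hence the added <linux/workqueue.h> include).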
kernel/power/wakelock.c as of v4.6:
/*
 * kernel/power/wakelock.c
 *
 * User space wakeup sources support.
 *
 * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This code is based on the analogous interface allowing user space to
 * manipulate wakelocks on Android.
 */

#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "power.h"

static DEFINE_MUTEX(wakelocks_lock);

struct wakelock {
	char			*name;
	struct rb_node		node;
	struct wakeup_source	ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head	lru;
#endif
};

static struct rb_root wakelocks_tree = RB_ROOT;

ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
	struct rb_node *node;
	struct wakelock *wl;
	char *str = buf;
	char *end = buf + PAGE_SIZE;

	mutex_lock(&wakelocks_lock);

	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
		wl = rb_entry(node, struct wakelock, node);
		if (wl->ws.active == show_active)
			str += scnprintf(str, end - str, "%s ", wl->name);
	}
	if (str > buf)
		str--;

	str += scnprintf(str, end - str, "\n");

	mutex_unlock(&wakelocks_lock);
	return (str - buf);
}

#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;

static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */

#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX	100
#define WL_GC_TIME_SEC	300

static void __wakelocks_gc(struct work_struct *work);
static LIST_HEAD(wakelocks_lru_list);
static DECLARE_WORK(wakelock_work, __wakelocks_gc);
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

static void __wakelocks_gc(struct work_struct *work)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	mutex_lock(&wakelocks_lock);

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws.lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
		active = wl->ws.active;
		spin_unlock_irq(&wl->ws.lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			wakeup_source_remove(&wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;

	mutex_unlock(&wakelocks_lock);
}

static void wakelocks_gc(void)
{
	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	schedule_work(&wakelock_work);
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */

static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws.name = wl->name;
	wakeup_source_add(&wl->ws);
	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}

int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(&wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(&wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}

int pm_wake_unlock(const char *buf)
{
	struct wakelock *wl;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	len = strlen(buf);
	if (!len)
		return -EINVAL;

	if (buf[len-1] == '\n')
		len--;

	if (!len)
		return -EINVAL;

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, false);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	__pm_relax(&wl->ws);

	wakelocks_lru_most_recent(wl);
	wakelocks_gc();

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
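For reference, pm_wake_lock() and pm_wake_unlock() are the handlers behind the /sys/power/wake_lock and /sys/power/wake_unlock attributes. Below is a minimal user-space sketch of the interface, assuming a kernel built with CONFIG_PM_WAKELOCKS and a caller with CAP_BLOCK_SUSPEND; the lock name "mylock" and the 5-second timeout are arbitrary example values.

	/* Sketch: acquire and release a user-space wakeup source via sysfs. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_str(const char *path, const char *s)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, s, strlen(s));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		/* Acquire "mylock" with a 5 s timeout (the value is in nanoseconds). */
		if (write_str("/sys/power/wake_lock", "mylock 5000000000"))
			perror("wake_lock");

		/* ... work that must not race with system suspend ... */
		sleep(1);

		/* Release it explicitly (the timeout would also release it). */
		if (write_str("/sys/power/wake_unlock", "mylock"))
			perror("wake_unlock");

		return 0;
	}

Writing "name timeout" arms a timed wakeup event (pm_wake_lock() parses the timeout in nanoseconds and rounds it up to milliseconds for __pm_wakeup_event()), while writing just "name" keeps the wakeup source active until the same name is written to wake_unlock.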