v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Industrial I/O event handling
  3 *
  4 * Copyright (c) 2008 Jonathan Cameron
  5 *
  6 * Based on elements of hwmon and input subsystems.
  7 */
  8
  9#include <linux/anon_inodes.h>
 10#include <linux/device.h>
 11#include <linux/fs.h>
 12#include <linux/kernel.h>
 13#include <linux/kfifo.h>
 14#include <linux/module.h>
 15#include <linux/poll.h>
 16#include <linux/sched.h>
 17#include <linux/slab.h>
 18#include <linux/uaccess.h>
 19#include <linux/wait.h>
 20#include <linux/iio/iio.h>
 21#include <linux/iio/iio-opaque.h>
 22#include "iio_core.h"
 23#include <linux/iio/sysfs.h>
 24#include <linux/iio/events.h>
 25
 26/**
 27 * struct iio_event_interface - chrdev interface for an event line
 28 * @wait:		wait queue to allow blocking reads of events
 29 * @det_events:		list of detected events
 30 * @dev_attr_list:	list of event interface sysfs attribute
 31 * @flags:		file operations related flags including busy flag.
 32 * @group:		event interface sysfs attribute group
 33 * @read_lock:		lock to protect kfifo read operations
 34 */
 35struct iio_event_interface {
 36	wait_queue_head_t	wait;
 37	DECLARE_KFIFO(det_events, struct iio_event_data, 16);
 38
 39	struct list_head	dev_attr_list;
 40	unsigned long		flags;
 41	struct attribute_group	group;
 42	struct mutex		read_lock;
 43};
 44
 45bool iio_event_enabled(const struct iio_event_interface *ev_int)
 46{
 47	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 48}
 49
 50/**
 51 * iio_push_event() - try to add event to the list for userspace reading
 52 * @indio_dev:		IIO device structure
 53 * @ev_code:		What event
 54 * @timestamp:		When the event occurred
 55 *
 56 * Note: The caller must make sure that this function is not running
 57 * concurrently for the same indio_dev more than once.
 58 *
 59 * This function may be safely used as soon as a valid reference to iio_dev has
 60 * been obtained via iio_device_alloc(), but any events that are submitted
 61 * before iio_device_register() has successfully completed will be silently
 62 * discarded.
 63 **/
 64int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 65{
 66	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 67	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
 68	struct iio_event_data ev;
 69	int copied;
 70
 71	if (!ev_int)
 72		return 0;
 73
 74	/* Does anyone care? */
 75	if (iio_event_enabled(ev_int)) {
 76
 77		ev.id = ev_code;
 78		ev.timestamp = timestamp;
 79
 80		copied = kfifo_put(&ev_int->det_events, ev);
 81		if (copied != 0)
 82			wake_up_poll(&ev_int->wait, EPOLLIN);
 83	}
 84
 85	return 0;
 86}
 87EXPORT_SYMBOL(iio_push_event);
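As a rough driver-side illustration of the call documented above: a driver that has at least allocated its iio_dev can push an event from a threaded interrupt handler roughly as in the sketch below. The handler name, the IIO_VOLTAGE channel type and channel number 0 are illustrative assumptions, not part of this file; iio_push_event(), IIO_UNMOD_EVENT_CODE() and iio_get_time_ns() are the interfaces actually involved.

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

/* Illustrative sketch: report that voltage channel 0 crossed its rising threshold. */
static irqreturn_t example_thresh_irq(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns(indio_dev));

	return IRQ_HANDLED;
}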
 88
 89/**
 90 * iio_event_poll() - poll the event queue to find out if it has data
 91 * @filep:	File structure pointer to identify the device
 92 * @wait:	Poll table pointer to add the wait queue on
 93 *
 94 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 95 *	   or a negative error code on failure
 96 */
 97static __poll_t iio_event_poll(struct file *filep,
 98			     struct poll_table_struct *wait)
 99{
100	struct iio_dev *indio_dev = filep->private_data;
101	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
102	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
103	__poll_t events = 0;
104
105	if (!indio_dev->info)
106		return events;
107
108	poll_wait(filep, &ev_int->wait, wait);
109
110	if (!kfifo_is_empty(&ev_int->det_events))
111		events = EPOLLIN | EPOLLRDNORM;
112
113	return events;
114}
115
116static ssize_t iio_event_chrdev_read(struct file *filep,
117				     char __user *buf,
118				     size_t count,
119				     loff_t *f_ps)
120{
121	struct iio_dev *indio_dev = filep->private_data;
122	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
123	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
124	unsigned int copied;
125	int ret;
126
127	if (!indio_dev->info)
128		return -ENODEV;
129
130	if (count < sizeof(struct iio_event_data))
131		return -EINVAL;
132
133	do {
134		if (kfifo_is_empty(&ev_int->det_events)) {
135			if (filep->f_flags & O_NONBLOCK)
136				return -EAGAIN;
137
138			ret = wait_event_interruptible(ev_int->wait,
139					!kfifo_is_empty(&ev_int->det_events) ||
140					indio_dev->info == NULL);
141			if (ret)
142				return ret;
143			if (indio_dev->info == NULL)
144				return -ENODEV;
145		}
146
147		if (mutex_lock_interruptible(&ev_int->read_lock))
148			return -ERESTARTSYS;
149		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
150		mutex_unlock(&ev_int->read_lock);
151
152		if (ret)
153			return ret;
154
155		/*
156		 * If we couldn't read anything from the fifo (a different
157		 * thread might have been faster) we either return -EAGAIN if
158		 * the file descriptor is non-blocking, otherwise we go back to
159		 * sleep and wait for more data to arrive.
160		 */
161		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
162			return -EAGAIN;
163
164	} while (copied == 0);
165
166	return copied;
167}
168
169static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
170{
171	struct iio_dev *indio_dev = filep->private_data;
172	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
173	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
174
175	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
176
177	iio_device_put(indio_dev);
178
179	return 0;
180}
181
182static const struct file_operations iio_event_chrdev_fileops = {
183	.read =  iio_event_chrdev_read,
184	.poll =  iio_event_poll,
185	.release = iio_event_chrdev_release,
186	.owner = THIS_MODULE,
187	.llseek = noop_llseek,
188};
189
190int iio_event_getfd(struct iio_dev *indio_dev)
191{
192	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
193	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
194	int fd;
195
196	if (ev_int == NULL)
197		return -ENODEV;
198
199	fd = mutex_lock_interruptible(&indio_dev->mlock);
200	if (fd)
201		return fd;
202
203	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
204		fd = -EBUSY;
205		goto unlock;
206	}
207
208	iio_device_get(indio_dev);
209
210	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
211				indio_dev, O_RDONLY | O_CLOEXEC);
212	if (fd < 0) {
213		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
214		iio_device_put(indio_dev);
215	} else {
216		kfifo_reset_out(&ev_int->det_events);
217	}
218
219unlock:
220	mutex_unlock(&indio_dev->mlock);
221	return fd;
222}
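For context, userspace reaches the flow above through the IIO_GET_EVENT_FD_IOCTL handled by the IIO core on the device's character device: that ioctl ends up in iio_event_getfd(), and the anonymous fd it returns is then driven by poll() and read() via iio_event_poll() and iio_event_chrdev_read() above. A minimal, hedged userspace sketch follows; the device node path is illustrative and error handling is abbreviated.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/events.h>

int main(void)
{
	struct iio_event_data ev;
	struct pollfd pfd;
	int dev_fd, event_fd;

	dev_fd = open("/dev/iio:device0", O_RDONLY);	/* illustrative device node */
	if (dev_fd < 0)
		return 1;

	if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
		return 1;

	pfd.fd = event_fd;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		if (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
			printf("event id 0x%llx timestamp %lld\n",
			       (unsigned long long)ev.id,
			       (long long)ev.timestamp);
	}

	close(event_fd);
	close(dev_fd);
	return 0;
}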
223
224static const char * const iio_ev_type_text[] = {
225	[IIO_EV_TYPE_THRESH] = "thresh",
226	[IIO_EV_TYPE_MAG] = "mag",
227	[IIO_EV_TYPE_ROC] = "roc",
228	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
229	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
230	[IIO_EV_TYPE_CHANGE] = "change",
231};
232
233static const char * const iio_ev_dir_text[] = {
234	[IIO_EV_DIR_EITHER] = "either",
235	[IIO_EV_DIR_RISING] = "rising",
236	[IIO_EV_DIR_FALLING] = "falling"
237};
238
239static const char * const iio_ev_info_text[] = {
240	[IIO_EV_INFO_ENABLE] = "en",
241	[IIO_EV_INFO_VALUE] = "value",
242	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
243	[IIO_EV_INFO_PERIOD] = "period",
244	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
245	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
246};
247
248static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
249{
250	return attr->c->event_spec[attr->address & 0xffff].dir;
251}
252
253static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
254{
255	return attr->c->event_spec[attr->address & 0xffff].type;
256}
257
258static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
259{
260	return (attr->address >> 16) & 0xffff;
261}
262
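These three helpers undo the packing applied when the event attributes are created: iio_device_add_event() further down passes (i << 16) | spec_index as the attribute address, so the low 16 bits select the entry in chan->event_spec[] and bits 16..31 select the iio_event_info. A small sketch of that round trip, with illustrative variable names:

	/* Illustrative only: mirrors (i << 16) | spec_index below and the masks above. */
	u64 address = ((u64)IIO_EV_INFO_VALUE << 16) | spec_index;
	unsigned int spec = address & 0xffff;			/* -> chan->event_spec[spec].type / .dir */
	enum iio_event_info info = (address >> 16) & 0xffff;	/* -> en, value, hysteresis, ... */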
263static ssize_t iio_ev_state_store(struct device *dev,
264				  struct device_attribute *attr,
265				  const char *buf,
266				  size_t len)
267{
268	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
269	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
270	int ret;
271	bool val;
272
273	ret = strtobool(buf, &val);
274	if (ret < 0)
275		return ret;
276
277	ret = indio_dev->info->write_event_config(indio_dev,
278		this_attr->c, iio_ev_attr_type(this_attr),
279		iio_ev_attr_dir(this_attr), val);
280
281	return (ret < 0) ? ret : len;
282}
283
284static ssize_t iio_ev_state_show(struct device *dev,
285				 struct device_attribute *attr,
286				 char *buf)
287{
288	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
289	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
290	int val;
291
292	val = indio_dev->info->read_event_config(indio_dev,
293		this_attr->c, iio_ev_attr_type(this_attr),
294		iio_ev_attr_dir(this_attr));
295	if (val < 0)
296		return val;
297	else
298		return sprintf(buf, "%d\n", val);
299}
300
301static ssize_t iio_ev_value_show(struct device *dev,
302				 struct device_attribute *attr,
303				 char *buf)
304{
305	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
306	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
307	int val, val2, val_arr[2];
308	int ret;
309
310	ret = indio_dev->info->read_event_value(indio_dev,
311		this_attr->c, iio_ev_attr_type(this_attr),
312		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
313		&val, &val2);
314	if (ret < 0)
315		return ret;
316	val_arr[0] = val;
317	val_arr[1] = val2;
318	return iio_format_value(buf, ret, 2, val_arr);
319}
320
321static ssize_t iio_ev_value_store(struct device *dev,
322				  struct device_attribute *attr,
323				  const char *buf,
324				  size_t len)
325{
326	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
327	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
328	int val, val2;
329	int ret;
330
331	if (!indio_dev->info->write_event_value)
332		return -EINVAL;
333
334	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
335	if (ret)
336		return ret;
337	ret = indio_dev->info->write_event_value(indio_dev,
338		this_attr->c, iio_ev_attr_type(this_attr),
339		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
340		val, val2);
341	if (ret < 0)
342		return ret;
343
344	return len;
345}
346
347static int iio_device_add_event(struct iio_dev *indio_dev,
348	const struct iio_chan_spec *chan, unsigned int spec_index,
349	enum iio_event_type type, enum iio_event_direction dir,
350	enum iio_shared_by shared_by, const unsigned long *mask)
351{
352	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
353	ssize_t (*show)(struct device *, struct device_attribute *, char *);
354	ssize_t (*store)(struct device *, struct device_attribute *,
355		const char *, size_t);
356	unsigned int attrcount = 0;
357	unsigned int i;
358	char *postfix;
359	int ret;
360
361	for_each_set_bit(i, mask, sizeof(*mask)*8) {
362		if (i >= ARRAY_SIZE(iio_ev_info_text))
363			return -EINVAL;
364		if (dir != IIO_EV_DIR_NONE)
365			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
366					iio_ev_type_text[type],
367					iio_ev_dir_text[dir],
368					iio_ev_info_text[i]);
369		else
370			postfix = kasprintf(GFP_KERNEL, "%s_%s",
371					iio_ev_type_text[type],
372					iio_ev_info_text[i]);
373		if (postfix == NULL)
374			return -ENOMEM;
375
376		if (i == IIO_EV_INFO_ENABLE) {
377			show = iio_ev_state_show;
378			store = iio_ev_state_store;
379		} else {
380			show = iio_ev_value_show;
381			store = iio_ev_value_store;
382		}
383
384		ret = __iio_add_chan_devattr(postfix, chan, show, store,
385			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
386			&iio_dev_opaque->event_interface->dev_attr_list);
387		kfree(postfix);
388
389		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
390			continue;
391
392		if (ret)
393			return ret;
394
395		attrcount++;
396	}
397
398	return attrcount;
399}
400
401static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
402	struct iio_chan_spec const *chan)
403{
404	int ret = 0, i, attrcount = 0;
405	enum iio_event_direction dir;
406	enum iio_event_type type;
407
408	for (i = 0; i < chan->num_event_specs; i++) {
409		type = chan->event_spec[i].type;
410		dir = chan->event_spec[i].dir;
411
412		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
413			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
414		if (ret < 0)
415			return ret;
416		attrcount += ret;
417
418		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
419			IIO_SHARED_BY_TYPE,
420			&chan->event_spec[i].mask_shared_by_type);
421		if (ret < 0)
422			return ret;
423		attrcount += ret;
424
425		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
426			IIO_SHARED_BY_DIR,
427			&chan->event_spec[i].mask_shared_by_dir);
428		if (ret < 0)
429			return ret;
430		attrcount += ret;
431
432		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
433			IIO_SHARED_BY_ALL,
434			&chan->event_spec[i].mask_shared_by_all);
435		if (ret < 0)
436			return ret;
437		attrcount += ret;
438	}
439	ret = attrcount;
440	return ret;
441}
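For context, the masks walked above come from the iio_event_spec tables a driver attaches to its channels. A hedged sketch of such a declaration, which would lead this code to create attributes such as in_voltage0_thresh_rising_en and in_voltage0_thresh_rising_value (the structure names and values here are illustrative, not taken from this file):

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

/* Illustrative only: one rising-threshold event on an indexed voltage channel. */
static const struct iio_event_spec example_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				 BIT(IIO_EV_INFO_VALUE),
	},
};

static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.event_spec = example_events,
	.num_event_specs = ARRAY_SIZE(example_events),
};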
442
443static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
444{
445	int j, ret, attrcount = 0;
446
447	/* Dynamically created from the channels array */
448	for (j = 0; j < indio_dev->num_channels; j++) {
449		ret = iio_device_add_event_sysfs(indio_dev,
450						 &indio_dev->channels[j]);
451		if (ret < 0)
452			return ret;
453		attrcount += ret;
454	}
455	return attrcount;
456}
457
458static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
459{
460	int j;
461
462	for (j = 0; j < indio_dev->num_channels; j++) {
463		if (indio_dev->channels[j].num_event_specs != 0)
464			return true;
465	}
466	return false;
467}
468
469static void iio_setup_ev_int(struct iio_event_interface *ev_int)
470{
471	INIT_KFIFO(ev_int->det_events);
472	init_waitqueue_head(&ev_int->wait);
473	mutex_init(&ev_int->read_lock);
474}
475
476static const char *iio_event_group_name = "events";
477int iio_device_register_eventset(struct iio_dev *indio_dev)
478{
479	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
480	struct iio_dev_attr *p;
481	int ret = 0, attrcount_orig = 0, attrcount, attrn;
482	struct attribute **attr;
483
484	if (!(indio_dev->info->event_attrs ||
485	      iio_check_for_dynamic_events(indio_dev)))
486		return 0;
487
488	iio_dev_opaque->event_interface =
489		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
490	if (iio_dev_opaque->event_interface == NULL)
491		return -ENOMEM;
492
493	INIT_LIST_HEAD(&iio_dev_opaque->event_interface->dev_attr_list);
494
495	iio_setup_ev_int(iio_dev_opaque->event_interface);
496	if (indio_dev->info->event_attrs != NULL) {
497		attr = indio_dev->info->event_attrs->attrs;
498		while (*attr++ != NULL)
499			attrcount_orig++;
500	}
501	attrcount = attrcount_orig;
502	if (indio_dev->channels) {
503		ret = __iio_add_event_config_attrs(indio_dev);
504		if (ret < 0)
505			goto error_free_setup_event_lines;
506		attrcount += ret;
507	}
508
509	iio_dev_opaque->event_interface->group.name = iio_event_group_name;
510	iio_dev_opaque->event_interface->group.attrs = kcalloc(attrcount + 1,
511							  sizeof(iio_dev_opaque->event_interface->group.attrs[0]),
512							  GFP_KERNEL);
513	if (iio_dev_opaque->event_interface->group.attrs == NULL) {
514		ret = -ENOMEM;
515		goto error_free_setup_event_lines;
516	}
517	if (indio_dev->info->event_attrs)
518		memcpy(iio_dev_opaque->event_interface->group.attrs,
519		       indio_dev->info->event_attrs->attrs,
520		       sizeof(iio_dev_opaque->event_interface->group.attrs[0])
521		       *attrcount_orig);
522	attrn = attrcount_orig;
523	/* Add all elements from the list. */
524	list_for_each_entry(p,
525			    &iio_dev_opaque->event_interface->dev_attr_list,
526			    l)
527		iio_dev_opaque->event_interface->group.attrs[attrn++] =
528			&p->dev_attr.attr;
529	indio_dev->groups[indio_dev->groupcounter++] =
530		&iio_dev_opaque->event_interface->group;
531
532	return 0;
533
534error_free_setup_event_lines:
535	iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list);
536	kfree(iio_dev_opaque->event_interface);
537	iio_dev_opaque->event_interface = NULL;
538	return ret;
539}
540
541/**
542 * iio_device_wakeup_eventset - Wakes up the event waitqueue
543 * @indio_dev: The IIO device
544 *
545 * Wakes up the event waitqueue used for poll() and blocking read().
546 * Should usually be called when the device is unregistered.
547 */
548void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
549{
550	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
551
552	if (iio_dev_opaque->event_interface == NULL)
553		return;
554	wake_up(&iio_dev_opaque->event_interface->wait);
555}
556
557void iio_device_unregister_eventset(struct iio_dev *indio_dev)
558{
559	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
560
561	if (iio_dev_opaque->event_interface == NULL)
562		return;
563	iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list);
564	kfree(iio_dev_opaque->event_interface->group.attrs);
565	kfree(iio_dev_opaque->event_interface);
566}
v3.5.6
 
  1/* Industrial I/O event handling
  2 *
  3 * Copyright (c) 2008 Jonathan Cameron
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * Based on elements of hwmon and input subsystems.
 10 */
 11
 12#include <linux/anon_inodes.h>
 13#include <linux/device.h>
 14#include <linux/fs.h>
 15#include <linux/kernel.h>
 16#include <linux/kfifo.h>
 17#include <linux/module.h>
 18#include <linux/poll.h>
 19#include <linux/sched.h>
 20#include <linux/slab.h>
 21#include <linux/uaccess.h>
 22#include <linux/wait.h>
 23#include <linux/iio/iio.h>
 24#include "iio_core.h"
 25#include <linux/iio/sysfs.h>
 26#include <linux/iio/events.h>
 27
 28/**
 29 * struct iio_event_interface - chrdev interface for an event line
 30 * @wait:		wait queue to allow blocking reads of events
 31 * @det_events:		list of detected events
 32 * @dev_attr_list:	list of event interface sysfs attribute
 33 * @flags:		file operations related flags including busy flag.
 34 * @group:		event interface sysfs attribute group
 35 */
 36struct iio_event_interface {
 37	wait_queue_head_t	wait;
 38	DECLARE_KFIFO(det_events, struct iio_event_data, 16);
 39
 40	struct list_head	dev_attr_list;
 41	unsigned long		flags;
 42	struct attribute_group	group;
 43};
 44
 45int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 46{
 47	struct iio_event_interface *ev_int = indio_dev->event_interface;
 48	struct iio_event_data ev;
 49	int copied;
 50
 51	/* Does anyone care? */
 52	spin_lock(&ev_int->wait.lock);
 53	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 54
 55		ev.id = ev_code;
 56		ev.timestamp = timestamp;
 57
 58		copied = kfifo_put(&ev_int->det_events, &ev);
 59		if (copied != 0)
 60			wake_up_locked_poll(&ev_int->wait, POLLIN);
 61	}
 62	spin_unlock(&ev_int->wait.lock);
 63
 64	return 0;
 65}
 66EXPORT_SYMBOL(iio_push_event);
 67
 68/**
 69 * iio_event_poll() - poll the event queue to find out if it has data
 70 */
 71static unsigned int iio_event_poll(struct file *filep,
 72			     struct poll_table_struct *wait)
 73{
 74	struct iio_event_interface *ev_int = filep->private_data;
 75	unsigned int events = 0;
 76
 77	poll_wait(filep, &ev_int->wait, wait);
 78
 79	spin_lock(&ev_int->wait.lock);
 80	if (!kfifo_is_empty(&ev_int->det_events))
 81		events = POLLIN | POLLRDNORM;
 82	spin_unlock(&ev_int->wait.lock);
 83
 84	return events;
 85}
 86
 87static ssize_t iio_event_chrdev_read(struct file *filep,
 88				     char __user *buf,
 89				     size_t count,
 90				     loff_t *f_ps)
 91{
 92	struct iio_event_interface *ev_int = filep->private_data;
 93	unsigned int copied;
 94	int ret;
 95
 96	if (count < sizeof(struct iio_event_data))
 97		return -EINVAL;
 98
 99	spin_lock(&ev_int->wait.lock);
100	if (kfifo_is_empty(&ev_int->det_events)) {
101		if (filep->f_flags & O_NONBLOCK) {
102			ret = -EAGAIN;
103			goto error_unlock;
104		}
105		/* Blocking on device; waiting for something to be there */
106		ret = wait_event_interruptible_locked(ev_int->wait,
107					!kfifo_is_empty(&ev_int->det_events));
108		if (ret)
109			goto error_unlock;
110		/* Single access device so no one else can get the data */
111	}
112
113	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
114
115error_unlock:
116	spin_unlock(&ev_int->wait.lock);
117
118	return ret ? ret : copied;
119}
120
121static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
122{
123	struct iio_event_interface *ev_int = filep->private_data;
124
125	spin_lock(&ev_int->wait.lock);
126	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
127	/*
128	 * In order to maintain a clean state for reopening,
129	 * clear out any awaiting events. The mask will prevent
130	 * any new __iio_push_event calls running.
131	 */
132	kfifo_reset_out(&ev_int->det_events);
133	spin_unlock(&ev_int->wait.lock);
134
135	return 0;
136}
137
138static const struct file_operations iio_event_chrdev_fileops = {
139	.read =  iio_event_chrdev_read,
140	.poll =  iio_event_poll,
141	.release = iio_event_chrdev_release,
142	.owner = THIS_MODULE,
143	.llseek = noop_llseek,
144};
145
146int iio_event_getfd(struct iio_dev *indio_dev)
147{
148	struct iio_event_interface *ev_int = indio_dev->event_interface;
149	int fd;
150
151	if (ev_int == NULL)
152		return -ENODEV;
153
154	spin_lock(&ev_int->wait.lock);
155	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
156		spin_unlock(&ev_int->wait.lock);
157		return -EBUSY;
158	}
159	spin_unlock(&ev_int->wait.lock);
160	fd = anon_inode_getfd("iio:event",
161				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
162	if (fd < 0) {
163		spin_lock(&ev_int->wait.lock);
164		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
165		spin_unlock(&ev_int->wait.lock);
166	}
167	return fd;
168}
169
170static const char * const iio_ev_type_text[] = {
171	[IIO_EV_TYPE_THRESH] = "thresh",
172	[IIO_EV_TYPE_MAG] = "mag",
173	[IIO_EV_TYPE_ROC] = "roc",
174	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
175	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
176};
177
178static const char * const iio_ev_dir_text[] = {
179	[IIO_EV_DIR_EITHER] = "either",
180	[IIO_EV_DIR_RISING] = "rising",
181	[IIO_EV_DIR_FALLING] = "falling"
182};
183
184static ssize_t iio_ev_state_store(struct device *dev,
185				  struct device_attribute *attr,
186				  const char *buf,
187				  size_t len)
188{
189	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
190	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
191	int ret;
192	bool val;
193
194	ret = strtobool(buf, &val);
195	if (ret < 0)
196		return ret;
197
198	ret = indio_dev->info->write_event_config(indio_dev,
199						  this_attr->address,
200						  val);
201	return (ret < 0) ? ret : len;
202}
203
204static ssize_t iio_ev_state_show(struct device *dev,
205				 struct device_attribute *attr,
206				 char *buf)
207{
208	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
209	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
210	int val = indio_dev->info->read_event_config(indio_dev,
211						     this_attr->address);
212
213	if (val < 0)
214		return val;
215	else
216		return sprintf(buf, "%d\n", val);
217}
218
219static ssize_t iio_ev_value_show(struct device *dev,
220				 struct device_attribute *attr,
221				 char *buf)
222{
223	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
224	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
225	int val, ret;
226
227	ret = indio_dev->info->read_event_value(indio_dev,
228						this_attr->address, &val);
229	if (ret < 0)
230		return ret;
231
232	return sprintf(buf, "%d\n", val);
233}
234
235static ssize_t iio_ev_value_store(struct device *dev,
236				  struct device_attribute *attr,
237				  const char *buf,
238				  size_t len)
239{
240	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
241	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
242	unsigned long val;
243	int ret;
244
245	if (!indio_dev->info->write_event_value)
246		return -EINVAL;
247
248	ret = strict_strtoul(buf, 10, &val);
249	if (ret)
250		return ret;
251
252	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
253						 val);
254	if (ret < 0)
255		return ret;
256
257	return len;
258}
259
260static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
261				      struct iio_chan_spec const *chan)
262{
263	int ret = 0, i, attrcount = 0;
264	u64 mask = 0;
265	char *postfix;
266	if (!chan->event_mask)
267		return 0;
268
269	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
270		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
271				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
272				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
273		if (postfix == NULL) {
274			ret = -ENOMEM;
275			goto error_ret;
276		}
277		if (chan->modified)
278			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
279						  i/IIO_EV_DIR_MAX,
280						  i%IIO_EV_DIR_MAX);
281		else if (chan->differential)
282			mask = IIO_EVENT_CODE(chan->type,
283					      0, 0,
284					      i%IIO_EV_DIR_MAX,
285					      i/IIO_EV_DIR_MAX,
286					      0,
287					      chan->channel,
288					      chan->channel2);
289		else
290			mask = IIO_UNMOD_EVENT_CODE(chan->type,
291						    chan->channel,
292						    i/IIO_EV_DIR_MAX,
293						    i%IIO_EV_DIR_MAX);
294
295		ret = __iio_add_chan_devattr(postfix,
296					     chan,
297					     &iio_ev_state_show,
298					     iio_ev_state_store,
299					     mask,
300					     0,
301					     &indio_dev->dev,
302					     &indio_dev->event_interface->
303					     dev_attr_list);
304		kfree(postfix);
305		if (ret)
306			goto error_ret;
307		attrcount++;
308		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
309				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
310				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
311		if (postfix == NULL) {
312			ret = -ENOMEM;
313			goto error_ret;
314		}
315		ret = __iio_add_chan_devattr(postfix, chan,
316					     iio_ev_value_show,
317					     iio_ev_value_store,
318					     mask,
319					     0,
320					     &indio_dev->dev,
321					     &indio_dev->event_interface->
322					     dev_attr_list);
323		kfree(postfix);
324		if (ret)
325			goto error_ret;
326		attrcount++;
327	}
328	ret = attrcount;
329error_ret:
330	return ret;
331}
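In this older API the per-channel events are a flat event_mask bitmap rather than iio_event_spec tables: each set bit encodes type * IIO_EV_DIR_MAX + direction, which is exactly what the i/IIO_EV_DIR_MAX and i%IIO_EV_DIR_MAX arithmetic above unpacks. A hedged sketch of a channel declaration of that era, with illustrative names and values:

#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

/*
 * Illustrative only: a voltage channel advertising a rising-threshold
 * event; the bit position mirrors the unpacking done above.
 */
static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.event_mask = BIT(IIO_EV_TYPE_THRESH * IIO_EV_DIR_MAX +
			  IIO_EV_DIR_RISING),
};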
332
333static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
334{
335	struct iio_dev_attr *p, *n;
336	list_for_each_entry_safe(p, n,
337				 &indio_dev->event_interface->
338				 dev_attr_list, l) {
339		kfree(p->dev_attr.attr.name);
340		kfree(p);
341	}
342}
343
344static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
345{
346	int j, ret, attrcount = 0;
347
348	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
349	/* Dynamically created from the channels array */
350	for (j = 0; j < indio_dev->num_channels; j++) {
351		ret = iio_device_add_event_sysfs(indio_dev,
352						 &indio_dev->channels[j]);
353		if (ret < 0)
354			goto error_clear_attrs;
355		attrcount += ret;
356	}
357	return attrcount;
358
359error_clear_attrs:
360	__iio_remove_event_config_attrs(indio_dev);
361
362	return ret;
363}
364
365static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
366{
367	int j;
368
369	for (j = 0; j < indio_dev->num_channels; j++)
370		if (indio_dev->channels[j].event_mask != 0)
371			return true;
372	return false;
373}
374
375static void iio_setup_ev_int(struct iio_event_interface *ev_int)
376{
377	INIT_KFIFO(ev_int->det_events);
378	init_waitqueue_head(&ev_int->wait);
379}
380
381static const char *iio_event_group_name = "events";
382int iio_device_register_eventset(struct iio_dev *indio_dev)
383{
384	struct iio_dev_attr *p;
385	int ret = 0, attrcount_orig = 0, attrcount, attrn;
386	struct attribute **attr;
387
388	if (!(indio_dev->info->event_attrs ||
389	      iio_check_for_dynamic_events(indio_dev)))
390		return 0;
391
392	indio_dev->event_interface =
393		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
394	if (indio_dev->event_interface == NULL) {
395		ret = -ENOMEM;
396		goto error_ret;
397	}
398
399	iio_setup_ev_int(indio_dev->event_interface);
400	if (indio_dev->info->event_attrs != NULL) {
401		attr = indio_dev->info->event_attrs->attrs;
402		while (*attr++ != NULL)
403			attrcount_orig++;
404	}
405	attrcount = attrcount_orig;
406	if (indio_dev->channels) {
407		ret = __iio_add_event_config_attrs(indio_dev);
408		if (ret < 0)
409			goto error_free_setup_event_lines;
410		attrcount += ret;
411	}
412
413	indio_dev->event_interface->group.name = iio_event_group_name;
414	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
415							  sizeof(indio_dev->event_interface->group.attrs[0]),
416							  GFP_KERNEL);
417	if (indio_dev->event_interface->group.attrs == NULL) {
418		ret = -ENOMEM;
419		goto error_free_setup_event_lines;
420	}
421	if (indio_dev->info->event_attrs)
422		memcpy(indio_dev->event_interface->group.attrs,
423		       indio_dev->info->event_attrs->attrs,
424		       sizeof(indio_dev->event_interface->group.attrs[0])
425		       *attrcount_orig);
426	attrn = attrcount_orig;
427	/* Add all elements from the list. */
428	list_for_each_entry(p,
429			    &indio_dev->event_interface->dev_attr_list,
430			    l)
431		indio_dev->event_interface->group.attrs[attrn++] =
432			&p->dev_attr.attr;
433	indio_dev->groups[indio_dev->groupcounter++] =
434		&indio_dev->event_interface->group;
435
436	return 0;
437
438error_free_setup_event_lines:
439	__iio_remove_event_config_attrs(indio_dev);
440	kfree(indio_dev->event_interface);
441error_ret:
442
443	return ret;
444}
445
446void iio_device_unregister_eventset(struct iio_dev *indio_dev)
447{
448	if (indio_dev->event_interface == NULL)
449		return;
450	__iio_remove_event_config_attrs(indio_dev);
451	kfree(indio_dev->event_interface->group.attrs);
452	kfree(indio_dev->event_interface);
453}