/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
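
/*
 * Illustrative sketch (not part of this file): how board or ADC driver
 * code might describe which of its channels a consumer device uses.
 * The names "adc0", "bar-sensor.0" and "battery_voltage" are made up for
 * the example; adc_channel_label must match a datasheet_name in the ADC
 * driver's iio_chan_spec array, and consumer_dev_name/consumer_channel
 * are what the consumer later passes to iio_st_channel_get().
 */
static struct iio_map example_adc_maps[] = {
	{
		.adc_channel_label = "adc0",
		.consumer_dev_name = "bar-sensor.0",
		.consumer_channel = "battery_voltage",
	},
	{ },	/* array is terminated by a NULL consumer_dev_name */
};

/* Typically called from the ADC driver's probe(), once indio_dev exists:
 *
 *	ret = iio_map_array_register(indio_dev, example_adc_maps);
 */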

/* Assumes the exact same array (i.e. the same memory locations) is used
 * at unregistration as was used at registration, rather than doing more
 * complex checking of contents.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev,
			     struct iio_map *maps)
{
	int i = 0, ret = 0;
	bool found_it;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		found_it = false;
		list_for_each_entry(mapi, &iio_map_list, l)
			if (&maps[i] == mapi->map) {
				list_del(&mapi->l);
				kfree(mapi);
				found_it = true;
				break;
			}
		if (!found_it) {
			ret = -ENODEV;
			goto error_ret;
		}
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
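
/*
 * Illustrative sketch (not part of this file): the matching teardown.
 * Because unregistration compares pointers rather than contents, the very
 * same 'example_adc_maps' array registered above must be passed back,
 * typically from the ADC driver's remove():
 *
 *	iio_map_array_unregister(indio_dev, example_adc_maps);
 */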

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
			 const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}
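
/*
 * Illustrative sketch (not part of this file): for the lookup above to
 * succeed, the ADC driver must populate datasheet_name in its channel
 * specs with the same strings used as adc_channel_label in the maps.
 * The values below are hypothetical.
 */
static const struct iio_chan_spec example_adc_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.datasheet_name = "adc0",	/* matched by iio_chan_spec_from_name() */
	},
};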

struct iio_channel *iio_st_channel_get(const char *name,
				       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		get_device(&c->indio_dev->dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		put_device(&c->indio_dev->dev);
		return ERR_PTR(-ENOMEM);
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label)
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

	return channel;
}
EXPORT_SYMBOL_GPL(iio_st_channel_get);

void iio_st_channel_release(struct iio_channel *channel)
{
	put_device(&channel->indio_dev->dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_st_channel_release);
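
/*
 * Illustrative sketch (not part of this file): a consumer driver looking
 * up the single channel mapped to it.  The names match the hypothetical
 * map above; on success the channel must eventually be handed back via
 * iio_st_channel_release().
 */
static int example_consumer_one_channel(void)
{
	struct iio_channel *chan;

	chan = iio_st_channel_get("bar-sensor.0", "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use the channel, e.g. via iio_st_read_channel_raw() ... */

	iio_st_channel_release(chan);
	return 0;
}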

struct iio_channel *iio_st_channel_get_all(const char *name)
{
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (name == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (strcmp(name, c->map->consumer_dev_name) == 0)
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans) * (nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		/* take the reference up front so the error path can drop it */
		get_device(&chans[mapind].indio_dev->dev);
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		if (chans[i].indio_dev)
			put_device(&chans[i].indio_dev->dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_st_channel_get_all);

void iio_st_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		put_device(&chan->indio_dev->dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
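
/*
 * Illustrative sketch (not part of this file): fetching every channel
 * mapped to one consumer device.  The returned array is terminated by an
 * entry with a NULL indio_dev, so it can be walked without a separate
 * count, and must be freed with iio_st_channel_release_all().
 */
static int example_consumer_all_channels(void)
{
	struct iio_channel *chans;
	struct iio_channel *chan;

	chans = iio_st_channel_get_all("bar-sensor.0");
	if (IS_ERR(chans))
		return PTR_ERR(chans);

	for (chan = chans; chan->indio_dev; chan++) {
		/* ... use each mapped channel ... */
	}

	iio_st_channel_release_all(chans);
	return 0;
}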

int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
{
	int val2, ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
					      val, &val2, 0);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);

int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					      chan->channel,
					      val, val2,
					      IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
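
/*
 * Illustrative sketch (not part of this file): combining the raw reading
 * with the channel's scale to get a value in the channel's base unit.
 * This assumes the driver reports scale as IIO_VAL_INT_PLUS_MICRO (the
 * return value of read_raw() tells the caller which format was used);
 * a real consumer should check that and guard against overflow.
 */
static int example_read_scaled(struct iio_channel *chan, long long *result)
{
	int raw, scale_int, scale_micro;
	int ret;

	ret = iio_st_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	ret = iio_st_read_channel_scale(chan, &scale_int, &scale_micro);
	if (ret < 0)
		return ret;

	/* scaled value in micro-units: raw * (scale_int + scale_micro / 1e6) */
	*result = (long long)raw * scale_int * 1000000LL +
		  (long long)raw * scale_micro;
	return 0;
}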

int iio_st_get_channel_type(struct iio_channel *chan,
			    enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
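
/*
 * Illustrative sketch (not part of this file): a consumer that only
 * understands voltage channels can check the mapped channel's type
 * before using it.
 */
static int example_require_voltage(struct iio_channel *chan)
{
	enum iio_chan_type type;
	int ret;

	ret = iio_st_get_channel_type(chan, &type);
	if (ret < 0)
		return ret;

	return (type == IIO_VOLTAGE) ? 0 : -EINVAL;
}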