v4.6
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
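
/*
 * Example (editor's sketch, not part of this file): a board file can hand
 * named ADC channels to consumer drivers with a NULL-terminated iio_map
 * table. All names below are hypothetical.
 */
static struct iio_map board_adc_maps[] = {
	{
		.consumer_dev_name = "sensor-hub.0",	/* consumer device */
		.consumer_channel = "battery_voltage",	/* consumer-side name */
		.adc_channel_label = "VIN0",	/* channel's datasheet_name */
	},
	{ },	/* terminator required by iio_map_array_register() */
};

/* In the ADC driver's probe(), after registering indio_dev: */
/*	ret = iio_map_array_register(indio_dev, board_adc_maps); */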


/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. This function performs only one sanity check:
 * whether IIO index is less than num_channels (that is specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
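
/*
 * Example (editor's sketch): with the default xlate above, the
 * "io-channels" phandle argument is simply a channel index. A
 * hypothetical device-tree fragment:
 *
 *	adc: adc@48 {
 *		compatible = "vendor,some-adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;
 *		io-channel-names = "vbat";
 *	};
 *
 * Here index 2 selects the ADC's third channel.
 */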

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
				np->full_name, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has a "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

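/*
 * Example (editor's sketch, not part of this file): minimal consumer
 * usage of the getter/reader/release API. The channel name is
 * hypothetical and error handling is abbreviated.
 */
static int example_read_one_sample(struct device *dev)
{
	struct iio_channel *chan;
	int raw, ret;

	chan = iio_channel_get(dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	ret = iio_read_channel_raw(chan, &raw);
	iio_channel_release(chan);
	return ret;
}
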
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
							scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

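/*
 * Worked example (editor's illustration, not part of this file): a
 * hypothetical 12-bit ADC with a 2500 mV reference reports an
 * IIO_VAL_FRACTIONAL scale of scale_val = 2500, scale_val2 = 4096.
 * For raw = 2048 and a consumer scale of 1:
 *
 *	processed = div_s64(2048 * 2500 * 1, 4096) = 1250	(mV)
 *
 * Passing scale = 1000 instead would yield 1250000, i.e. microvolts.
 */
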
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
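
/*
 * Example (editor's sketch, not part of this file): driving a
 * hypothetical DAC channel with a raw code in device units. Error
 * handling is abbreviated.
 */
static int example_set_dac(struct device *dev, int code)
{
	struct iio_channel *dac;
	int ret;

	dac = iio_channel_get(dev, "vref-out");	/* hypothetical name */
	if (IS_ERR(dac))
		return PTR_ERR(dac);

	ret = iio_write_channel_raw(dac, code);
	iio_channel_release(dac);
	return ret;
}
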
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (!maps)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);

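/*
 * Example (editor's sketch): with the devm_ variant, unregistration is
 * bound to the provider device's lifetime, so probe() needs no matching
 * cleanup call. board_adc_maps is a hypothetical NULL-terminated table
 * like the one sketched after iio_map_array_register() in the v4.6
 * listing above:
 *
 *	ret = devm_iio_map_array_register(dev, indio_dev, board_adc_maps);
 *	if (ret)
 *		return ret;
 */
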
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. This function performs only one sanity check:
 * whether IIO index is less than num_channels (that is specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property.  If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not proceed
			 * with any other lookup. Hence, explicitly return -EINVAL
			 * (maybe not the best error code) so that the caller
			 * won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case), which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has a "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(fwnode, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);

static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (!c)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

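/*
 * Example (editor's sketch): typical managed consumer probe(); the
 * channel is released automatically on driver detach. "vbat" is a
 * hypothetical io-channel-names entry:
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	(may be -EPROBE_DEFER)
 */
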
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV.  Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

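/*
 * Example (editor's sketch): if a channel's processed value is reported
 * in millivolts, a consumer can ask for microvolts by passing
 * scale = 1000:
 *
 *	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 */
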
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

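/*
 * Example (editor's sketch): walking the discrete raw values a channel
 * advertises. On success iio_read_avail_channel_raw() returns
 * IIO_AVAIL_LIST or IIO_AVAIL_RANGE:
 *
 *	const int *vals;
 *	int i, len;
 *
 *	if (iio_read_avail_channel_raw(chan, &vals, &len) == IIO_AVAIL_LIST)
 *		for (i = 0; i < len; i++)
 *			pr_info("raw option %d: %d\n", i, vals[i]);
 */
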
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

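/*
 * Example (editor's sketch): reading a device-specific extended
 * attribute by name. "powerdown" is a hypothetical ext_info entry, and
 * by sysfs convention buf should point to a PAGE_SIZE buffer:
 *
 *	ssize_t len = iio_read_channel_ext_info(chan, "powerdown", buf);
 */
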
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);