Linux Audio

Check our new training course

Loading...
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* The industrial I/O core
   3 *
   4 * Copyright (c) 2008 Jonathan Cameron
   5 *
 
 
 
 
   6 * Handling of buffer allocation / resizing.
   7 *
 
   8 * Things to look at here.
   9 * - Better memory allocation techniques?
  10 * - Alternative access techniques?
  11 */
  12#include <linux/anon_inodes.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/device.h>
  16#include <linux/file.h>
  17#include <linux/fs.h>
  18#include <linux/cdev.h>
  19#include <linux/slab.h>
  20#include <linux/poll.h>
  21#include <linux/sched/signal.h>
  22
  23#include <linux/iio/iio.h>
  24#include <linux/iio/iio-opaque.h>
  25#include "iio_core.h"
  26#include "iio_core_trigger.h"
  27#include <linux/iio/sysfs.h>
  28#include <linux/iio/buffer.h>
  29#include <linux/iio/buffer_impl.h>
  30
  31static const char * const iio_endian_prefix[] = {
  32	[IIO_BE] = "be",
  33	[IIO_LE] = "le",
  34};
  35
  36static bool iio_buffer_is_active(struct iio_buffer *buf)
  37{
  38	return !list_empty(&buf->buffer_list);
  39}
  40
  41static size_t iio_buffer_data_available(struct iio_buffer *buf)
  42{
  43	return buf->access->data_available(buf);
  44}
  45
  46static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
  47				   struct iio_buffer *buf, size_t required)
  48{
  49	if (!indio_dev->info->hwfifo_flush_to_buffer)
  50		return -ENODEV;
  51
  52	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
  53}
  54
  55static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
  56			     size_t to_wait, int to_flush)
  57{
  58	size_t avail;
  59	int flushed = 0;
  60
  61	/* wakeup if the device was unregistered */
  62	if (!indio_dev->info)
  63		return true;
  64
  65	/* drain the buffer if it was disabled */
  66	if (!iio_buffer_is_active(buf)) {
  67		to_wait = min_t(size_t, to_wait, 1);
  68		to_flush = 0;
  69	}
  70
  71	avail = iio_buffer_data_available(buf);
  72
  73	if (avail >= to_wait) {
  74		/* force a flush for non-blocking reads */
  75		if (!to_wait && avail < to_flush)
  76			iio_buffer_flush_hwfifo(indio_dev, buf,
  77						to_flush - avail);
  78		return true;
  79	}
  80
  81	if (to_flush)
  82		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
  83						  to_wait - avail);
  84	if (flushed <= 0)
  85		return false;
  86
  87	if (avail + flushed >= to_wait)
  88		return true;
  89
  90	return false;
  91}
  92
  93/**
  94 * iio_buffer_read() - chrdev read for buffer access
  95 * @filp:	File structure pointer for the char device
  96 * @buf:	Destination buffer for iio buffer read
  97 * @n:		First n bytes to read
  98 * @f_ps:	Long offset provided by the user as a seek position
  99 *
 100 * This function relies on all buffer implementations having an
 101 * iio_buffer as their first element.
 102 *
 103 * Return: negative values corresponding to error codes or ret != 0
 104 *	   for ending the reading activity
 105 **/
 106static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
 107			       size_t n, loff_t *f_ps)
 108{
 109	struct iio_dev_buffer_pair *ib = filp->private_data;
 110	struct iio_buffer *rb = ib->buffer;
 111	struct iio_dev *indio_dev = ib->indio_dev;
 112	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 113	size_t datum_size;
 114	size_t to_wait;
 115	int ret = 0;
 116
 117	if (!indio_dev->info)
 118		return -ENODEV;
 119
 120	if (!rb || !rb->access->read)
 121		return -EINVAL;
 122
 123	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
 124		return -EPERM;
 125
 126	datum_size = rb->bytes_per_datum;
 127
 128	/*
 129	 * If datum_size is 0 there will never be anything to read from the
 130	 * buffer, so signal end of file now.
 131	 */
 132	if (!datum_size)
 133		return 0;
 134
 135	if (filp->f_flags & O_NONBLOCK)
 136		to_wait = 0;
 137	else
 138		to_wait = min_t(size_t, n / datum_size, rb->watermark);
 139
 140	add_wait_queue(&rb->pollq, &wait);
 141	do {
 142		if (!indio_dev->info) {
 143			ret = -ENODEV;
 144			break;
 145		}
 146
 147		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
 148			if (signal_pending(current)) {
 149				ret = -ERESTARTSYS;
 150				break;
 151			}
 152
 153			wait_woken(&wait, TASK_INTERRUPTIBLE,
 154				   MAX_SCHEDULE_TIMEOUT);
 155			continue;
 156		}
 157
 158		ret = rb->access->read(rb, n, buf);
 159		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
 160			ret = -EAGAIN;
 161	} while (ret == 0);
 162	remove_wait_queue(&rb->pollq, &wait);
 163
 164	return ret;
 165}
 166
 167static size_t iio_buffer_space_available(struct iio_buffer *buf)
 168{
 169	if (buf->access->space_available)
 170		return buf->access->space_available(buf);
 171
 172	return SIZE_MAX;
 173}
 174
 175static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
 176				size_t n, loff_t *f_ps)
 177{
 178	struct iio_dev_buffer_pair *ib = filp->private_data;
 179	struct iio_buffer *rb = ib->buffer;
 180	struct iio_dev *indio_dev = ib->indio_dev;
 181	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 182	int ret = 0;
 183	size_t written;
 184
 185	if (!indio_dev->info)
 186		return -ENODEV;
 187
 188	if (!rb || !rb->access->write)
 189		return -EINVAL;
 190
 191	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
 192		return -EPERM;
 193
 194	written = 0;
 195	add_wait_queue(&rb->pollq, &wait);
 196	do {
 197		if (!indio_dev->info)
 198			return -ENODEV;
 199
 200		if (!iio_buffer_space_available(rb)) {
 201			if (signal_pending(current)) {
 202				ret = -ERESTARTSYS;
 203				break;
 204			}
 205
 206			if (filp->f_flags & O_NONBLOCK) {
 207				if (!written)
 208					ret = -EAGAIN;
 209				break;
 210			}
 211
 212			wait_woken(&wait, TASK_INTERRUPTIBLE,
 213				   MAX_SCHEDULE_TIMEOUT);
 214			continue;
 215		}
 216
 217		ret = rb->access->write(rb, n - written, buf + written);
 218		if (ret < 0)
 219			break;
 220
 221		written += ret;
 222
 223	} while (written != n);
 224	remove_wait_queue(&rb->pollq, &wait);
 225
 226	return ret < 0 ? ret : written;
 227}
 228
 229/**
 230 * iio_buffer_poll() - poll the buffer to find out if it has data
 231 * @filp:	File structure pointer for device access
 232 * @wait:	Poll table structure pointer for which the driver adds
 233 *		a wait queue
 234 *
 235 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 236 *	   or 0 for other cases
 237 */
 238static __poll_t iio_buffer_poll(struct file *filp,
 239				struct poll_table_struct *wait)
 240{
 241	struct iio_dev_buffer_pair *ib = filp->private_data;
 242	struct iio_buffer *rb = ib->buffer;
 243	struct iio_dev *indio_dev = ib->indio_dev;
 244
 245	if (!indio_dev->info || !rb)
 246		return 0;
 247
 248	poll_wait(filp, &rb->pollq, wait);
 249
 250	switch (rb->direction) {
 251	case IIO_BUFFER_DIRECTION_IN:
 252		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
 253			return EPOLLIN | EPOLLRDNORM;
 254		break;
 255	case IIO_BUFFER_DIRECTION_OUT:
 256		if (iio_buffer_space_available(rb))
 257			return EPOLLOUT | EPOLLWRNORM;
 258		break;
 259	}
 260
 261	return 0;
 262}
 263
 264ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
 265				size_t n, loff_t *f_ps)
 266{
 267	struct iio_dev_buffer_pair *ib = filp->private_data;
 268	struct iio_buffer *rb = ib->buffer;
 269
 270	/* check if buffer was opened through new API */
 271	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 272		return -EBUSY;
 273
 274	return iio_buffer_read(filp, buf, n, f_ps);
 275}
 276
 277ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
 278				 size_t n, loff_t *f_ps)
 279{
 280	struct iio_dev_buffer_pair *ib = filp->private_data;
 281	struct iio_buffer *rb = ib->buffer;
 282
 283	/* check if buffer was opened through new API */
 284	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 285		return -EBUSY;
 286
 287	return iio_buffer_write(filp, buf, n, f_ps);
 288}
 289
 290__poll_t iio_buffer_poll_wrapper(struct file *filp,
 291				 struct poll_table_struct *wait)
 292{
 293	struct iio_dev_buffer_pair *ib = filp->private_data;
 294	struct iio_buffer *rb = ib->buffer;
 295
 296	/* check if buffer was opened through new API */
 297	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 298		return 0;
 299
 300	return iio_buffer_poll(filp, wait);
 301}
 302
 303/**
 304 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 305 * @indio_dev: The IIO device
 306 *
 307 * Wakes up the event waitqueue used for poll(). Should usually
 308 * be called when the device is unregistered.
 309 */
 310void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
 311{
 312	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 313	struct iio_buffer *buffer;
 314	unsigned int i;
 315
 316	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
 317		buffer = iio_dev_opaque->attached_buffers[i];
 318		wake_up(&buffer->pollq);
 319	}
 320}
 321
 322int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
 323{
 324	if (!buffer || !buffer->access || !buffer->access->remove_from)
 325		return -EINVAL;
 326
 327	return buffer->access->remove_from(buffer, data);
 328}
 329EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
 330
 331void iio_buffer_init(struct iio_buffer *buffer)
 332{
 333	INIT_LIST_HEAD(&buffer->demux_list);
 334	INIT_LIST_HEAD(&buffer->buffer_list);
 335	init_waitqueue_head(&buffer->pollq);
 336	kref_init(&buffer->ref);
 337	if (!buffer->watermark)
 338		buffer->watermark = 1;
 339}
 340EXPORT_SYMBOL(iio_buffer_init);
 341
 342void iio_device_detach_buffers(struct iio_dev *indio_dev)
 343{
 344	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 345	struct iio_buffer *buffer;
 346	unsigned int i;
 347
 348	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
 349		buffer = iio_dev_opaque->attached_buffers[i];
 350		iio_buffer_put(buffer);
 351	}
 352
 353	kfree(iio_dev_opaque->attached_buffers);
 354}
 355
 356static ssize_t iio_show_scan_index(struct device *dev,
 357				   struct device_attribute *attr,
 358				   char *buf)
 359{
 360	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
 361}
 362
 363static ssize_t iio_show_fixed_type(struct device *dev,
 364				   struct device_attribute *attr,
 365				   char *buf)
 366{
 367	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 368	u8 type = this_attr->c->scan_type.endianness;
 369
 370	if (type == IIO_CPU) {
 371#ifdef __LITTLE_ENDIAN
 372		type = IIO_LE;
 373#else
 374		type = IIO_BE;
 375#endif
 376	}
 377	if (this_attr->c->scan_type.repeat > 1)
 378		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
 379		       iio_endian_prefix[type],
 380		       this_attr->c->scan_type.sign,
 381		       this_attr->c->scan_type.realbits,
 382		       this_attr->c->scan_type.storagebits,
 383		       this_attr->c->scan_type.repeat,
 384		       this_attr->c->scan_type.shift);
 385	else
 386		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
 387		       iio_endian_prefix[type],
 388		       this_attr->c->scan_type.sign,
 389		       this_attr->c->scan_type.realbits,
 390		       this_attr->c->scan_type.storagebits,
 391		       this_attr->c->scan_type.shift);
 392}
 393
 394static ssize_t iio_scan_el_show(struct device *dev,
 395				struct device_attribute *attr,
 396				char *buf)
 397{
 398	int ret;
 399	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 400
 401	/* Ensure ret is 0 or 1. */
 402	ret = !!test_bit(to_iio_dev_attr(attr)->address,
 403		       buffer->scan_mask);
 404
 405	return sysfs_emit(buf, "%d\n", ret);
 406}
 407
 408/* Note NULL used as error indicator as it doesn't make sense. */
 409static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 410						unsigned int masklength,
 411						const unsigned long *mask,
 412						bool strict)
 413{
 414	if (bitmap_empty(mask, masklength))
 415		return NULL;
 416	/*
 417	 * The condition here do not handle multi-long masks correctly.
 418	 * It only checks the first long to be zero, and will use such mask
 419	 * as a terminator even if there was bits set after the first long.
 420	 *
 421	 * Correct check would require using:
 422	 * while (!bitmap_empty(av_masks, masklength))
 423	 * instead. This is potentially hazardous because the
 424	 * avaliable_scan_masks is a zero terminated array of longs - and
 425	 * using the proper bitmap_empty() check for multi-long wide masks
 426	 * would require the array to be terminated with multiple zero longs -
 427	 * which is not such an usual pattern.
 428	 *
 429	 * As writing of this no multi-long wide masks were found in-tree, so
 430	 * the simple while (*av_masks) check is working.
 431	 */
 432	while (*av_masks) {
 433		if (strict) {
 434			if (bitmap_equal(mask, av_masks, masklength))
 435				return av_masks;
 436		} else {
 437			if (bitmap_subset(mask, av_masks, masklength))
 438				return av_masks;
 439		}
 440		av_masks += BITS_TO_LONGS(masklength);
 441	}
 442	return NULL;
 443}
 444
 445static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
 446				   const unsigned long *mask)
 447{
 448	if (!indio_dev->setup_ops->validate_scan_mask)
 449		return true;
 450
 451	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
 452}
 453
 454/**
 455 * iio_scan_mask_set() - set particular bit in the scan mask
 456 * @indio_dev: the iio device
 457 * @buffer: the buffer whose scan mask we are interested in
 458 * @bit: the bit to be set.
 459 *
 460 * Note that at this point we have no way of knowing what other
 461 * buffers might request, hence this code only verifies that the
 462 * individual buffers request is plausible.
 463 */
 464static int iio_scan_mask_set(struct iio_dev *indio_dev,
 465			     struct iio_buffer *buffer, int bit)
 466{
 467	const unsigned long *mask;
 468	unsigned long *trialmask;
 469
 470	if (!indio_dev->masklength) {
 471		WARN(1, "Trying to set scanmask prior to registering buffer\n");
 472		return -EINVAL;
 473	}
 474
 475	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
 476	if (!trialmask)
 477		return -ENOMEM;
 478	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
 479	set_bit(bit, trialmask);
 480
 481	if (!iio_validate_scan_mask(indio_dev, trialmask))
 482		goto err_invalid_mask;
 483
 484	if (indio_dev->available_scan_masks) {
 485		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
 486					   indio_dev->masklength,
 487					   trialmask, false);
 488		if (!mask)
 489			goto err_invalid_mask;
 490	}
 491	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
 492
 493	bitmap_free(trialmask);
 494
 495	return 0;
 496
 497err_invalid_mask:
 498	bitmap_free(trialmask);
 499	return -EINVAL;
 500}
 501
 502static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
 503{
 504	clear_bit(bit, buffer->scan_mask);
 505	return 0;
 506}
 507
 508static int iio_scan_mask_query(struct iio_dev *indio_dev,
 509			       struct iio_buffer *buffer, int bit)
 510{
 511	if (bit > indio_dev->masklength)
 512		return -EINVAL;
 513
 514	if (!buffer->scan_mask)
 515		return 0;
 516
 517	/* Ensure return value is 0 or 1. */
 518	return !!test_bit(bit, buffer->scan_mask);
 519};
 520
 521static ssize_t iio_scan_el_store(struct device *dev,
 522				 struct device_attribute *attr,
 523				 const char *buf,
 524				 size_t len)
 525{
 526	int ret;
 527	bool state;
 528	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 529	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 530	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 531	struct iio_buffer *buffer = this_attr->buffer;
 532
 533	ret = kstrtobool(buf, &state);
 534	if (ret < 0)
 535		return ret;
 536	mutex_lock(&iio_dev_opaque->mlock);
 537	if (iio_buffer_is_active(buffer)) {
 538		ret = -EBUSY;
 539		goto error_ret;
 540	}
 541	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
 542	if (ret < 0)
 543		goto error_ret;
 544	if (!state && ret) {
 545		ret = iio_scan_mask_clear(buffer, this_attr->address);
 546		if (ret)
 547			goto error_ret;
 548	} else if (state && !ret) {
 549		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
 550		if (ret)
 551			goto error_ret;
 552	}
 553
 554error_ret:
 555	mutex_unlock(&iio_dev_opaque->mlock);
 556
 557	return ret < 0 ? ret : len;
 
 558}
 559
 560static ssize_t iio_scan_el_ts_show(struct device *dev,
 561				   struct device_attribute *attr,
 562				   char *buf)
 563{
 564	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 565
 566	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
 567}
 568
 569static ssize_t iio_scan_el_ts_store(struct device *dev,
 570				    struct device_attribute *attr,
 571				    const char *buf,
 572				    size_t len)
 573{
 574	int ret;
 575	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 576	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 577	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 578	bool state;
 579
 580	ret = kstrtobool(buf, &state);
 581	if (ret < 0)
 582		return ret;
 583
 584	mutex_lock(&iio_dev_opaque->mlock);
 585	if (iio_buffer_is_active(buffer)) {
 586		ret = -EBUSY;
 587		goto error_ret;
 588	}
 589	buffer->scan_timestamp = state;
 590error_ret:
 591	mutex_unlock(&iio_dev_opaque->mlock);
 592
 593	return ret ? ret : len;
 594}
 595
 596static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
 597					struct iio_buffer *buffer,
 598					const struct iio_chan_spec *chan)
 599{
 600	int ret, attrcount = 0;
 
 601
 602	ret = __iio_add_chan_devattr("index",
 603				     chan,
 604				     &iio_show_scan_index,
 605				     NULL,
 606				     0,
 607				     IIO_SEPARATE,
 608				     &indio_dev->dev,
 609				     buffer,
 610				     &buffer->buffer_attr_list);
 611	if (ret)
 612		return ret;
 613	attrcount++;
 614	ret = __iio_add_chan_devattr("type",
 615				     chan,
 616				     &iio_show_fixed_type,
 617				     NULL,
 618				     0,
 619				     IIO_SEPARATE,
 620				     &indio_dev->dev,
 621				     buffer,
 622				     &buffer->buffer_attr_list);
 623	if (ret)
 624		return ret;
 625	attrcount++;
 626	if (chan->type != IIO_TIMESTAMP)
 627		ret = __iio_add_chan_devattr("en",
 628					     chan,
 629					     &iio_scan_el_show,
 630					     &iio_scan_el_store,
 631					     chan->scan_index,
 632					     IIO_SEPARATE,
 633					     &indio_dev->dev,
 634					     buffer,
 635					     &buffer->buffer_attr_list);
 636	else
 637		ret = __iio_add_chan_devattr("en",
 638					     chan,
 639					     &iio_scan_el_ts_show,
 640					     &iio_scan_el_ts_store,
 641					     chan->scan_index,
 642					     IIO_SEPARATE,
 643					     &indio_dev->dev,
 644					     buffer,
 645					     &buffer->buffer_attr_list);
 646	if (ret)
 647		return ret;
 648	attrcount++;
 649	ret = attrcount;
 650	return ret;
 651}
 652
 653static ssize_t length_show(struct device *dev, struct device_attribute *attr,
 654			   char *buf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 655{
 656	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 
 
 
 
 
 
 
 
 
 
 
 657
 658	return sysfs_emit(buf, "%d\n", buffer->length);
 
 
 
 
 659}
 
 660
 661static ssize_t length_store(struct device *dev, struct device_attribute *attr,
 662			    const char *buf, size_t len)
 
 
 663{
 664	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 665	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 666	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 667	unsigned int val;
 668	int ret;
 669
 670	ret = kstrtouint(buf, 10, &val);
 671	if (ret)
 672		return ret;
 673
 674	if (val == buffer->length)
 675		return len;
 
 676
 677	mutex_lock(&iio_dev_opaque->mlock);
 678	if (iio_buffer_is_active(buffer)) {
 679		ret = -EBUSY;
 680	} else {
 681		buffer->access->set_length(buffer, val);
 
 682		ret = 0;
 683	}
 684	if (ret)
 685		goto out;
 686	if (buffer->length && buffer->length < buffer->watermark)
 687		buffer->watermark = buffer->length;
 688out:
 689	mutex_unlock(&iio_dev_opaque->mlock);
 690
 691	return ret ? ret : len;
 692}
 
 693
 694static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
 695			   char *buf)
 696{
 697	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 698
 699	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
 700}
 701
 702static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
 703					     unsigned int scan_index)
 704{
 705	const struct iio_chan_spec *ch;
 706	unsigned int bytes;
 707
 708	ch = iio_find_channel_from_si(indio_dev, scan_index);
 709	bytes = ch->scan_type.storagebits / 8;
 710	if (ch->scan_type.repeat > 1)
 711		bytes *= ch->scan_type.repeat;
 712	return bytes;
 713}
 
 714
 715static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
 
 
 
 716{
 717	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 718
 719	return iio_storage_bytes_for_si(indio_dev,
 720					iio_dev_opaque->scan_index_timestamp);
 
 
 
 
 721}
 722
 723static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 724				  const unsigned long *mask, bool timestamp)
 725{
 726	unsigned int bytes = 0;
 727	int length, i, largest = 0;
 
 728
 729	/* How much space will the demuxed element take? */
 730	for_each_set_bit(i, mask,
 731			 indio_dev->masklength) {
 732		length = iio_storage_bytes_for_si(indio_dev, i);
 
 733		bytes = ALIGN(bytes, length);
 734		bytes += length;
 735		largest = max(largest, length);
 736	}
 737
 738	if (timestamp) {
 739		length = iio_storage_bytes_for_timestamp(indio_dev);
 
 
 740		bytes = ALIGN(bytes, length);
 741		bytes += length;
 742		largest = max(largest, length);
 743	}
 744
 745	bytes = ALIGN(bytes, largest);
 746	return bytes;
 747}
 748
 749static void iio_buffer_activate(struct iio_dev *indio_dev,
 750				struct iio_buffer *buffer)
 751{
 752	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 753
 754	iio_buffer_get(buffer);
 755	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
 756}
 757
 758static void iio_buffer_deactivate(struct iio_buffer *buffer)
 759{
 760	list_del_init(&buffer->buffer_list);
 761	wake_up_interruptible(&buffer->pollq);
 762	iio_buffer_put(buffer);
 763}
 764
 765static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
 766{
 767	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 768	struct iio_buffer *buffer, *_buffer;
 769
 
 
 
 
 
 
 770	list_for_each_entry_safe(buffer, _buffer,
 771				 &iio_dev_opaque->buffer_list, buffer_list)
 772		iio_buffer_deactivate(buffer);
 773}
 774
 775static int iio_buffer_enable(struct iio_buffer *buffer,
 776			     struct iio_dev *indio_dev)
 777{
 778	if (!buffer->access->enable)
 779		return 0;
 780	return buffer->access->enable(buffer, indio_dev);
 781}
 782
 783static int iio_buffer_disable(struct iio_buffer *buffer,
 784			      struct iio_dev *indio_dev)
 785{
 786	if (!buffer->access->disable)
 787		return 0;
 788	return buffer->access->disable(buffer, indio_dev);
 789}
 790
 791static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
 792					      struct iio_buffer *buffer)
 793{
 794	unsigned int bytes;
 795
 796	if (!buffer->access->set_bytes_per_datum)
 797		return;
 798
 799	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
 800				       buffer->scan_timestamp);
 801
 802	buffer->access->set_bytes_per_datum(buffer, bytes);
 803}
 804
 805static int iio_buffer_request_update(struct iio_dev *indio_dev,
 806				     struct iio_buffer *buffer)
 
 807{
 808	int ret;
 
 
 
 
 809
 810	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
 811	if (buffer->access->request_update) {
 812		ret = buffer->access->request_update(buffer);
 813		if (ret) {
 814			dev_dbg(&indio_dev->dev,
 815				"Buffer not started: buffer parameter update failed (%d)\n",
 816				ret);
 817			return ret;
 
 
 
 
 818		}
 819	}
 820
 821	return 0;
 822}
 823
 824static void iio_free_scan_mask(struct iio_dev *indio_dev,
 825			       const unsigned long *mask)
 826{
 827	/* If the mask is dynamically allocated free it, otherwise do nothing */
 828	if (!indio_dev->available_scan_masks)
 829		bitmap_free(mask);
 830}
 831
 832struct iio_device_config {
 833	unsigned int mode;
 834	unsigned int watermark;
 835	const unsigned long *scan_mask;
 836	unsigned int scan_bytes;
 837	bool scan_timestamp;
 838};
 839
 840static int iio_verify_update(struct iio_dev *indio_dev,
 841			     struct iio_buffer *insert_buffer,
 842			     struct iio_buffer *remove_buffer,
 843			     struct iio_device_config *config)
 844{
 845	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 846	unsigned long *compound_mask;
 847	const unsigned long *scan_mask;
 848	bool strict_scanmask = false;
 849	struct iio_buffer *buffer;
 850	bool scan_timestamp;
 851	unsigned int modes;
 852
 853	if (insert_buffer &&
 854	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
 855		dev_dbg(&indio_dev->dev,
 856			"At least one scan element must be enabled first\n");
 857		return -EINVAL;
 858	}
 859
 860	memset(config, 0, sizeof(*config));
 861	config->watermark = ~0;
 862
 863	/*
 864	 * If there is just one buffer and we are removing it there is nothing
 865	 * to verify.
 866	 */
 867	if (remove_buffer && !insert_buffer &&
 868	    list_is_singular(&iio_dev_opaque->buffer_list))
 869		return 0;
 870
 871	modes = indio_dev->modes;
 872
 873	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 874		if (buffer == remove_buffer)
 875			continue;
 876		modes &= buffer->access->modes;
 877		config->watermark = min(config->watermark, buffer->watermark);
 878	}
 879
 880	if (insert_buffer) {
 881		modes &= insert_buffer->access->modes;
 882		config->watermark = min(config->watermark,
 883					insert_buffer->watermark);
 884	}
 885
 886	/* Definitely possible for devices to support both of these. */
 887	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
 888		config->mode = INDIO_BUFFER_TRIGGERED;
 889	} else if (modes & INDIO_BUFFER_HARDWARE) {
 890		/*
 891		 * Keep things simple for now and only allow a single buffer to
 892		 * be connected in hardware mode.
 893		 */
 894		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
 895			return -EINVAL;
 896		config->mode = INDIO_BUFFER_HARDWARE;
 897		strict_scanmask = true;
 898	} else if (modes & INDIO_BUFFER_SOFTWARE) {
 899		config->mode = INDIO_BUFFER_SOFTWARE;
 900	} else {
 901		/* Can only occur on first buffer */
 902		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
 903			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
 904		return -EINVAL;
 905	}
 906
 907	/* What scan mask do we actually have? */
 908	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
 909	if (!compound_mask)
 
 
 
 910		return -ENOMEM;
 
 
 911
 912	scan_timestamp = false;
 913
 914	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 915		if (buffer == remove_buffer)
 916			continue;
 917		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
 918			  indio_dev->masklength);
 919		scan_timestamp |= buffer->scan_timestamp;
 920	}
 921
 922	if (insert_buffer) {
 923		bitmap_or(compound_mask, compound_mask,
 924			  insert_buffer->scan_mask, indio_dev->masklength);
 925		scan_timestamp |= insert_buffer->scan_timestamp;
 926	}
 927
 928	if (indio_dev->available_scan_masks) {
 929		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
 930						indio_dev->masklength,
 931						compound_mask,
 932						strict_scanmask);
 933		bitmap_free(compound_mask);
 934		if (!scan_mask)
 935			return -EINVAL;
 936	} else {
 937		scan_mask = compound_mask;
 938	}
 939
 940	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
 941						    scan_mask, scan_timestamp);
 942	config->scan_mask = scan_mask;
 943	config->scan_timestamp = scan_timestamp;
 944
 945	return 0;
 946}
 947
 948/**
 949 * struct iio_demux_table - table describing demux memcpy ops
 950 * @from:	index to copy from
 951 * @to:		index to copy to
 952 * @length:	how many bytes to copy
 953 * @l:		list head used for management
 954 */
 955struct iio_demux_table {
 956	unsigned int from;
 957	unsigned int to;
 958	unsigned int length;
 959	struct list_head l;
 960};
 961
 962static void iio_buffer_demux_free(struct iio_buffer *buffer)
 963{
 964	struct iio_demux_table *p, *q;
 965
 966	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
 967		list_del(&p->l);
 968		kfree(p);
 969	}
 970}
 971
 972static int iio_buffer_add_demux(struct iio_buffer *buffer,
 973				struct iio_demux_table **p, unsigned int in_loc,
 974				unsigned int out_loc,
 975				unsigned int length)
 976{
 977	if (*p && (*p)->from + (*p)->length == in_loc &&
 978	    (*p)->to + (*p)->length == out_loc) {
 979		(*p)->length += length;
 980	} else {
 981		*p = kmalloc(sizeof(**p), GFP_KERNEL);
 982		if (!(*p))
 983			return -ENOMEM;
 984		(*p)->from = in_loc;
 985		(*p)->to = out_loc;
 986		(*p)->length = length;
 987		list_add_tail(&(*p)->l, &buffer->demux_list);
 988	}
 989
 990	return 0;
 991}
 992
 993static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 994				   struct iio_buffer *buffer)
 995{
 996	int ret, in_ind = -1, out_ind, length;
 997	unsigned int in_loc = 0, out_loc = 0;
 998	struct iio_demux_table *p = NULL;
 999
1000	/* Clear out any old demux */
1001	iio_buffer_demux_free(buffer);
1002	kfree(buffer->demux_bounce);
1003	buffer->demux_bounce = NULL;
1004
1005	/* First work out which scan mode we will actually have */
1006	if (bitmap_equal(indio_dev->active_scan_mask,
1007			 buffer->scan_mask,
1008			 indio_dev->masklength))
1009		return 0;
1010
1011	/* Now we have the two masks, work from least sig and build up sizes */
1012	for_each_set_bit(out_ind,
1013			 buffer->scan_mask,
1014			 indio_dev->masklength) {
1015		in_ind = find_next_bit(indio_dev->active_scan_mask,
1016				       indio_dev->masklength,
1017				       in_ind + 1);
1018		while (in_ind != out_ind) {
1019			length = iio_storage_bytes_for_si(indio_dev, in_ind);
1020			/* Make sure we are aligned */
1021			in_loc = roundup(in_loc, length) + length;
1022			in_ind = find_next_bit(indio_dev->active_scan_mask,
1023					       indio_dev->masklength,
1024					       in_ind + 1);
1025		}
1026		length = iio_storage_bytes_for_si(indio_dev, in_ind);
1027		out_loc = roundup(out_loc, length);
1028		in_loc = roundup(in_loc, length);
1029		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1030		if (ret)
1031			goto error_clear_mux_table;
1032		out_loc += length;
1033		in_loc += length;
1034	}
1035	/* Relies on scan_timestamp being last */
1036	if (buffer->scan_timestamp) {
1037		length = iio_storage_bytes_for_timestamp(indio_dev);
1038		out_loc = roundup(out_loc, length);
1039		in_loc = roundup(in_loc, length);
1040		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1041		if (ret)
1042			goto error_clear_mux_table;
1043		out_loc += length;
1044	}
1045	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1046	if (!buffer->demux_bounce) {
1047		ret = -ENOMEM;
1048		goto error_clear_mux_table;
1049	}
1050	return 0;
1051
1052error_clear_mux_table:
1053	iio_buffer_demux_free(buffer);
1054
1055	return ret;
1056}
1057
1058static int iio_update_demux(struct iio_dev *indio_dev)
1059{
1060	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1061	struct iio_buffer *buffer;
1062	int ret;
1063
1064	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1065		ret = iio_buffer_update_demux(indio_dev, buffer);
1066		if (ret < 0)
1067			goto error_clear_mux_table;
1068	}
1069	return 0;
1070
1071error_clear_mux_table:
1072	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1073		iio_buffer_demux_free(buffer);
1074
1075	return ret;
1076}
1077
1078static int iio_enable_buffers(struct iio_dev *indio_dev,
1079			      struct iio_device_config *config)
1080{
1081	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1082	struct iio_buffer *buffer, *tmp = NULL;
1083	int ret;
1084
1085	indio_dev->active_scan_mask = config->scan_mask;
1086	indio_dev->scan_timestamp = config->scan_timestamp;
1087	indio_dev->scan_bytes = config->scan_bytes;
1088	iio_dev_opaque->currentmode = config->mode;
1089
1090	iio_update_demux(indio_dev);
1091
1092	/* Wind up again */
1093	if (indio_dev->setup_ops->preenable) {
1094		ret = indio_dev->setup_ops->preenable(indio_dev);
1095		if (ret) {
1096			dev_dbg(&indio_dev->dev,
1097				"Buffer not started: buffer preenable failed (%d)\n", ret);
1098			goto err_undo_config;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1099		}
1100	}
1101
1102	if (indio_dev->info->update_scan_mode) {
1103		ret = indio_dev->info
1104			->update_scan_mode(indio_dev,
1105					   indio_dev->active_scan_mask);
1106		if (ret < 0) {
1107			dev_dbg(&indio_dev->dev,
1108				"Buffer not started: update scan mode failed (%d)\n",
1109				ret);
1110			goto err_run_postdisable;
1111		}
1112	}
1113
1114	if (indio_dev->info->hwfifo_set_watermark)
1115		indio_dev->info->hwfifo_set_watermark(indio_dev,
1116			config->watermark);
1117
1118	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1119		ret = iio_buffer_enable(buffer, indio_dev);
1120		if (ret) {
1121			tmp = buffer;
1122			goto err_disable_buffers;
1123		}
1124	}
1125
1126	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1127		ret = iio_trigger_attach_poll_func(indio_dev->trig,
1128						   indio_dev->pollfunc);
1129		if (ret)
1130			goto err_disable_buffers;
1131	}
1132
1133	if (indio_dev->setup_ops->postenable) {
1134		ret = indio_dev->setup_ops->postenable(indio_dev);
1135		if (ret) {
1136			dev_dbg(&indio_dev->dev,
1137				"Buffer not started: postenable failed (%d)\n", ret);
1138			goto err_detach_pollfunc;
 
 
 
1139		}
1140	}
1141
1142	return 0;
 
 
 
 
 
1143
1144err_detach_pollfunc:
1145	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1146		iio_trigger_detach_poll_func(indio_dev->trig,
1147					     indio_dev->pollfunc);
1148	}
1149err_disable_buffers:
1150	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
1151	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1152					     buffer_list)
1153		iio_buffer_disable(buffer, indio_dev);
1154err_run_postdisable:
1155	if (indio_dev->setup_ops->postdisable)
1156		indio_dev->setup_ops->postdisable(indio_dev);
1157err_undo_config:
1158	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1159	indio_dev->active_scan_mask = NULL;
1160
1161	return ret;
1162}
1163
1164static int iio_disable_buffers(struct iio_dev *indio_dev)
1165{
1166	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1167	struct iio_buffer *buffer;
1168	int ret = 0;
1169	int ret2;
1170
1171	/* Wind down existing buffers - iff there are any */
1172	if (list_empty(&iio_dev_opaque->buffer_list))
1173		return 0;
1174
1175	/*
1176	 * If things go wrong at some step in disable we still need to continue
1177	 * to perform the other steps, otherwise we leave the device in a
1178	 * inconsistent state. We return the error code for the first error we
1179	 * encountered.
1180	 */
1181
1182	if (indio_dev->setup_ops->predisable) {
1183		ret2 = indio_dev->setup_ops->predisable(indio_dev);
1184		if (ret2 && !ret)
1185			ret = ret2;
1186	}
1187
1188	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1189		iio_trigger_detach_poll_func(indio_dev->trig,
1190					     indio_dev->pollfunc);
1191	}
1192
1193	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1194		ret2 = iio_buffer_disable(buffer, indio_dev);
1195		if (ret2 && !ret)
1196			ret = ret2;
1197	}
1198
1199	if (indio_dev->setup_ops->postdisable) {
1200		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1201		if (ret2 && !ret)
1202			ret = ret2;
1203	}
1204
1205	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1206	indio_dev->active_scan_mask = NULL;
1207	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1208
1209	return ret;
1210}
1211
1212static int __iio_update_buffers(struct iio_dev *indio_dev,
1213				struct iio_buffer *insert_buffer,
1214				struct iio_buffer *remove_buffer)
1215{
1216	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1217	struct iio_device_config new_config;
1218	int ret;
1219
1220	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1221				&new_config);
1222	if (ret)
1223		return ret;
1224
1225	if (insert_buffer) {
1226		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1227		if (ret)
1228			goto err_free_config;
1229	}
1230
1231	ret = iio_disable_buffers(indio_dev);
1232	if (ret)
1233		goto err_deactivate_all;
1234
1235	if (remove_buffer)
1236		iio_buffer_deactivate(remove_buffer);
1237	if (insert_buffer)
1238		iio_buffer_activate(indio_dev, insert_buffer);
1239
1240	/* If no buffers in list, we are done */
1241	if (list_empty(&iio_dev_opaque->buffer_list))
1242		return 0;
1243
1244	ret = iio_enable_buffers(indio_dev, &new_config);
1245	if (ret)
1246		goto err_deactivate_all;
1247
1248	return 0;
1249
1250err_deactivate_all:
1251	/*
1252	 * We've already verified that the config is valid earlier. If things go
1253	 * wrong in either enable or disable the most likely reason is an IO
1254	 * error from the device. In this case there is no good recovery
1255	 * strategy. Just make sure to disable everything and leave the device
1256	 * in a sane state.  With a bit of luck the device might come back to
1257	 * life again later and userspace can try again.
1258	 */
1259	iio_buffer_deactivate_all(indio_dev);
1260
1261err_free_config:
1262	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1263	return ret;
1264}
1265
1266int iio_update_buffers(struct iio_dev *indio_dev,
1267		       struct iio_buffer *insert_buffer,
1268		       struct iio_buffer *remove_buffer)
1269{
1270	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1271	int ret;
1272
1273	if (insert_buffer == remove_buffer)
1274		return 0;
1275
1276	if (insert_buffer &&
1277	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
1278		return -EINVAL;
1279
1280	mutex_lock(&iio_dev_opaque->info_exist_lock);
1281	mutex_lock(&iio_dev_opaque->mlock);
1282
1283	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1284		insert_buffer = NULL;
1285
1286	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1287		remove_buffer = NULL;
1288
1289	if (!insert_buffer && !remove_buffer) {
1290		ret = 0;
1291		goto out_unlock;
1292	}
1293
1294	if (!indio_dev->info) {
1295		ret = -ENODEV;
1296		goto out_unlock;
1297	}
1298
1299	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1300
1301out_unlock:
1302	mutex_unlock(&iio_dev_opaque->mlock);
1303	mutex_unlock(&iio_dev_opaque->info_exist_lock);
1304
1305	return ret;
1306}
1307EXPORT_SYMBOL_GPL(iio_update_buffers);
1308
1309void iio_disable_all_buffers(struct iio_dev *indio_dev)
1310{
1311	iio_disable_buffers(indio_dev);
1312	iio_buffer_deactivate_all(indio_dev);
1313}
1314
1315static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1316			    const char *buf, size_t len)
1317{
1318	int ret;
1319	bool requested_state;
1320	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1321	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1322	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1323	bool inlist;
1324
1325	ret = kstrtobool(buf, &requested_state);
1326	if (ret < 0)
1327		return ret;
1328
1329	mutex_lock(&iio_dev_opaque->mlock);
1330
1331	/* Find out if it is in the list */
1332	inlist = iio_buffer_is_active(buffer);
1333	/* Already in desired state */
1334	if (inlist == requested_state)
1335		goto done;
1336
1337	if (requested_state)
1338		ret = __iio_update_buffers(indio_dev, buffer, NULL);
 
1339	else
1340		ret = __iio_update_buffers(indio_dev, NULL, buffer);
 
1341
 
 
1342done:
1343	mutex_unlock(&iio_dev_opaque->mlock);
1344	return (ret < 0) ? ret : len;
1345}
 
1346
1347static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
1348			      char *buf)
 
 
 
 
 
 
 
 
 
1349{
1350	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1351
1352	return sysfs_emit(buf, "%u\n", buffer->watermark);
1353}
 
1354
1355static ssize_t watermark_store(struct device *dev,
1356			       struct device_attribute *attr,
1357			       const char *buf, size_t len)
1358{
1359	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1360	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1361	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1362	unsigned int val;
1363	int ret;
1364
1365	ret = kstrtouint(buf, 10, &val);
1366	if (ret)
1367		return ret;
1368	if (!val)
1369		return -EINVAL;
1370
1371	mutex_lock(&iio_dev_opaque->mlock);
1372
1373	if (val > buffer->length) {
1374		ret = -EINVAL;
1375		goto out;
1376	}
1377
1378	if (iio_buffer_is_active(buffer)) {
1379		ret = -EBUSY;
1380		goto out;
1381	}
1382
1383	buffer->watermark = val;
1384out:
1385	mutex_unlock(&iio_dev_opaque->mlock);
1386
1387	return ret ? ret : len;
1388}
1389
1390static ssize_t data_available_show(struct device *dev,
1391				   struct device_attribute *attr, char *buf)
 
 
 
 
 
 
 
 
 
 
1392{
1393	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1394
1395	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1396}
1397
1398static ssize_t direction_show(struct device *dev,
1399			      struct device_attribute *attr,
1400			      char *buf)
1401{
1402	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1403
1404	switch (buffer->direction) {
1405	case IIO_BUFFER_DIRECTION_IN:
1406		return sysfs_emit(buf, "in\n");
1407	case IIO_BUFFER_DIRECTION_OUT:
1408		return sysfs_emit(buf, "out\n");
1409	default:
1410		return -EINVAL;
1411	}
1412}
1413
1414static DEVICE_ATTR_RW(length);
1415static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
1416static DEVICE_ATTR_RW(enable);
1417static DEVICE_ATTR_RW(watermark);
1418static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
1419static DEVICE_ATTR_RO(data_available);
1420static DEVICE_ATTR_RO(direction);
1421
1422/*
1423 * When adding new attributes here, put the at the end, at least until
1424 * the code that handles the length/length_ro & watermark/watermark_ro
1425 * assignments gets cleaned up. Otherwise these can create some weird
1426 * duplicate attributes errors under some setups.
1427 */
1428static struct attribute *iio_buffer_attrs[] = {
1429	&dev_attr_length.attr,
1430	&dev_attr_enable.attr,
1431	&dev_attr_watermark.attr,
1432	&dev_attr_data_available.attr,
1433	&dev_attr_direction.attr,
1434};
1435
1436#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1437
1438static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1439					      struct attribute *attr)
1440{
1441	struct device_attribute *dattr = to_dev_attr(attr);
1442	struct iio_dev_attr *iio_attr;
1443
1444	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1445	if (!iio_attr)
1446		return NULL;
1447
1448	iio_attr->buffer = buffer;
1449	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1450	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1451	if (!iio_attr->dev_attr.attr.name) {
1452		kfree(iio_attr);
1453		return NULL;
1454	}
 
1455
1456	sysfs_attr_init(&iio_attr->dev_attr.attr);
1457
1458	list_add(&iio_attr->l, &buffer->buffer_attr_list);
1459
1460	return &iio_attr->dev_attr.attr;
 
 
1461}
 
1462
1463static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1464						   struct attribute **buffer_attrs,
1465						   int buffer_attrcount,
1466						   int scan_el_attrcount)
1467{
1468	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1469	struct attribute_group *group;
1470	struct attribute **attrs;
1471	int ret;
1472
1473	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1474	if (!attrs)
1475		return -ENOMEM;
1476
1477	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1478
1479	group = &iio_dev_opaque->legacy_buffer_group;
1480	group->attrs = attrs;
1481	group->name = "buffer";
1482
1483	ret = iio_device_register_sysfs_group(indio_dev, group);
1484	if (ret)
1485		goto error_free_buffer_attrs;
1486
1487	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1488	if (!attrs) {
1489		ret = -ENOMEM;
1490		goto error_free_buffer_attrs;
1491	}
1492
1493	memcpy(attrs, &buffer_attrs[buffer_attrcount],
1494	       scan_el_attrcount * sizeof(*attrs));
1495
1496	group = &iio_dev_opaque->legacy_scan_el_group;
1497	group->attrs = attrs;
1498	group->name = "scan_elements";
 
1499
1500	ret = iio_device_register_sysfs_group(indio_dev, group);
1501	if (ret)
1502		goto error_free_scan_el_attrs;
 
 
 
 
 
 
 
 
 
 
1503
1504	return 0;
 
 
 
1505
1506error_free_scan_el_attrs:
1507	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1508error_free_buffer_attrs:
1509	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
 
1510
1511	return ret;
1512}
1513
1514static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1515{
1516	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1517
1518	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1519	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1520}
1521
1522static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1523{
1524	struct iio_dev_buffer_pair *ib = filep->private_data;
1525	struct iio_dev *indio_dev = ib->indio_dev;
1526	struct iio_buffer *buffer = ib->buffer;
1527
1528	wake_up(&buffer->pollq);
1529
1530	kfree(ib);
1531	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1532	iio_device_put(indio_dev);
1533
1534	return 0;
1535}
1536
1537static const struct file_operations iio_buffer_chrdev_fileops = {
1538	.owner = THIS_MODULE,
1539	.llseek = noop_llseek,
1540	.read = iio_buffer_read,
1541	.write = iio_buffer_write,
1542	.poll = iio_buffer_poll,
1543	.release = iio_buffer_chrdev_release,
1544};
1545
1546static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1547{
1548	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1549	int __user *ival = (int __user *)arg;
1550	struct iio_dev_buffer_pair *ib;
1551	struct iio_buffer *buffer;
1552	int fd, idx, ret;
1553
1554	if (copy_from_user(&idx, ival, sizeof(idx)))
1555		return -EFAULT;
1556
1557	if (idx >= iio_dev_opaque->attached_buffers_cnt)
1558		return -ENODEV;
1559
1560	iio_device_get(indio_dev);
1561
1562	buffer = iio_dev_opaque->attached_buffers[idx];
1563
1564	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1565		ret = -EBUSY;
1566		goto error_iio_dev_put;
1567	}
1568
1569	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1570	if (!ib) {
1571		ret = -ENOMEM;
1572		goto error_clear_busy_bit;
1573	}
1574
1575	ib->indio_dev = indio_dev;
1576	ib->buffer = buffer;
1577
1578	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1579			      ib, O_RDWR | O_CLOEXEC);
1580	if (fd < 0) {
1581		ret = fd;
1582		goto error_free_ib;
1583	}
1584
1585	if (copy_to_user(ival, &fd, sizeof(fd))) {
1586		/*
1587		 * "Leak" the fd, as there's not much we can do about this
1588		 * anyway. 'fd' might have been closed already, as
1589		 * anon_inode_getfd() called fd_install() on it, which made
1590		 * it reachable by userland.
1591		 *
1592		 * Instead of allowing a malicious user to play tricks with
1593		 * us, rely on the process exit path to do any necessary
1594		 * cleanup, as in releasing the file, if still needed.
1595		 */
1596		return -EFAULT;
1597	}
1598
1599	return 0;
1600
1601error_free_ib:
1602	kfree(ib);
1603error_clear_busy_bit:
1604	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1605error_iio_dev_put:
1606	iio_device_put(indio_dev);
1607	return ret;
1608}
1609
1610static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1611				    unsigned int cmd, unsigned long arg)
1612{
1613	switch (cmd) {
1614	case IIO_BUFFER_GET_FD_IOCTL:
1615		return iio_device_buffer_getfd(indio_dev, arg);
1616	default:
1617		return IIO_IOCTL_UNHANDLED;
1618	}
1619}
 
1620
1621static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
1622					     struct iio_dev *indio_dev,
1623					     int index)
1624{
1625	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1626	struct iio_dev_attr *p;
1627	const struct iio_dev_attr *id_attr;
1628	struct attribute **attr;
1629	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
1630	const struct iio_chan_spec *channels;
1631
1632	buffer_attrcount = 0;
1633	if (buffer->attrs) {
1634		while (buffer->attrs[buffer_attrcount])
1635			buffer_attrcount++;
1636	}
1637	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
1638
1639	scan_el_attrcount = 0;
1640	INIT_LIST_HEAD(&buffer->buffer_attr_list);
1641	channels = indio_dev->channels;
1642	if (channels) {
1643		/* new magic */
1644		for (i = 0; i < indio_dev->num_channels; i++) {
1645			if (channels[i].scan_index < 0)
1646				continue;
1647
1648			/* Verify that sample bits fit into storage */
1649			if (channels[i].scan_type.storagebits <
1650			    channels[i].scan_type.realbits +
1651			    channels[i].scan_type.shift) {
1652				dev_err(&indio_dev->dev,
1653					"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
1654					i, channels[i].scan_type.storagebits,
1655					channels[i].scan_type.realbits,
1656					channels[i].scan_type.shift);
1657				ret = -EINVAL;
1658				goto error_cleanup_dynamic;
1659			}
1660
1661			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1662							   &channels[i]);
1663			if (ret < 0)
1664				goto error_cleanup_dynamic;
1665			scan_el_attrcount += ret;
1666			if (channels[i].type == IIO_TIMESTAMP)
1667				iio_dev_opaque->scan_index_timestamp =
1668					channels[i].scan_index;
 
 
 
 
 
 
 
 
 
1669		}
1670		if (indio_dev->masklength && !buffer->scan_mask) {
1671			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1672							  GFP_KERNEL);
1673			if (!buffer->scan_mask) {
1674				ret = -ENOMEM;
1675				goto error_cleanup_dynamic;
1676			}
1677		}
 
 
 
 
 
 
 
 
 
 
 
 
1678	}
1679
1680	attrn = buffer_attrcount + scan_el_attrcount;
1681	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
1682	if (!attr) {
1683		ret = -ENOMEM;
1684		goto error_free_scan_mask;
1685	}
1686
1687	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1688	if (!buffer->access->set_length)
1689		attr[0] = &dev_attr_length_ro.attr;
1690
1691	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1692		attr[2] = &dev_attr_watermark_ro.attr;
1693
1694	if (buffer->attrs)
1695		for (i = 0, id_attr = buffer->attrs[i];
1696		     (id_attr = buffer->attrs[i]); i++)
1697			attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
1698				(struct attribute *)&id_attr->dev_attr.attr;
1699
1700	buffer->buffer_group.attrs = attr;
1701
1702	for (i = 0; i < buffer_attrcount; i++) {
1703		struct attribute *wrapped;
1704
1705		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1706		if (!wrapped) {
1707			ret = -ENOMEM;
1708			goto error_free_buffer_attrs;
1709		}
1710		attr[i] = wrapped;
 
 
 
 
 
 
 
 
 
 
 
 
1711	}
1712
1713	attrn = 0;
1714	list_for_each_entry(p, &buffer->buffer_attr_list, l)
1715		attr[attrn++] = &p->dev_attr.attr;
1716
1717	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1718	if (!buffer->buffer_group.name) {
1719		ret = -ENOMEM;
1720		goto error_free_buffer_attrs;
1721	}
1722
1723	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1724	if (ret)
1725		goto error_free_buffer_attr_group_name;
1726
1727	/* we only need to register the legacy groups for the first buffer */
1728	if (index > 0)
1729		return 0;
1730
1731	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1732						      buffer_attrcount,
1733						      scan_el_attrcount);
1734	if (ret)
1735		goto error_free_buffer_attr_group_name;
1736
1737	return 0;
1738
1739error_free_buffer_attr_group_name:
1740	kfree(buffer->buffer_group.name);
1741error_free_buffer_attrs:
1742	kfree(buffer->buffer_group.attrs);
1743error_free_scan_mask:
1744	bitmap_free(buffer->scan_mask);
1745error_cleanup_dynamic:
1746	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1747
1748	return ret;
1749}
1750
1751static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
1752					     struct iio_dev *indio_dev,
1753					     int index)
1754{
1755	if (index == 0)
1756		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1757	bitmap_free(buffer->scan_mask);
1758	kfree(buffer->buffer_group.name);
1759	kfree(buffer->buffer_group.attrs);
1760	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1761}
1762
1763int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1764{
1765	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1766	const struct iio_chan_spec *channels;
1767	struct iio_buffer *buffer;
1768	int ret, i, idx;
1769	size_t sz;
1770
1771	channels = indio_dev->channels;
1772	if (channels) {
1773		int ml = indio_dev->masklength;
1774
1775		for (i = 0; i < indio_dev->num_channels; i++)
1776			ml = max(ml, channels[i].scan_index + 1);
1777		indio_dev->masklength = ml;
1778	}
1779
1780	if (!iio_dev_opaque->attached_buffers_cnt)
1781		return 0;
1782
1783	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
1784		buffer = iio_dev_opaque->attached_buffers[idx];
1785		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
1786		if (ret)
1787			goto error_unwind_sysfs_and_mask;
1788	}
1789
1790	sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
1791	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1792	if (!iio_dev_opaque->buffer_ioctl_handler) {
1793		ret = -ENOMEM;
1794		goto error_unwind_sysfs_and_mask;
1795	}
1796
1797	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1798	iio_device_ioctl_handler_register(indio_dev,
1799					  iio_dev_opaque->buffer_ioctl_handler);
1800
1801	return 0;
1802
1803error_unwind_sysfs_and_mask:
1804	while (idx--) {
1805		buffer = iio_dev_opaque->attached_buffers[idx];
1806		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
1807	}
1808	return ret;
1809}
1810
1811void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
1812{
1813	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1814	struct iio_buffer *buffer;
1815	int i;
1816
1817	if (!iio_dev_opaque->attached_buffers_cnt)
1818		return;
1819
1820	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1821	kfree(iio_dev_opaque->buffer_ioctl_handler);
1822
1823	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1824		buffer = iio_dev_opaque->attached_buffers[i];
1825		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
1826	}
1827}
1828
1829/**
1830 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1831 * @indio_dev: the iio device
1832 * @mask: scan mask to be checked
1833 *
1834 * Return true if exactly one bit is set in the scan mask, false otherwise. It
1835 * can be used for devices where only one channel can be active for sampling at
1836 * a time.
1837 */
1838bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1839				   const unsigned long *mask)
1840{
1841	return bitmap_weight(mask, indio_dev->masklength) == 1;
1842}
1843EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
1844
1845static const void *iio_demux(struct iio_buffer *buffer,
1846			     const void *datain)
1847{
1848	struct iio_demux_table *t;
1849
1850	if (list_empty(&buffer->demux_list))
1851		return datain;
1852	list_for_each_entry(t, &buffer->demux_list, l)
1853		memcpy(buffer->demux_bounce + t->to,
1854		       datain + t->from, t->length);
1855
1856	return buffer->demux_bounce;
1857}
1858
1859static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1860{
1861	const void *dataout = iio_demux(buffer, data);
1862	int ret;
1863
1864	ret = buffer->access->store_to(buffer, dataout);
1865	if (ret)
1866		return ret;
1867
1868	/*
1869	 * We can't just test for watermark to decide if we wake the poll queue
1870	 * because read may request less samples than the watermark.
1871	 */
1872	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1873	return 0;
1874}
1875
1876/**
1877 * iio_push_to_buffers() - push to a registered buffer.
1878 * @indio_dev:		iio_dev structure for device.
1879 * @data:		Full scan.
1880 */
1881int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1882{
1883	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1884	int ret;
1885	struct iio_buffer *buf;
1886
1887	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1888		ret = iio_push_to_buffer(buf, data);
1889		if (ret < 0)
1890			return ret;
1891	}
1892
1893	return 0;
1894}
1895EXPORT_SYMBOL_GPL(iio_push_to_buffers);
1896
1897/**
1898 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1899 *    no alignment or space requirements.
1900 * @indio_dev:		iio_dev structure for device.
1901 * @data:		channel data excluding the timestamp.
1902 * @data_sz:		size of data.
1903 * @timestamp:		timestamp for the sample data.
1904 *
1905 * This special variant of iio_push_to_buffers_with_timestamp() does
1906 * not require space for the timestamp, or 8 byte alignment of data.
1907 * It does however require an allocation on first call and additional
1908 * copies on all calls, so should be avoided if possible.
1909 */
1910int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
1911					  const void *data,
1912					  size_t data_sz,
1913					  int64_t timestamp)
1914{
1915	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1916
1917	/*
1918	 * Conservative estimate - we can always safely copy the minimum
1919	 * of either the data provided or the length of the destination buffer.
1920	 * This relaxed limit allows the calling drivers to be lax about
1921	 * tracking the size of the data they are pushing, at the cost of
1922	 * unnecessary copying of padding.
1923	 */
1924	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
1925	if (iio_dev_opaque->bounce_buffer_size !=  indio_dev->scan_bytes) {
1926		void *bb;
1927
1928		bb = devm_krealloc(&indio_dev->dev,
1929				   iio_dev_opaque->bounce_buffer,
1930				   indio_dev->scan_bytes, GFP_KERNEL);
1931		if (!bb)
1932			return -ENOMEM;
1933		iio_dev_opaque->bounce_buffer = bb;
1934		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
1935	}
1936	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
1937	return iio_push_to_buffers_with_timestamp(indio_dev,
1938						  iio_dev_opaque->bounce_buffer,
1939						  timestamp);
1940}
1941EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
1942
1943/**
1944 * iio_buffer_release() - Free a buffer's resources
1945 * @ref: Pointer to the kref embedded in the iio_buffer struct
1946 *
1947 * This function is called when the last reference to the buffer has been
1948 * dropped. It will typically free all resources allocated by the buffer. Do not
1949 * call this function manually, always use iio_buffer_put() when done using a
1950 * buffer.
1951 */
1952static void iio_buffer_release(struct kref *ref)
1953{
1954	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1955
1956	buffer->access->release(buffer);
1957}
1958
1959/**
1960 * iio_buffer_get() - Grab a reference to the buffer
1961 * @buffer: The buffer to grab a reference for, may be NULL
1962 *
1963 * Returns the pointer to the buffer that was passed into the function.
1964 */
1965struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1966{
1967	if (buffer)
1968		kref_get(&buffer->ref);
1969
1970	return buffer;
1971}
1972EXPORT_SYMBOL_GPL(iio_buffer_get);
1973
1974/**
1975 * iio_buffer_put() - Release the reference to the buffer
1976 * @buffer: The buffer to release the reference for, may be NULL
1977 */
1978void iio_buffer_put(struct iio_buffer *buffer)
1979{
1980	if (buffer)
1981		kref_put(&buffer->ref, iio_buffer_release);
1982}
1983EXPORT_SYMBOL_GPL(iio_buffer_put);
1984
1985/**
1986 * iio_device_attach_buffer - Attach a buffer to a IIO device
1987 * @indio_dev: The device the buffer should be attached to
1988 * @buffer: The buffer to attach to the device
1989 *
1990 * Return 0 if successful, negative if error.
1991 *
1992 * This function attaches a buffer to a IIO device. The buffer stays attached to
1993 * the device until the device is freed. For legacy reasons, the first attached
1994 * buffer will also be assigned to 'indio_dev->buffer'.
1995 * The array allocated here, will be free'd via the iio_device_detach_buffers()
1996 * call which is handled by the iio_device_free().
1997 */
1998int iio_device_attach_buffer(struct iio_dev *indio_dev,
1999			     struct iio_buffer *buffer)
2000{
2001	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2002	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
2003	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
2004
2005	cnt++;
2006
2007	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
2008	if (!new)
2009		return -ENOMEM;
2010	iio_dev_opaque->attached_buffers = new;
2011
2012	buffer = iio_buffer_get(buffer);
2013
2014	/* first buffer is legacy; attach it to the IIO device directly */
2015	if (!indio_dev->buffer)
2016		indio_dev->buffer = buffer;
2017
2018	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
2019	iio_dev_opaque->attached_buffers_cnt = cnt;
2020
2021	return 0;
2022}
2023EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
v3.15
 
   1/* The industrial I/O core
   2 *
   3 * Copyright (c) 2008 Jonathan Cameron
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 *
   9 * Handling of buffer allocation / resizing.
  10 *
  11 *
  12 * Things to look at here.
  13 * - Better memory allocation techniques?
  14 * - Alternative access techniques?
  15 */
 
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/device.h>
 
  19#include <linux/fs.h>
  20#include <linux/cdev.h>
  21#include <linux/slab.h>
  22#include <linux/poll.h>
  23#include <linux/sched.h>
  24
  25#include <linux/iio/iio.h>
 
  26#include "iio_core.h"
 
  27#include <linux/iio/sysfs.h>
  28#include <linux/iio/buffer.h>
 
  29
  30static const char * const iio_endian_prefix[] = {
  31	[IIO_BE] = "be",
  32	[IIO_LE] = "le",
  33};
  34
  35static bool iio_buffer_is_active(struct iio_buffer *buf)
  36{
  37	return !list_empty(&buf->buffer_list);
  38}
  39
  40static bool iio_buffer_data_available(struct iio_buffer *buf)
  41{
  42	if (buf->access->data_available)
  43		return buf->access->data_available(buf);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  44
  45	return buf->stufftoread;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46}
  47
  48/**
  49 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 
 
 
 
  50 *
  51 * This function relies on all buffer implementations having an
  52 * iio_buffer as their first element.
 
 
 
  53 **/
  54ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
  55				      size_t n, loff_t *f_ps)
  56{
  57	struct iio_dev *indio_dev = filp->private_data;
  58	struct iio_buffer *rb = indio_dev->buffer;
  59	int ret;
 
 
 
 
  60
  61	if (!indio_dev->info)
  62		return -ENODEV;
  63
  64	if (!rb || !rb->access->read_first_n)
  65		return -EINVAL;
  66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  67	do {
  68		if (!iio_buffer_data_available(rb)) {
  69			if (filp->f_flags & O_NONBLOCK)
  70				return -EAGAIN;
  71
  72			ret = wait_event_interruptible(rb->pollq,
  73					iio_buffer_data_available(rb) ||
  74					indio_dev->info == NULL);
  75			if (ret)
  76				return ret;
  77			if (indio_dev->info == NULL)
  78				return -ENODEV;
 
 
 
  79		}
  80
  81		ret = rb->access->read_first_n(rb, n, buf);
  82		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
  83			ret = -EAGAIN;
  84	 } while (ret == 0);
 
  85
  86	return ret;
  87}
  88
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  89/**
  90 * iio_buffer_poll() - poll the buffer to find out if it has data
 
 
 
 
 
 
  91 */
  92unsigned int iio_buffer_poll(struct file *filp,
  93			     struct poll_table_struct *wait)
  94{
  95	struct iio_dev *indio_dev = filp->private_data;
  96	struct iio_buffer *rb = indio_dev->buffer;
 
  97
  98	if (!indio_dev->info)
  99		return -ENODEV;
 100
 101	poll_wait(filp, &rb->pollq, wait);
 102	if (iio_buffer_data_available(rb))
 103		return POLLIN | POLLRDNORM;
 104	/* need a way of knowing if there may be enough data... */
 
 
 
 
 
 
 
 
 
 105	return 0;
 106}
 107
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 108/**
 109 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 110 * @indio_dev: The IIO device
 111 *
 112 * Wakes up the event waitqueue used for poll(). Should usually
 113 * be called when the device is unregistered.
 114 */
 115void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
 116{
 117	if (!indio_dev->buffer)
 118		return;
 
 
 
 
 
 
 
 
 
 
 
 
 119
 120	wake_up(&indio_dev->buffer->pollq);
 121}
 
 122
 123void iio_buffer_init(struct iio_buffer *buffer)
 124{
 125	INIT_LIST_HEAD(&buffer->demux_list);
 126	INIT_LIST_HEAD(&buffer->buffer_list);
 127	init_waitqueue_head(&buffer->pollq);
 128	kref_init(&buffer->ref);
 
 
 129}
 130EXPORT_SYMBOL(iio_buffer_init);
 131
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 132static ssize_t iio_show_scan_index(struct device *dev,
 133				   struct device_attribute *attr,
 134				   char *buf)
 135{
 136	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
 137}
 138
 139static ssize_t iio_show_fixed_type(struct device *dev,
 140				   struct device_attribute *attr,
 141				   char *buf)
 142{
 143	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 144	u8 type = this_attr->c->scan_type.endianness;
 145
 146	if (type == IIO_CPU) {
 147#ifdef __LITTLE_ENDIAN
 148		type = IIO_LE;
 149#else
 150		type = IIO_BE;
 151#endif
 152	}
 153	return sprintf(buf, "%s:%c%d/%d>>%u\n",
 
 
 
 
 
 
 
 
 
 154		       iio_endian_prefix[type],
 155		       this_attr->c->scan_type.sign,
 156		       this_attr->c->scan_type.realbits,
 157		       this_attr->c->scan_type.storagebits,
 158		       this_attr->c->scan_type.shift);
 159}
 160
 161static ssize_t iio_scan_el_show(struct device *dev,
 162				struct device_attribute *attr,
 163				char *buf)
 164{
 165	int ret;
 166	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 167
 168	/* Ensure ret is 0 or 1. */
 169	ret = !!test_bit(to_iio_dev_attr(attr)->address,
 170		       indio_dev->buffer->scan_mask);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 171
 172	return sprintf(buf, "%d\n", ret);
 
 
 
 
 
 
 173}
 174
 175static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
 176{
 177	clear_bit(bit, buffer->scan_mask);
 178	return 0;
 179}
 180
 
 
 
 
 
 
 
 
 
 
 
 
 
 181static ssize_t iio_scan_el_store(struct device *dev,
 182				 struct device_attribute *attr,
 183				 const char *buf,
 184				 size_t len)
 185{
 186	int ret;
 187	bool state;
 188	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 189	struct iio_buffer *buffer = indio_dev->buffer;
 190	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 
 191
 192	ret = strtobool(buf, &state);
 193	if (ret < 0)
 194		return ret;
 195	mutex_lock(&indio_dev->mlock);
 196	if (iio_buffer_is_active(indio_dev->buffer)) {
 197		ret = -EBUSY;
 198		goto error_ret;
 199	}
 200	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
 201	if (ret < 0)
 202		goto error_ret;
 203	if (!state && ret) {
 204		ret = iio_scan_mask_clear(buffer, this_attr->address);
 205		if (ret)
 206			goto error_ret;
 207	} else if (state && !ret) {
 208		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
 209		if (ret)
 210			goto error_ret;
 211	}
 212
 213error_ret:
 214	mutex_unlock(&indio_dev->mlock);
 215
 216	return ret < 0 ? ret : len;
 217
 218}
 219
 220static ssize_t iio_scan_el_ts_show(struct device *dev,
 221				   struct device_attribute *attr,
 222				   char *buf)
 223{
 224	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 225	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
 
 226}
 227
 228static ssize_t iio_scan_el_ts_store(struct device *dev,
 229				    struct device_attribute *attr,
 230				    const char *buf,
 231				    size_t len)
 232{
 233	int ret;
 234	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 
 
 235	bool state;
 236
 237	ret = strtobool(buf, &state);
 238	if (ret < 0)
 239		return ret;
 240
 241	mutex_lock(&indio_dev->mlock);
 242	if (iio_buffer_is_active(indio_dev->buffer)) {
 243		ret = -EBUSY;
 244		goto error_ret;
 245	}
 246	indio_dev->buffer->scan_timestamp = state;
 247error_ret:
 248	mutex_unlock(&indio_dev->mlock);
 249
 250	return ret ? ret : len;
 251}
 252
 253static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
 
 254					const struct iio_chan_spec *chan)
 255{
 256	int ret, attrcount = 0;
 257	struct iio_buffer *buffer = indio_dev->buffer;
 258
 259	ret = __iio_add_chan_devattr("index",
 260				     chan,
 261				     &iio_show_scan_index,
 262				     NULL,
 263				     0,
 264				     IIO_SEPARATE,
 265				     &indio_dev->dev,
 266				     &buffer->scan_el_dev_attr_list);
 
 267	if (ret)
 268		return ret;
 269	attrcount++;
 270	ret = __iio_add_chan_devattr("type",
 271				     chan,
 272				     &iio_show_fixed_type,
 273				     NULL,
 274				     0,
 275				     0,
 276				     &indio_dev->dev,
 277				     &buffer->scan_el_dev_attr_list);
 
 278	if (ret)
 279		return ret;
 280	attrcount++;
 281	if (chan->type != IIO_TIMESTAMP)
 282		ret = __iio_add_chan_devattr("en",
 283					     chan,
 284					     &iio_scan_el_show,
 285					     &iio_scan_el_store,
 286					     chan->scan_index,
 287					     0,
 288					     &indio_dev->dev,
 289					     &buffer->scan_el_dev_attr_list);
 
 290	else
 291		ret = __iio_add_chan_devattr("en",
 292					     chan,
 293					     &iio_scan_el_ts_show,
 294					     &iio_scan_el_ts_store,
 295					     chan->scan_index,
 296					     0,
 297					     &indio_dev->dev,
 298					     &buffer->scan_el_dev_attr_list);
 
 299	if (ret)
 300		return ret;
 301	attrcount++;
 302	ret = attrcount;
 303	return ret;
 304}
 305
 306static const char * const iio_scan_elements_group_name = "scan_elements";
 307
 308int iio_buffer_register(struct iio_dev *indio_dev,
 309			const struct iio_chan_spec *channels,
 310			int num_channels)
 311{
 312	struct iio_dev_attr *p;
 313	struct attribute **attr;
 314	struct iio_buffer *buffer = indio_dev->buffer;
 315	int ret, i, attrn, attrcount, attrcount_orig = 0;
 316
 317	if (buffer->attrs)
 318		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
 319
 320	if (buffer->scan_el_attrs != NULL) {
 321		attr = buffer->scan_el_attrs->attrs;
 322		while (*attr++ != NULL)
 323			attrcount_orig++;
 324	}
 325	attrcount = attrcount_orig;
 326	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
 327	if (channels) {
 328		/* new magic */
 329		for (i = 0; i < num_channels; i++) {
 330			if (channels[i].scan_index < 0)
 331				continue;
 332
 333			/* Establish necessary mask length */
 334			if (channels[i].scan_index >
 335			    (int)indio_dev->masklength - 1)
 336				indio_dev->masklength
 337					= channels[i].scan_index + 1;
 338
 339			ret = iio_buffer_add_channel_sysfs(indio_dev,
 340							 &channels[i]);
 341			if (ret < 0)
 342				goto error_cleanup_dynamic;
 343			attrcount += ret;
 344			if (channels[i].type == IIO_TIMESTAMP)
 345				indio_dev->scan_index_timestamp =
 346					channels[i].scan_index;
 347		}
 348		if (indio_dev->masklength && buffer->scan_mask == NULL) {
 349			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
 350						    sizeof(*buffer->scan_mask),
 351						    GFP_KERNEL);
 352			if (buffer->scan_mask == NULL) {
 353				ret = -ENOMEM;
 354				goto error_cleanup_dynamic;
 355			}
 356		}
 357	}
 358
 359	buffer->scan_el_group.name = iio_scan_elements_group_name;
 360
 361	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
 362					      sizeof(buffer->scan_el_group.attrs[0]),
 363					      GFP_KERNEL);
 364	if (buffer->scan_el_group.attrs == NULL) {
 365		ret = -ENOMEM;
 366		goto error_free_scan_mask;
 367	}
 368	if (buffer->scan_el_attrs)
 369		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
 370		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
 371	attrn = attrcount_orig;
 372
 373	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
 374		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
 375	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
 376
 377	return 0;
 378
 379error_free_scan_mask:
 380	kfree(buffer->scan_mask);
 381error_cleanup_dynamic:
 382	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
 383
 384	return ret;
 385}
 386EXPORT_SYMBOL(iio_buffer_register);
 387
 388void iio_buffer_unregister(struct iio_dev *indio_dev)
 389{
 390	kfree(indio_dev->buffer->scan_mask);
 391	kfree(indio_dev->buffer->scan_el_group.attrs);
 392	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
 393}
 394EXPORT_SYMBOL(iio_buffer_unregister);
 395
 396ssize_t iio_buffer_read_length(struct device *dev,
 397			       struct device_attribute *attr,
 398			       char *buf)
 399{
 400	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 401	struct iio_buffer *buffer = indio_dev->buffer;
 402
 403	if (buffer->access->get_length)
 404		return sprintf(buf, "%d\n",
 405			       buffer->access->get_length(buffer));
 406
 407	return 0;
 408}
 409EXPORT_SYMBOL(iio_buffer_read_length);
 410
 411ssize_t iio_buffer_write_length(struct device *dev,
 412				struct device_attribute *attr,
 413				const char *buf,
 414				size_t len)
 415{
 416	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 417	struct iio_buffer *buffer = indio_dev->buffer;
 
 418	unsigned int val;
 419	int ret;
 420
 421	ret = kstrtouint(buf, 10, &val);
 422	if (ret)
 423		return ret;
 424
 425	if (buffer->access->get_length)
 426		if (val == buffer->access->get_length(buffer))
 427			return len;
 428
 429	mutex_lock(&indio_dev->mlock);
 430	if (iio_buffer_is_active(indio_dev->buffer)) {
 431		ret = -EBUSY;
 432	} else {
 433		if (buffer->access->set_length)
 434			buffer->access->set_length(buffer, val);
 435		ret = 0;
 436	}
 437	mutex_unlock(&indio_dev->mlock);
 
 
 
 
 
 438
 439	return ret ? ret : len;
 440}
 441EXPORT_SYMBOL(iio_buffer_write_length);
 442
 443ssize_t iio_buffer_show_enable(struct device *dev,
 444			       struct device_attribute *attr,
 445			       char *buf)
 
 
 
 
 
 
 
 446{
 447	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 448	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
 
 
 
 
 
 
 449}
 450EXPORT_SYMBOL(iio_buffer_show_enable);
 451
 452/* Note NULL used as error indicator as it doesn't make sense. */
 453static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 454					  unsigned int masklength,
 455					  const unsigned long *mask)
 456{
 457	if (bitmap_empty(mask, masklength))
 458		return NULL;
 459	while (*av_masks) {
 460		if (bitmap_subset(mask, av_masks, masklength))
 461			return av_masks;
 462		av_masks += BITS_TO_LONGS(masklength);
 463	}
 464	return NULL;
 465}
 466
 467static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 468				const unsigned long *mask, bool timestamp)
 469{
 470	const struct iio_chan_spec *ch;
 471	unsigned bytes = 0;
 472	int length, i;
 473
 474	/* How much space will the demuxed element take? */
 475	for_each_set_bit(i, mask,
 476			 indio_dev->masklength) {
 477		ch = iio_find_channel_from_si(indio_dev, i);
 478		length = ch->scan_type.storagebits / 8;
 479		bytes = ALIGN(bytes, length);
 480		bytes += length;
 
 481	}
 
 482	if (timestamp) {
 483		ch = iio_find_channel_from_si(indio_dev,
 484					      indio_dev->scan_index_timestamp);
 485		length = ch->scan_type.storagebits / 8;
 486		bytes = ALIGN(bytes, length);
 487		bytes += length;
 
 488	}
 
 
 489	return bytes;
 490}
 491
 492static void iio_buffer_activate(struct iio_dev *indio_dev,
 493	struct iio_buffer *buffer)
 494{
 
 
 495	iio_buffer_get(buffer);
 496	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
 497}
 498
 499static void iio_buffer_deactivate(struct iio_buffer *buffer)
 500{
 501	list_del_init(&buffer->buffer_list);
 
 502	iio_buffer_put(buffer);
 503}
 504
 505void iio_disable_all_buffers(struct iio_dev *indio_dev)
 506{
 
 507	struct iio_buffer *buffer, *_buffer;
 508
 509	if (list_empty(&indio_dev->buffer_list))
 510		return;
 511
 512	if (indio_dev->setup_ops->predisable)
 513		indio_dev->setup_ops->predisable(indio_dev);
 514
 515	list_for_each_entry_safe(buffer, _buffer,
 516			&indio_dev->buffer_list, buffer_list)
 517		iio_buffer_deactivate(buffer);
 
 518
 519	indio_dev->currentmode = INDIO_DIRECT_MODE;
 520	if (indio_dev->setup_ops->postdisable)
 521		indio_dev->setup_ops->postdisable(indio_dev);
 
 
 
 
 522
 523	if (indio_dev->available_scan_masks == NULL)
 524		kfree(indio_dev->active_scan_mask);
 
 
 
 
 525}
 526
 527static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
 528	struct iio_buffer *buffer)
 529{
 530	unsigned int bytes;
 531
 532	if (!buffer->access->set_bytes_per_datum)
 533		return;
 534
 535	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
 536		buffer->scan_timestamp);
 537
 538	buffer->access->set_bytes_per_datum(buffer, bytes);
 539}
 540
 541static int __iio_update_buffers(struct iio_dev *indio_dev,
 542		       struct iio_buffer *insert_buffer,
 543		       struct iio_buffer *remove_buffer)
 544{
 545	int ret;
 546	int success = 0;
 547	struct iio_buffer *buffer;
 548	unsigned long *compound_mask;
 549	const unsigned long *old_mask;
 550
 551	/* Wind down existing buffers - iff there are any */
 552	if (!list_empty(&indio_dev->buffer_list)) {
 553		if (indio_dev->setup_ops->predisable) {
 554			ret = indio_dev->setup_ops->predisable(indio_dev);
 555			if (ret)
 556				return ret;
 557		}
 558		indio_dev->currentmode = INDIO_DIRECT_MODE;
 559		if (indio_dev->setup_ops->postdisable) {
 560			ret = indio_dev->setup_ops->postdisable(indio_dev);
 561			if (ret)
 562				return ret;
 563		}
 564	}
 565	/* Keep a copy of current setup to allow roll back */
 566	old_mask = indio_dev->active_scan_mask;
 
 
 
 
 
 
 567	if (!indio_dev->available_scan_masks)
 568		indio_dev->active_scan_mask = NULL;
 
 569
 570	if (remove_buffer)
 571		iio_buffer_deactivate(remove_buffer);
 572	if (insert_buffer)
 573		iio_buffer_activate(indio_dev, insert_buffer);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 574
 575	/* If no buffers in list, we are done */
 576	if (list_empty(&indio_dev->buffer_list)) {
 577		indio_dev->currentmode = INDIO_DIRECT_MODE;
 578		if (indio_dev->available_scan_masks == NULL)
 579			kfree(old_mask);
 
 580		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 581	}
 582
 583	/* What scan mask do we actually have? */
 584	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
 585				sizeof(long), GFP_KERNEL);
 586	if (compound_mask == NULL) {
 587		if (indio_dev->available_scan_masks == NULL)
 588			kfree(old_mask);
 589		return -ENOMEM;
 590	}
 591	indio_dev->scan_timestamp = 0;
 592
 593	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
 
 
 
 
 594		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
 595			  indio_dev->masklength);
 596		indio_dev->scan_timestamp |= buffer->scan_timestamp;
 597	}
 
 
 
 
 
 
 
 598	if (indio_dev->available_scan_masks) {
 599		indio_dev->active_scan_mask =
 600			iio_scan_mask_match(indio_dev->available_scan_masks,
 601					    indio_dev->masklength,
 602					    compound_mask);
 603		if (indio_dev->active_scan_mask == NULL) {
 604			/*
 605			 * Roll back.
 606			 * Note can only occur when adding a buffer.
 607			 */
 608			iio_buffer_deactivate(insert_buffer);
 609			if (old_mask) {
 610				indio_dev->active_scan_mask = old_mask;
 611				success = -EINVAL;
 612			}
 613			else {
 614				kfree(compound_mask);
 615				ret = -EINVAL;
 616				return ret;
 617			}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 618		}
 619	} else {
 620		indio_dev->active_scan_mask = compound_mask;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 621	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 622
 623	iio_update_demux(indio_dev);
 624
 625	/* Wind up again */
 626	if (indio_dev->setup_ops->preenable) {
 627		ret = indio_dev->setup_ops->preenable(indio_dev);
 628		if (ret) {
 629			printk(KERN_ERR
 630			       "Buffer not started: buffer preenable failed (%d)\n", ret);
 631			goto error_remove_inserted;
 632		}
 633	}
 634	indio_dev->scan_bytes =
 635		iio_compute_scan_bytes(indio_dev,
 636				       indio_dev->active_scan_mask,
 637				       indio_dev->scan_timestamp);
 638	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
 639		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
 640		if (buffer->access->request_update) {
 641			ret = buffer->access->request_update(buffer);
 642			if (ret) {
 643				printk(KERN_INFO
 644				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
 645				goto error_run_postdisable;
 646			}
 647		}
 648	}
 
 649	if (indio_dev->info->update_scan_mode) {
 650		ret = indio_dev->info
 651			->update_scan_mode(indio_dev,
 652					   indio_dev->active_scan_mask);
 653		if (ret < 0) {
 654			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
 655			goto error_run_postdisable;
 
 
 656		}
 657	}
 658	/* Definitely possible for devices to support both of these. */
 659	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
 660		if (!indio_dev->trig) {
 661			printk(KERN_INFO "Buffer not started: no trigger\n");
 662			ret = -EINVAL;
 663			/* Can only occur on first buffer */
 664			goto error_run_postdisable;
 
 
 
 665		}
 666		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
 667	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
 668		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
 669	} else { /* Should never be reached */
 670		ret = -EINVAL;
 671		goto error_run_postdisable;
 
 672	}
 673
 674	if (indio_dev->setup_ops->postenable) {
 675		ret = indio_dev->setup_ops->postenable(indio_dev);
 676		if (ret) {
 677			printk(KERN_INFO
 678			       "Buffer not started: postenable failed (%d)\n", ret);
 679			indio_dev->currentmode = INDIO_DIRECT_MODE;
 680			if (indio_dev->setup_ops->postdisable)
 681				indio_dev->setup_ops->postdisable(indio_dev);
 682			goto error_disable_all_buffers;
 683		}
 684	}
 685
 686	if (indio_dev->available_scan_masks)
 687		kfree(compound_mask);
 688	else
 689		kfree(old_mask);
 690
 691	return success;
 692
 693error_disable_all_buffers:
 694	indio_dev->currentmode = INDIO_DIRECT_MODE;
 695error_run_postdisable:
 
 
 
 
 
 
 
 
 696	if (indio_dev->setup_ops->postdisable)
 697		indio_dev->setup_ops->postdisable(indio_dev);
 698error_remove_inserted:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 699	if (insert_buffer)
 700		iio_buffer_deactivate(insert_buffer);
 701	indio_dev->active_scan_mask = old_mask;
 702	kfree(compound_mask);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 703	return ret;
 704}
 705
 706int iio_update_buffers(struct iio_dev *indio_dev,
 707		       struct iio_buffer *insert_buffer,
 708		       struct iio_buffer *remove_buffer)
 709{
 
 710	int ret;
 711
 712	if (insert_buffer == remove_buffer)
 713		return 0;
 714
 715	mutex_lock(&indio_dev->info_exist_lock);
 716	mutex_lock(&indio_dev->mlock);
 
 
 
 
 717
 718	if (insert_buffer && iio_buffer_is_active(insert_buffer))
 719		insert_buffer = NULL;
 720
 721	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
 722		remove_buffer = NULL;
 723
 724	if (!insert_buffer && !remove_buffer) {
 725		ret = 0;
 726		goto out_unlock;
 727	}
 728
 729	if (indio_dev->info == NULL) {
 730		ret = -ENODEV;
 731		goto out_unlock;
 732	}
 733
 734	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
 735
 736out_unlock:
 737	mutex_unlock(&indio_dev->mlock);
 738	mutex_unlock(&indio_dev->info_exist_lock);
 739
 740	return ret;
 741}
 742EXPORT_SYMBOL_GPL(iio_update_buffers);
 743
 744ssize_t iio_buffer_store_enable(struct device *dev,
 745				struct device_attribute *attr,
 746				const char *buf,
 747				size_t len)
 
 
 
 
 748{
 749	int ret;
 750	bool requested_state;
 751	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 
 
 752	bool inlist;
 753
 754	ret = strtobool(buf, &requested_state);
 755	if (ret < 0)
 756		return ret;
 757
 758	mutex_lock(&indio_dev->mlock);
 759
 760	/* Find out if it is in the list */
 761	inlist = iio_buffer_is_active(indio_dev->buffer);
 762	/* Already in desired state */
 763	if (inlist == requested_state)
 764		goto done;
 765
 766	if (requested_state)
 767		ret = __iio_update_buffers(indio_dev,
 768					 indio_dev->buffer, NULL);
 769	else
 770		ret = __iio_update_buffers(indio_dev,
 771					 NULL, indio_dev->buffer);
 772
 773	if (ret < 0)
 774		goto done;
 775done:
 776	mutex_unlock(&indio_dev->mlock);
 777	return (ret < 0) ? ret : len;
 778}
 779EXPORT_SYMBOL(iio_buffer_store_enable);
 780
 781/**
 782 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 783 * @indio_dev: the iio device
 784 * @mask: scan mask to be checked
 785 *
 786 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 787 * can be used for devices where only one channel can be active for sampling at
 788 * a time.
 789 */
 790bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
 791	const unsigned long *mask)
 792{
 793	return bitmap_weight(mask, indio_dev->masklength) == 1;
 
 
 794}
 795EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
 796
 797static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
 798	const unsigned long *mask)
 
 799{
 800	if (!indio_dev->setup_ops->validate_scan_mask)
 801		return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 802
 803	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
 804}
 805
 806/**
 807 * iio_scan_mask_set() - set particular bit in the scan mask
 808 * @indio_dev: the iio device
 809 * @buffer: the buffer whose scan mask we are interested in
 810 * @bit: the bit to be set.
 811 *
 812 * Note that at this point we have no way of knowing what other
 813 * buffers might request, hence this code only verifies that the
 814 * individual buffers request is plausible.
 815 */
 816int iio_scan_mask_set(struct iio_dev *indio_dev,
 817		      struct iio_buffer *buffer, int bit)
 818{
 819	const unsigned long *mask;
 820	unsigned long *trialmask;
 
 
 821
 822	trialmask = kmalloc(sizeof(*trialmask)*
 823			    BITS_TO_LONGS(indio_dev->masklength),
 824			    GFP_KERNEL);
 
 
 825
 826	if (trialmask == NULL)
 827		return -ENOMEM;
 828	if (!indio_dev->masklength) {
 829		WARN_ON("Trying to set scanmask prior to registering buffer\n");
 830		goto err_invalid_mask;
 
 
 831	}
 832	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
 833	set_bit(bit, trialmask);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 834
 835	if (!iio_validate_scan_mask(indio_dev, trialmask))
 836		goto err_invalid_mask;
 
 837
 838	if (indio_dev->available_scan_masks) {
 839		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
 840					   indio_dev->masklength,
 841					   trialmask);
 842		if (!mask)
 843			goto err_invalid_mask;
 844	}
 845	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
 846
 847	kfree(trialmask);
 848
 849	return 0;
 850
 851err_invalid_mask:
 852	kfree(trialmask);
 853	return -EINVAL;
 854}
 855EXPORT_SYMBOL_GPL(iio_scan_mask_set);
 856
 857int iio_scan_mask_query(struct iio_dev *indio_dev,
 858			struct iio_buffer *buffer, int bit)
 
 
 859{
 860	if (bit > indio_dev->masklength)
 861		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 862
 863	if (!buffer->scan_mask)
 864		return 0;
 865
 866	/* Ensure return value is 0 or 1. */
 867	return !!test_bit(bit, buffer->scan_mask);
 868};
 869EXPORT_SYMBOL_GPL(iio_scan_mask_query);
 870
 871/**
 872 * struct iio_demux_table() - table describing demux memcpy ops
 873 * @from:	index to copy from
 874 * @to:		index to copy to
 875 * @length:	how many bytes to copy
 876 * @l:		list head used for management
 877 */
 878struct iio_demux_table {
 879	unsigned from;
 880	unsigned to;
 881	unsigned length;
 882	struct list_head l;
 883};
 884
 885static const void *iio_demux(struct iio_buffer *buffer,
 886				 const void *datain)
 887{
 888	struct iio_demux_table *t;
 889
 890	if (list_empty(&buffer->demux_list))
 891		return datain;
 892	list_for_each_entry(t, &buffer->demux_list, l)
 893		memcpy(buffer->demux_bounce + t->to,
 894		       datain + t->from, t->length);
 895
 896	return buffer->demux_bounce;
 897}
 898
 899static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
 900{
 901	const void *dataout = iio_demux(buffer, data);
 902
 903	return buffer->access->store_to(buffer, dataout);
 
 904}
 905
 906static void iio_buffer_demux_free(struct iio_buffer *buffer)
 907{
 908	struct iio_demux_table *p, *q;
 909	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
 910		list_del(&p->l);
 911		kfree(p);
 912	}
 
 
 
 
 
 
 913}
 914
 
 
 
 
 
 
 
 
 915
 916int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
 917{
 918	int ret;
 919	struct iio_buffer *buf;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 920
 921	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
 922		ret = iio_push_to_buffer(buf, data);
 923		if (ret < 0)
 924			return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 925	}
 926
 927	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 928}
 929EXPORT_SYMBOL_GPL(iio_push_to_buffers);
 930
 931static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 932				   struct iio_buffer *buffer)
 
 933{
 934	const struct iio_chan_spec *ch;
 935	int ret, in_ind = -1, out_ind, length;
 936	unsigned in_loc = 0, out_loc = 0;
 937	struct iio_demux_table *p;
 
 
 938
 939	/* Clear out any old demux */
 940	iio_buffer_demux_free(buffer);
 941	kfree(buffer->demux_bounce);
 942	buffer->demux_bounce = NULL;
 
 
 
 
 
 
 
 
 
 
 
 943
 944	/* First work out which scan mode we will actually have */
 945	if (bitmap_equal(indio_dev->active_scan_mask,
 946			 buffer->scan_mask,
 947			 indio_dev->masklength))
 948		return 0;
 
 
 
 
 
 
 
 949
 950	/* Now we have the two masks, work from least sig and build up sizes */
 951	for_each_set_bit(out_ind,
 952			 indio_dev->active_scan_mask,
 953			 indio_dev->masklength) {
 954		in_ind = find_next_bit(indio_dev->active_scan_mask,
 955				       indio_dev->masklength,
 956				       in_ind + 1);
 957		while (in_ind != out_ind) {
 958			in_ind = find_next_bit(indio_dev->active_scan_mask,
 959					       indio_dev->masklength,
 960					       in_ind + 1);
 961			ch = iio_find_channel_from_si(indio_dev, in_ind);
 962			length = ch->scan_type.storagebits/8;
 963			/* Make sure we are aligned */
 964			in_loc += length;
 965			if (in_loc % length)
 966				in_loc += length - in_loc % length;
 967		}
 968		p = kmalloc(sizeof(*p), GFP_KERNEL);
 969		if (p == NULL) {
 970			ret = -ENOMEM;
 971			goto error_clear_mux_table;
 
 
 
 972		}
 973		ch = iio_find_channel_from_si(indio_dev, in_ind);
 974		length = ch->scan_type.storagebits/8;
 975		if (out_loc % length)
 976			out_loc += length - out_loc % length;
 977		if (in_loc % length)
 978			in_loc += length - in_loc % length;
 979		p->from = in_loc;
 980		p->to = out_loc;
 981		p->length = length;
 982		list_add_tail(&p->l, &buffer->demux_list);
 983		out_loc += length;
 984		in_loc += length;
 985	}
 986	/* Relies on scan_timestamp being last */
 987	if (buffer->scan_timestamp) {
 988		p = kmalloc(sizeof(*p), GFP_KERNEL);
 989		if (p == NULL) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 990			ret = -ENOMEM;
 991			goto error_clear_mux_table;
 992		}
 993		ch = iio_find_channel_from_si(indio_dev,
 994			indio_dev->scan_index_timestamp);
 995		length = ch->scan_type.storagebits/8;
 996		if (out_loc % length)
 997			out_loc += length - out_loc % length;
 998		if (in_loc % length)
 999			in_loc += length - in_loc % length;
1000		p->from = in_loc;
1001		p->to = out_loc;
1002		p->length = length;
1003		list_add_tail(&p->l, &buffer->demux_list);
1004		out_loc += length;
1005		in_loc += length;
1006	}
1007	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1008	if (buffer->demux_bounce == NULL) {
 
 
 
 
 
1009		ret = -ENOMEM;
1010		goto error_clear_mux_table;
1011	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1012	return 0;
1013
1014error_clear_mux_table:
1015	iio_buffer_demux_free(buffer);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1016
 
 
 
 
 
1017	return ret;
1018}
1019
1020int iio_update_demux(struct iio_dev *indio_dev)
1021{
 
1022	struct iio_buffer *buffer;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1023	int ret;
 
1024
1025	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
1026		ret = iio_buffer_update_demux(indio_dev, buffer);
1027		if (ret < 0)
1028			goto error_clear_mux_table;
1029	}
 
1030	return 0;
 
 
1031
1032error_clear_mux_table:
1033	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
1034		iio_buffer_demux_free(buffer);
1035
1036	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1037}
1038EXPORT_SYMBOL_GPL(iio_update_demux);
1039
1040/**
1041 * iio_buffer_release() - Free a buffer's resources
1042 * @ref: Pointer to the kref embedded in the iio_buffer struct
1043 *
1044 * This function is called when the last reference to the buffer has been
1045 * dropped. It will typically free all resources allocated by the buffer. Do not
1046 * call this function manually, always use iio_buffer_put() when done using a
1047 * buffer.
1048 */
1049static void iio_buffer_release(struct kref *ref)
1050{
1051	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1052
1053	buffer->access->release(buffer);
1054}
1055
1056/**
1057 * iio_buffer_get() - Grab a reference to the buffer
1058 * @buffer: The buffer to grab a reference for, may be NULL
1059 *
1060 * Returns the pointer to the buffer that was passed into the function.
1061 */
1062struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1063{
1064	if (buffer)
1065		kref_get(&buffer->ref);
1066
1067	return buffer;
1068}
1069EXPORT_SYMBOL_GPL(iio_buffer_get);
1070
1071/**
1072 * iio_buffer_put() - Release the reference to the buffer
1073 * @buffer: The buffer to release the reference for, may be NULL
1074 */
1075void iio_buffer_put(struct iio_buffer *buffer)
1076{
1077	if (buffer)
1078		kref_put(&buffer->ref, iio_buffer_release);
1079}
1080EXPORT_SYMBOL_GPL(iio_buffer_put);