   1// SPDX-License-Identifier: GPL-2.0-only
   2/* The industrial I/O core
   3 *
   4 * Copyright (c) 2008 Jonathan Cameron
   5 *
   6 * Handling of buffer allocation / resizing.
   7 *
   8 * Things to look at here.
   9 * - Better memory allocation techniques?
  10 * - Alternative access techniques?
  11 */
  12#include <linux/atomic.h>
  13#include <linux/anon_inodes.h>
  14#include <linux/cleanup.h>
  15#include <linux/kernel.h>
  16#include <linux/export.h>
  17#include <linux/device.h>
  18#include <linux/dma-buf.h>
  19#include <linux/dma-fence.h>
  20#include <linux/dma-resv.h>
  21#include <linux/file.h>
  22#include <linux/fs.h>
  23#include <linux/cdev.h>
  24#include <linux/slab.h>
  25#include <linux/mm.h>
  26#include <linux/poll.h>
  27#include <linux/sched/signal.h>
  28
  29#include <linux/iio/iio.h>
  30#include <linux/iio/iio-opaque.h>
  31#include "iio_core.h"
  32#include "iio_core_trigger.h"
  33#include <linux/iio/sysfs.h>
  34#include <linux/iio/buffer.h>
  35#include <linux/iio/buffer_impl.h>
  36
  37#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
  38
  39MODULE_IMPORT_NS("DMA_BUF");
  40
  41struct iio_dmabuf_priv {
  42	struct list_head entry;
  43	struct kref ref;
  44
  45	struct iio_buffer *buffer;
  46	struct iio_dma_buffer_block *block;
  47
  48	u64 context;
  49
  50	/* Spinlock used for locking the dma_fence */
  51	spinlock_t lock;
  52
  53	struct dma_buf_attachment *attach;
  54	struct sg_table *sgt;
  55	enum dma_data_direction dir;
  56	atomic_t seqno;
  57};
  58
  59struct iio_dma_fence {
  60	struct dma_fence base;
  61	struct iio_dmabuf_priv *priv;
  62	struct work_struct work;
  63};
  64
  65static const char * const iio_endian_prefix[] = {
  66	[IIO_BE] = "be",
  67	[IIO_LE] = "le",
  68};
  69
  70static bool iio_buffer_is_active(struct iio_buffer *buf)
  71{
  72	return !list_empty(&buf->buffer_list);
  73}
  74
  75static size_t iio_buffer_data_available(struct iio_buffer *buf)
  76{
  77	return buf->access->data_available(buf);
  78}
  79
  80static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
  81				   struct iio_buffer *buf, size_t required)
  82{
  83	if (!indio_dev->info->hwfifo_flush_to_buffer)
  84		return -ENODEV;
  85
  86	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
  87}
  88
  89static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
  90			     size_t to_wait, int to_flush)
  91{
  92	size_t avail;
  93	int flushed = 0;
  94
  95	/* wakeup if the device was unregistered */
  96	if (!indio_dev->info)
  97		return true;
  98
  99	/* drain the buffer if it was disabled */
 100	if (!iio_buffer_is_active(buf)) {
 101		to_wait = min_t(size_t, to_wait, 1);
 102		to_flush = 0;
 103	}
 104
 105	avail = iio_buffer_data_available(buf);
 106
 107	if (avail >= to_wait) {
 108		/* force a flush for non-blocking reads */
 109		if (!to_wait && avail < to_flush)
 110			iio_buffer_flush_hwfifo(indio_dev, buf,
 111						to_flush - avail);
 112		return true;
 113	}
 114
 115	if (to_flush)
 116		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
 117						  to_wait - avail);
 118	if (flushed <= 0)
 119		return false;
 120
 121	if (avail + flushed >= to_wait)
 122		return true;
 123
 124	return false;
 125}
 126
 127/**
 128 * iio_buffer_read() - chrdev read for buffer access
 129 * @filp:	File structure pointer for the char device
 130 * @buf:	Destination buffer for iio buffer read
 131 * @n:		First n bytes to read
 132 * @f_ps:	Long offset provided by the user as a seek position
 133 *
 134 * This function relies on all buffer implementations having an
 135 * iio_buffer as their first element.
 136 *
  137 * Return: negative error code on failure, otherwise the number of
  138 *	   bytes read
 139 **/
 140static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
 141			       size_t n, loff_t *f_ps)
 142{
 143	struct iio_dev_buffer_pair *ib = filp->private_data;
 144	struct iio_buffer *rb = ib->buffer;
 145	struct iio_dev *indio_dev = ib->indio_dev;
 146	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 147	size_t datum_size;
 148	size_t to_wait;
 149	int ret = 0;
 150
 151	if (!indio_dev->info)
 152		return -ENODEV;
 153
 154	if (!rb || !rb->access->read)
 155		return -EINVAL;
 156
 157	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
 158		return -EPERM;
 159
 160	datum_size = rb->bytes_per_datum;
 161
 162	/*
 163	 * If datum_size is 0 there will never be anything to read from the
 164	 * buffer, so signal end of file now.
 165	 */
 166	if (!datum_size)
 167		return 0;
 168
 169	if (filp->f_flags & O_NONBLOCK)
 170		to_wait = 0;
 171	else
 172		to_wait = min_t(size_t, n / datum_size, rb->watermark);
 173
 174	add_wait_queue(&rb->pollq, &wait);
 175	do {
 176		if (!indio_dev->info) {
 177			ret = -ENODEV;
 178			break;
 179		}
 180
 181		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
 182			if (signal_pending(current)) {
 183				ret = -ERESTARTSYS;
 184				break;
 185			}
 186
 187			wait_woken(&wait, TASK_INTERRUPTIBLE,
 188				   MAX_SCHEDULE_TIMEOUT);
 189			continue;
 190		}
 191
 192		ret = rb->access->read(rb, n, buf);
 193		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
 194			ret = -EAGAIN;
 195	} while (ret == 0);
 196	remove_wait_queue(&rb->pollq, &wait);
 197
 198	return ret;
 199}
 200
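/*
 * Example: a minimal userspace reader driving iio_buffer_read() through
 * the buffer character device. The device path and the 16-byte scan
 * size are assumptions for illustration; the real values depend on the
 * device and on which scan elements are enabled.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[16];
 *	ssize_t n;
 *
 *	// Blocking read: sleeps until min(n / bytes_per_datum, watermark)
 *	// scans are available; with O_NONBLOCK it returns -EAGAIN instead
 *	// of sleeping when the buffer is empty.
 *	n = read(fd, scan, sizeof(scan));
 *	if (n < 0)
 *		perror("read");
 *	close(fd);
 */
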
 201static size_t iio_buffer_space_available(struct iio_buffer *buf)
 202{
 203	if (buf->access->space_available)
 204		return buf->access->space_available(buf);
 205
 206	return SIZE_MAX;
 207}
 208
 209static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
 210				size_t n, loff_t *f_ps)
 211{
 212	struct iio_dev_buffer_pair *ib = filp->private_data;
 213	struct iio_buffer *rb = ib->buffer;
 214	struct iio_dev *indio_dev = ib->indio_dev;
 215	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 216	int ret = 0;
 217	size_t written;
 218
 219	if (!indio_dev->info)
 220		return -ENODEV;
 221
 222	if (!rb || !rb->access->write)
 223		return -EINVAL;
 224
 225	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
 226		return -EPERM;
 227
 228	written = 0;
 229	add_wait_queue(&rb->pollq, &wait);
 230	do {
 231		if (!indio_dev->info)
 232			return -ENODEV;
 233
 234		if (!iio_buffer_space_available(rb)) {
 235			if (signal_pending(current)) {
 236				ret = -ERESTARTSYS;
 237				break;
 238			}
 239
 240			if (filp->f_flags & O_NONBLOCK) {
 241				if (!written)
 242					ret = -EAGAIN;
 243				break;
 244			}
 245
 246			wait_woken(&wait, TASK_INTERRUPTIBLE,
 247				   MAX_SCHEDULE_TIMEOUT);
 248			continue;
 249		}
 250
 251		ret = rb->access->write(rb, n - written, buf + written);
 252		if (ret < 0)
 253			break;
 254
 255		written += ret;
 256
 257	} while (written != n);
 258	remove_wait_queue(&rb->pollq, &wait);
 259
 260	return ret < 0 ? ret : written;
 261}
 262
 263/**
 264 * iio_buffer_poll() - poll the buffer to find out if it has data
 265 * @filp:	File structure pointer for device access
 266 * @wait:	Poll table structure pointer for which the driver adds
 267 *		a wait queue
 268 *
  269 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading,
  270 *	   (EPOLLOUT | EPOLLWRNORM) if space is available, or 0 otherwise
 271 */
 272static __poll_t iio_buffer_poll(struct file *filp,
 273				struct poll_table_struct *wait)
 274{
 275	struct iio_dev_buffer_pair *ib = filp->private_data;
 276	struct iio_buffer *rb = ib->buffer;
 277	struct iio_dev *indio_dev = ib->indio_dev;
 278
 279	if (!indio_dev->info || !rb)
 280		return 0;
 281
 282	poll_wait(filp, &rb->pollq, wait);
 283
 284	switch (rb->direction) {
 285	case IIO_BUFFER_DIRECTION_IN:
 286		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
 287			return EPOLLIN | EPOLLRDNORM;
 288		break;
 289	case IIO_BUFFER_DIRECTION_OUT:
 290		if (iio_buffer_space_available(rb))
 291			return EPOLLOUT | EPOLLWRNORM;
 292		break;
 293	}
 294
 295	return 0;
 296}
 297
 298ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
 299				size_t n, loff_t *f_ps)
 300{
 301	struct iio_dev_buffer_pair *ib = filp->private_data;
 302	struct iio_buffer *rb = ib->buffer;
 303
 304	/* check if buffer was opened through new API */
 305	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 306		return -EBUSY;
 307
 308	return iio_buffer_read(filp, buf, n, f_ps);
 309}
 310
 311ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
 312				 size_t n, loff_t *f_ps)
 313{
 314	struct iio_dev_buffer_pair *ib = filp->private_data;
 315	struct iio_buffer *rb = ib->buffer;
 316
 317	/* check if buffer was opened through new API */
 318	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 319		return -EBUSY;
 320
 321	return iio_buffer_write(filp, buf, n, f_ps);
 322}
 323
 324__poll_t iio_buffer_poll_wrapper(struct file *filp,
 325				 struct poll_table_struct *wait)
 326{
 327	struct iio_dev_buffer_pair *ib = filp->private_data;
 328	struct iio_buffer *rb = ib->buffer;
 329
 330	/* check if buffer was opened through new API */
 331	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
 332		return 0;
 333
 334	return iio_buffer_poll(filp, wait);
 335}
 336
 337/**
 338 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 339 * @indio_dev: The IIO device
 340 *
  341 * Wakes up the buffer waitqueues used for poll(). Should usually
 342 * be called when the device is unregistered.
 343 */
 344void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
 345{
 346	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 347	struct iio_buffer *buffer;
 348	unsigned int i;
 349
 350	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
 351		buffer = iio_dev_opaque->attached_buffers[i];
 352		wake_up(&buffer->pollq);
 353	}
 354}
 355
 356int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
 357{
 358	if (!buffer || !buffer->access || !buffer->access->remove_from)
 359		return -EINVAL;
 360
 361	return buffer->access->remove_from(buffer, data);
 362}
 363EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
 364
 365void iio_buffer_init(struct iio_buffer *buffer)
 366{
 367	INIT_LIST_HEAD(&buffer->demux_list);
 368	INIT_LIST_HEAD(&buffer->buffer_list);
 369	INIT_LIST_HEAD(&buffer->dmabufs);
 370	mutex_init(&buffer->dmabufs_mutex);
 371	init_waitqueue_head(&buffer->pollq);
 372	kref_init(&buffer->ref);
 373	if (!buffer->watermark)
 374		buffer->watermark = 1;
 375}
 376EXPORT_SYMBOL(iio_buffer_init);
 377
 378void iio_device_detach_buffers(struct iio_dev *indio_dev)
 379{
 380	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 381	struct iio_buffer *buffer;
 382	unsigned int i;
 383
 384	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
 385		buffer = iio_dev_opaque->attached_buffers[i];
 386		iio_buffer_put(buffer);
 387	}
 388
 389	kfree(iio_dev_opaque->attached_buffers);
 390}
 391
 392static ssize_t iio_show_scan_index(struct device *dev,
 393				   struct device_attribute *attr,
 394				   char *buf)
 395{
 396	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
 397}
 398
 399static ssize_t iio_show_fixed_type(struct device *dev,
 400				   struct device_attribute *attr,
 401				   char *buf)
 402{
 403	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 404	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 405	const struct iio_scan_type *scan_type;
 406	u8 type;
 407
 408	scan_type = iio_get_current_scan_type(indio_dev, this_attr->c);
 409	if (IS_ERR(scan_type))
 410		return PTR_ERR(scan_type);
 411
 412	type = scan_type->endianness;
 413
 414	if (type == IIO_CPU) {
 415#ifdef __LITTLE_ENDIAN
 416		type = IIO_LE;
 417#else
 418		type = IIO_BE;
 419#endif
 420	}
 421	if (scan_type->repeat > 1)
 422		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
 423		       iio_endian_prefix[type],
 424		       scan_type->sign,
 425		       scan_type->realbits,
 426		       scan_type->storagebits,
 427		       scan_type->repeat,
 428		       scan_type->shift);
 429	else
 430		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
 431		       iio_endian_prefix[type],
 432		       scan_type->sign,
 433		       scan_type->realbits,
 434		       scan_type->storagebits,
 435		       scan_type->shift);
 436}
 437
 438static ssize_t iio_scan_el_show(struct device *dev,
 439				struct device_attribute *attr,
 440				char *buf)
 441{
 442	int ret;
 443	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 444
 445	/* Ensure ret is 0 or 1. */
 446	ret = !!test_bit(to_iio_dev_attr(attr)->address,
 447		       buffer->scan_mask);
 448
 449	return sysfs_emit(buf, "%d\n", ret);
 450}
 451
  452/* Note: NULL is used as the error indicator, as a NULL mask makes no sense. */
 453static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 454						unsigned int masklength,
 455						const unsigned long *mask,
 456						bool strict)
 457{
 458	if (bitmap_empty(mask, masklength))
 459		return NULL;
 460	/*
  461	 * The condition here does not handle multi-long masks correctly.
  462	 * It only checks the first long to be zero, and will use such a mask
  463	 * as a terminator even if there were bits set after the first long.
  464	 *
  465	 * A correct check would require using:
  466	 * while (!bitmap_empty(av_masks, masklength))
  467	 * instead. This is potentially hazardous because the
  468	 * available_scan_masks is a zero-terminated array of longs - and
  469	 * using the proper bitmap_empty() check for multi-long wide masks
  470	 * would require the array to be terminated with multiple zero longs -
  471	 * which is not such a usual pattern.
  472	 *
  473	 * As of writing this, no multi-long wide masks were found in-tree, so
  474	 * the simple while (*av_masks) check works.
 475	 */
 476	while (*av_masks) {
 477		if (strict) {
 478			if (bitmap_equal(mask, av_masks, masklength))
 479				return av_masks;
 480		} else {
 481			if (bitmap_subset(mask, av_masks, masklength))
 482				return av_masks;
 483		}
 484		av_masks += BITS_TO_LONGS(masklength);
 485	}
 486	return NULL;
 487}
 488
 489static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
 490				   const unsigned long *mask)
 491{
 492	if (!indio_dev->setup_ops->validate_scan_mask)
 493		return true;
 494
 495	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
 496}
 497
 498/**
 499 * iio_scan_mask_set() - set particular bit in the scan mask
 500 * @indio_dev: the iio device
 501 * @buffer: the buffer whose scan mask we are interested in
 502 * @bit: the bit to be set.
 503 *
 504 * Note that at this point we have no way of knowing what other
 505 * buffers might request, hence this code only verifies that the
  506 * individual buffer's request is plausible.
 507 */
 508static int iio_scan_mask_set(struct iio_dev *indio_dev,
 509			     struct iio_buffer *buffer, int bit)
 510{
 511	unsigned int masklength = iio_get_masklength(indio_dev);
 512	const unsigned long *mask;
 513	unsigned long *trialmask;
 514
 515	if (!masklength) {
 516		WARN(1, "Trying to set scanmask prior to registering buffer\n");
 517		return -EINVAL;
 518	}
 519
 520	trialmask = bitmap_alloc(masklength, GFP_KERNEL);
 521	if (!trialmask)
 522		return -ENOMEM;
 523	bitmap_copy(trialmask, buffer->scan_mask, masklength);
 524	set_bit(bit, trialmask);
 525
 526	if (!iio_validate_scan_mask(indio_dev, trialmask))
 527		goto err_invalid_mask;
 528
 529	if (indio_dev->available_scan_masks) {
 530		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
 531					   masklength, trialmask, false);
 532		if (!mask)
 533			goto err_invalid_mask;
 534	}
 535	bitmap_copy(buffer->scan_mask, trialmask, masklength);
 536
 537	bitmap_free(trialmask);
 538
 539	return 0;
 540
 541err_invalid_mask:
 542	bitmap_free(trialmask);
 543	return -EINVAL;
 544}
 545
 546static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
 547{
 548	clear_bit(bit, buffer->scan_mask);
 549	return 0;
 550}
 551
 552static int iio_scan_mask_query(struct iio_dev *indio_dev,
 553			       struct iio_buffer *buffer, int bit)
 554{
 555	if (bit > iio_get_masklength(indio_dev))
 556		return -EINVAL;
 557
 558	if (!buffer->scan_mask)
 559		return 0;
 560
 561	/* Ensure return value is 0 or 1. */
 562	return !!test_bit(bit, buffer->scan_mask);
  563}
 564
 565static ssize_t iio_scan_el_store(struct device *dev,
 566				 struct device_attribute *attr,
 567				 const char *buf,
 568				 size_t len)
 569{
 570	int ret;
 571	bool state;
 572	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 573	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 574	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 575	struct iio_buffer *buffer = this_attr->buffer;
 576
 577	ret = kstrtobool(buf, &state);
 578	if (ret < 0)
 579		return ret;
 580
 581	guard(mutex)(&iio_dev_opaque->mlock);
 582	if (iio_buffer_is_active(buffer))
 583		return -EBUSY;
 584
 585	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
 586	if (ret < 0)
 587		return ret;
 588
 589	if (state && ret)
 590		return len;
 591
 592	if (state)
 593		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
 594	else
 595		ret = iio_scan_mask_clear(buffer, this_attr->address);
 596	if (ret)
 597		return ret;
 598
 599	return len;
 600}
 601
 602static ssize_t iio_scan_el_ts_show(struct device *dev,
 603				   struct device_attribute *attr,
 604				   char *buf)
 605{
 606	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 607
 608	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
 609}
 610
 611static ssize_t iio_scan_el_ts_store(struct device *dev,
 612				    struct device_attribute *attr,
 613				    const char *buf,
 614				    size_t len)
 615{
 616	int ret;
 617	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 618	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 619	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 620	bool state;
 621
 622	ret = kstrtobool(buf, &state);
 623	if (ret < 0)
 624		return ret;
 625
 626	guard(mutex)(&iio_dev_opaque->mlock);
 627	if (iio_buffer_is_active(buffer))
 628		return -EBUSY;
 629
 630	buffer->scan_timestamp = state;
 631
 632	return len;
 633}
 634
 635static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
 636					struct iio_buffer *buffer,
 637					const struct iio_chan_spec *chan)
 638{
 639	int ret, attrcount = 0;
 640
 641	ret = __iio_add_chan_devattr("index",
 642				     chan,
 643				     &iio_show_scan_index,
 644				     NULL,
 645				     0,
 646				     IIO_SEPARATE,
 647				     &indio_dev->dev,
 648				     buffer,
 649				     &buffer->buffer_attr_list);
 650	if (ret)
 651		return ret;
 652	attrcount++;
 653	ret = __iio_add_chan_devattr("type",
 654				     chan,
 655				     &iio_show_fixed_type,
 656				     NULL,
 657				     0,
 658				     IIO_SEPARATE,
 659				     &indio_dev->dev,
 660				     buffer,
 661				     &buffer->buffer_attr_list);
 662	if (ret)
 663		return ret;
 664	attrcount++;
 665	if (chan->type != IIO_TIMESTAMP)
 666		ret = __iio_add_chan_devattr("en",
 667					     chan,
 668					     &iio_scan_el_show,
 669					     &iio_scan_el_store,
 670					     chan->scan_index,
 671					     IIO_SEPARATE,
 672					     &indio_dev->dev,
 673					     buffer,
 674					     &buffer->buffer_attr_list);
 675	else
 676		ret = __iio_add_chan_devattr("en",
 677					     chan,
 678					     &iio_scan_el_ts_show,
 679					     &iio_scan_el_ts_store,
 680					     chan->scan_index,
 681					     IIO_SEPARATE,
 682					     &indio_dev->dev,
 683					     buffer,
 684					     &buffer->buffer_attr_list);
 685	if (ret)
 686		return ret;
 687	attrcount++;
 688	ret = attrcount;
 689	return ret;
 690}
 691
 692static ssize_t length_show(struct device *dev, struct device_attribute *attr,
 693			   char *buf)
 694{
 695	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 696
 697	return sysfs_emit(buf, "%d\n", buffer->length);
 698}
 699
 700static ssize_t length_store(struct device *dev, struct device_attribute *attr,
 701			    const char *buf, size_t len)
 702{
 703	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 704	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 705	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 706	unsigned int val;
 707	int ret;
 708
 709	ret = kstrtouint(buf, 10, &val);
 710	if (ret)
 711		return ret;
 712
 713	if (val == buffer->length)
 714		return len;
 715
 716	guard(mutex)(&iio_dev_opaque->mlock);
 717	if (iio_buffer_is_active(buffer))
 718		return -EBUSY;
 719
 720	buffer->access->set_length(buffer, val);
 721
 722	if (buffer->length && buffer->length < buffer->watermark)
 723		buffer->watermark = buffer->length;
 724
 725	return len;
 726}
 727
 728static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
 729			   char *buf)
 730{
 731	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
 732
 733	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
 734}
 735
 736static int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
 737				    unsigned int scan_index)
 738{
 739	const struct iio_chan_spec *ch;
 740	const struct iio_scan_type *scan_type;
 741	unsigned int bytes;
 742
 743	ch = iio_find_channel_from_si(indio_dev, scan_index);
 744	scan_type = iio_get_current_scan_type(indio_dev, ch);
 745	if (IS_ERR(scan_type))
 746		return PTR_ERR(scan_type);
 747
 748	bytes = scan_type->storagebits / 8;
 749
 750	if (scan_type->repeat > 1)
 751		bytes *= scan_type->repeat;
 752
 753	return bytes;
 754}
 755
 756static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
 757{
 758	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 759
 760	return iio_storage_bytes_for_si(indio_dev,
 761					iio_dev_opaque->scan_index_timestamp);
 762}
 763
 764static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 765				  const unsigned long *mask, bool timestamp)
 766{
 767	unsigned int bytes = 0;
 768	int length, i, largest = 0;
 769
 770	/* How much space will the demuxed element take? */
 771	for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) {
 772		length = iio_storage_bytes_for_si(indio_dev, i);
 773		if (length < 0)
 774			return length;
 775
 776		bytes = ALIGN(bytes, length);
 777		bytes += length;
 778		largest = max(largest, length);
 779	}
 780
 781	if (timestamp) {
 782		length = iio_storage_bytes_for_timestamp(indio_dev);
 783		if (length < 0)
 784			return length;
 785
 786		bytes = ALIGN(bytes, length);
 787		bytes += length;
 788		largest = max(largest, length);
 789	}
 790
 791	bytes = ALIGN(bytes, largest);
 792	return bytes;
 793}
 794
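/*
 * Worked example of the computation above, with assumed channel sizes:
 * two enabled channels stored in 16 bits each, plus the 64-bit
 * timestamp.
 *
 *	channel 0:  bytes = ALIGN(0, 2) + 2  = 2
 *	channel 1:  bytes = ALIGN(2, 2) + 2  = 4
 *	timestamp:  bytes = ALIGN(4, 8) + 8  = 16
 *	final:      bytes = ALIGN(16, 8)     = 16
 *
 * The resulting scan is 16 bytes, with 4 bytes of padding before the
 * timestamp so that it stays naturally aligned.
 */
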
 795static void iio_buffer_activate(struct iio_dev *indio_dev,
 796				struct iio_buffer *buffer)
 797{
 798	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 799
 800	iio_buffer_get(buffer);
 801	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
 802}
 803
 804static void iio_buffer_deactivate(struct iio_buffer *buffer)
 805{
 806	list_del_init(&buffer->buffer_list);
 807	wake_up_interruptible(&buffer->pollq);
 808	iio_buffer_put(buffer);
 809}
 810
 811static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
 812{
 813	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 814	struct iio_buffer *buffer, *_buffer;
 815
 816	list_for_each_entry_safe(buffer, _buffer,
 817				 &iio_dev_opaque->buffer_list, buffer_list)
 818		iio_buffer_deactivate(buffer);
 819}
 820
 821static int iio_buffer_enable(struct iio_buffer *buffer,
 822			     struct iio_dev *indio_dev)
 823{
 824	if (!buffer->access->enable)
 825		return 0;
 826	return buffer->access->enable(buffer, indio_dev);
 827}
 828
 829static int iio_buffer_disable(struct iio_buffer *buffer,
 830			      struct iio_dev *indio_dev)
 831{
 832	if (!buffer->access->disable)
 833		return 0;
 834	return buffer->access->disable(buffer, indio_dev);
 835}
 836
 837static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
 838					      struct iio_buffer *buffer)
 839{
 840	unsigned int bytes;
 841
 842	if (!buffer->access->set_bytes_per_datum)
 843		return;
 844
 845	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
 846				       buffer->scan_timestamp);
 847
 848	buffer->access->set_bytes_per_datum(buffer, bytes);
 849}
 850
 851static int iio_buffer_request_update(struct iio_dev *indio_dev,
 852				     struct iio_buffer *buffer)
 853{
 854	int ret;
 855
 856	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
 857	if (buffer->access->request_update) {
 858		ret = buffer->access->request_update(buffer);
 859		if (ret) {
 860			dev_dbg(&indio_dev->dev,
 861				"Buffer not started: buffer parameter update failed (%d)\n",
 862				ret);
 863			return ret;
 864		}
 865	}
 866
 867	return 0;
 868}
 869
 870static void iio_free_scan_mask(struct iio_dev *indio_dev,
 871			       const unsigned long *mask)
 872{
 873	/* If the mask is dynamically allocated free it, otherwise do nothing */
 874	if (!indio_dev->available_scan_masks)
 875		bitmap_free(mask);
 876}
 877
 878struct iio_device_config {
 879	unsigned int mode;
 880	unsigned int watermark;
 881	const unsigned long *scan_mask;
 882	unsigned int scan_bytes;
 883	bool scan_timestamp;
 884};
 885
 886static int iio_verify_update(struct iio_dev *indio_dev,
 887			     struct iio_buffer *insert_buffer,
 888			     struct iio_buffer *remove_buffer,
 889			     struct iio_device_config *config)
 890{
 891	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 892	unsigned int masklength = iio_get_masklength(indio_dev);
 893	unsigned long *compound_mask;
 894	const unsigned long *scan_mask;
 895	bool strict_scanmask = false;
 896	struct iio_buffer *buffer;
 897	bool scan_timestamp;
 898	unsigned int modes;
 899
 900	if (insert_buffer &&
 901	    bitmap_empty(insert_buffer->scan_mask, masklength)) {
 902		dev_dbg(&indio_dev->dev,
 903			"At least one scan element must be enabled first\n");
 904		return -EINVAL;
 905	}
 906
 907	memset(config, 0, sizeof(*config));
 908	config->watermark = ~0;
 909
 910	/*
 911	 * If there is just one buffer and we are removing it there is nothing
 912	 * to verify.
 913	 */
 914	if (remove_buffer && !insert_buffer &&
 915	    list_is_singular(&iio_dev_opaque->buffer_list))
 916		return 0;
 917
 918	modes = indio_dev->modes;
 919
 920	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 921		if (buffer == remove_buffer)
 922			continue;
 923		modes &= buffer->access->modes;
 924		config->watermark = min(config->watermark, buffer->watermark);
 925	}
 926
 927	if (insert_buffer) {
 928		modes &= insert_buffer->access->modes;
 929		config->watermark = min(config->watermark,
 930					insert_buffer->watermark);
 931	}
 932
 933	/* Definitely possible for devices to support both of these. */
 934	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
 935		config->mode = INDIO_BUFFER_TRIGGERED;
 936	} else if (modes & INDIO_BUFFER_HARDWARE) {
 937		/*
 938		 * Keep things simple for now and only allow a single buffer to
 939		 * be connected in hardware mode.
 940		 */
 941		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
 942			return -EINVAL;
 943		config->mode = INDIO_BUFFER_HARDWARE;
 944		strict_scanmask = true;
 945	} else if (modes & INDIO_BUFFER_SOFTWARE) {
 946		config->mode = INDIO_BUFFER_SOFTWARE;
 947	} else {
 948		/* Can only occur on first buffer */
 949		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
 950			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
 951		return -EINVAL;
 952	}
 953
 954	/* What scan mask do we actually have? */
 955	compound_mask = bitmap_zalloc(masklength, GFP_KERNEL);
 956	if (!compound_mask)
 957		return -ENOMEM;
 958
 959	scan_timestamp = false;
 960
 961	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 962		if (buffer == remove_buffer)
 963			continue;
 964		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
 965			  masklength);
 966		scan_timestamp |= buffer->scan_timestamp;
 967	}
 968
 969	if (insert_buffer) {
 970		bitmap_or(compound_mask, compound_mask,
 971			  insert_buffer->scan_mask, masklength);
 972		scan_timestamp |= insert_buffer->scan_timestamp;
 973	}
 974
 975	if (indio_dev->available_scan_masks) {
 976		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
 977						masklength, compound_mask,
 978						strict_scanmask);
 979		bitmap_free(compound_mask);
 980		if (!scan_mask)
 981			return -EINVAL;
 982	} else {
 983		scan_mask = compound_mask;
 984	}
 985
 986	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
 987						    scan_mask, scan_timestamp);
 988	config->scan_mask = scan_mask;
 989	config->scan_timestamp = scan_timestamp;
 990
 991	return 0;
 992}
 993
 994/**
 995 * struct iio_demux_table - table describing demux memcpy ops
 996 * @from:	index to copy from
 997 * @to:		index to copy to
 998 * @length:	how many bytes to copy
 999 * @l:		list head used for management
1000 */
1001struct iio_demux_table {
1002	unsigned int from;
1003	unsigned int to;
1004	unsigned int length;
1005	struct list_head l;
1006};
1007
1008static void iio_buffer_demux_free(struct iio_buffer *buffer)
1009{
1010	struct iio_demux_table *p, *q;
1011
1012	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
1013		list_del(&p->l);
1014		kfree(p);
1015	}
1016}
1017
1018static int iio_buffer_add_demux(struct iio_buffer *buffer,
1019				struct iio_demux_table **p, unsigned int in_loc,
1020				unsigned int out_loc,
1021				unsigned int length)
1022{
1023	if (*p && (*p)->from + (*p)->length == in_loc &&
1024	    (*p)->to + (*p)->length == out_loc) {
1025		(*p)->length += length;
1026	} else {
1027		*p = kmalloc(sizeof(**p), GFP_KERNEL);
1028		if (!(*p))
1029			return -ENOMEM;
1030		(*p)->from = in_loc;
1031		(*p)->to = out_loc;
1032		(*p)->length = length;
1033		list_add_tail(&(*p)->l, &buffer->demux_list);
1034	}
1035
1036	return 0;
1037}
1038
1039static int iio_buffer_update_demux(struct iio_dev *indio_dev,
1040				   struct iio_buffer *buffer)
1041{
1042	unsigned int masklength = iio_get_masklength(indio_dev);
1043	int ret, in_ind = -1, out_ind, length;
1044	unsigned int in_loc = 0, out_loc = 0;
1045	struct iio_demux_table *p = NULL;
1046
1047	/* Clear out any old demux */
1048	iio_buffer_demux_free(buffer);
1049	kfree(buffer->demux_bounce);
1050	buffer->demux_bounce = NULL;
1051
1052	/* First work out which scan mode we will actually have */
1053	if (bitmap_equal(indio_dev->active_scan_mask,
1054			 buffer->scan_mask, masklength))
1055		return 0;
1056
1057	/* Now we have the two masks, work from least sig and build up sizes */
1058	for_each_set_bit(out_ind, buffer->scan_mask, masklength) {
1059		in_ind = find_next_bit(indio_dev->active_scan_mask,
1060				       masklength, in_ind + 1);
1061		while (in_ind != out_ind) {
1062			ret = iio_storage_bytes_for_si(indio_dev, in_ind);
1063			if (ret < 0)
1064				goto error_clear_mux_table;
1065
1066			length = ret;
1067			/* Make sure we are aligned */
1068			in_loc = roundup(in_loc, length) + length;
1069			in_ind = find_next_bit(indio_dev->active_scan_mask,
1070					       masklength, in_ind + 1);
1071		}
1072		ret = iio_storage_bytes_for_si(indio_dev, in_ind);
1073		if (ret < 0)
1074			goto error_clear_mux_table;
1075
1076		length = ret;
1077		out_loc = roundup(out_loc, length);
1078		in_loc = roundup(in_loc, length);
1079		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1080		if (ret)
1081			goto error_clear_mux_table;
1082		out_loc += length;
1083		in_loc += length;
1084	}
1085	/* Relies on scan_timestamp being last */
1086	if (buffer->scan_timestamp) {
1087		ret = iio_storage_bytes_for_timestamp(indio_dev);
1088		if (ret < 0)
1089			goto error_clear_mux_table;
1090
1091		length = ret;
1092		out_loc = roundup(out_loc, length);
1093		in_loc = roundup(in_loc, length);
1094		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1095		if (ret)
1096			goto error_clear_mux_table;
1097		out_loc += length;
1098	}
1099	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1100	if (!buffer->demux_bounce) {
1101		ret = -ENOMEM;
1102		goto error_clear_mux_table;
1103	}
1104	return 0;
1105
1106error_clear_mux_table:
1107	iio_buffer_demux_free(buffer);
1108
1109	return ret;
1110}
1111
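/*
 * Worked example of the demux table built above, with assumed masks and
 * channel sizes: active_scan_mask covers channels {0, 1, 2}, each
 * stored in 2 bytes, while this buffer only enabled {0, 2} and no
 * timestamp.
 *
 *	source scan layout:  ch0 @ 0-1, ch1 @ 2-3, ch2 @ 4-5
 *	demux entry 1:       from = 0, to = 0, length = 2   (ch0)
 *	demux entry 2:       from = 4, to = 2, length = 2   (ch2, skips ch1)
 *
 * demux_bounce is then 4 bytes and holds the repacked {ch0, ch2} scan
 * before it is stored into this buffer.
 */
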
1112static int iio_update_demux(struct iio_dev *indio_dev)
1113{
1114	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1115	struct iio_buffer *buffer;
1116	int ret;
1117
1118	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1119		ret = iio_buffer_update_demux(indio_dev, buffer);
1120		if (ret < 0)
1121			goto error_clear_mux_table;
1122	}
1123	return 0;
1124
1125error_clear_mux_table:
1126	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1127		iio_buffer_demux_free(buffer);
1128
1129	return ret;
1130}
1131
1132static int iio_enable_buffers(struct iio_dev *indio_dev,
1133			      struct iio_device_config *config)
1134{
1135	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1136	struct iio_buffer *buffer, *tmp = NULL;
1137	int ret;
1138
1139	indio_dev->active_scan_mask = config->scan_mask;
1140	indio_dev->scan_timestamp = config->scan_timestamp;
1141	indio_dev->scan_bytes = config->scan_bytes;
1142	iio_dev_opaque->currentmode = config->mode;
1143
1144	iio_update_demux(indio_dev);
1145
1146	/* Wind up again */
1147	if (indio_dev->setup_ops->preenable) {
1148		ret = indio_dev->setup_ops->preenable(indio_dev);
1149		if (ret) {
1150			dev_dbg(&indio_dev->dev,
1151				"Buffer not started: buffer preenable failed (%d)\n", ret);
1152			goto err_undo_config;
1153		}
1154	}
1155
1156	if (indio_dev->info->update_scan_mode) {
1157		ret = indio_dev->info
1158			->update_scan_mode(indio_dev,
1159					   indio_dev->active_scan_mask);
1160		if (ret < 0) {
1161			dev_dbg(&indio_dev->dev,
1162				"Buffer not started: update scan mode failed (%d)\n",
1163				ret);
1164			goto err_run_postdisable;
1165		}
1166	}
1167
1168	if (indio_dev->info->hwfifo_set_watermark)
1169		indio_dev->info->hwfifo_set_watermark(indio_dev,
1170			config->watermark);
1171
1172	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1173		ret = iio_buffer_enable(buffer, indio_dev);
1174		if (ret) {
1175			tmp = buffer;
1176			goto err_disable_buffers;
1177		}
1178	}
1179
1180	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1181		ret = iio_trigger_attach_poll_func(indio_dev->trig,
1182						   indio_dev->pollfunc);
1183		if (ret)
1184			goto err_disable_buffers;
1185	}
1186
1187	if (indio_dev->setup_ops->postenable) {
1188		ret = indio_dev->setup_ops->postenable(indio_dev);
1189		if (ret) {
1190			dev_dbg(&indio_dev->dev,
1191				"Buffer not started: postenable failed (%d)\n", ret);
1192			goto err_detach_pollfunc;
1193		}
1194	}
1195
1196	return 0;
1197
1198err_detach_pollfunc:
1199	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1200		iio_trigger_detach_poll_func(indio_dev->trig,
1201					     indio_dev->pollfunc);
1202	}
1203err_disable_buffers:
1204	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
1205	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1206					     buffer_list)
1207		iio_buffer_disable(buffer, indio_dev);
1208err_run_postdisable:
1209	if (indio_dev->setup_ops->postdisable)
1210		indio_dev->setup_ops->postdisable(indio_dev);
1211err_undo_config:
1212	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1213	indio_dev->active_scan_mask = NULL;
1214
1215	return ret;
1216}
1217
1218static int iio_disable_buffers(struct iio_dev *indio_dev)
1219{
1220	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1221	struct iio_buffer *buffer;
1222	int ret = 0;
1223	int ret2;
1224
1225	/* Wind down existing buffers - iff there are any */
1226	if (list_empty(&iio_dev_opaque->buffer_list))
1227		return 0;
1228
1229	/*
1230	 * If things go wrong at some step in disable we still need to continue
 1231	 * to perform the other steps, otherwise we leave the device in an
 1232	 * inconsistent state. We return the error code for the first error we
1233	 * encountered.
1234	 */
1235
1236	if (indio_dev->setup_ops->predisable) {
1237		ret2 = indio_dev->setup_ops->predisable(indio_dev);
1238		if (ret2 && !ret)
1239			ret = ret2;
1240	}
1241
1242	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1243		iio_trigger_detach_poll_func(indio_dev->trig,
1244					     indio_dev->pollfunc);
1245	}
1246
1247	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1248		ret2 = iio_buffer_disable(buffer, indio_dev);
1249		if (ret2 && !ret)
1250			ret = ret2;
1251	}
1252
1253	if (indio_dev->setup_ops->postdisable) {
1254		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1255		if (ret2 && !ret)
1256			ret = ret2;
1257	}
1258
1259	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1260	indio_dev->active_scan_mask = NULL;
1261	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1262
1263	return ret;
1264}
1265
1266static int __iio_update_buffers(struct iio_dev *indio_dev,
1267				struct iio_buffer *insert_buffer,
1268				struct iio_buffer *remove_buffer)
1269{
1270	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1271	struct iio_device_config new_config;
1272	int ret;
1273
1274	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1275				&new_config);
1276	if (ret)
1277		return ret;
1278
1279	if (insert_buffer) {
1280		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1281		if (ret)
1282			goto err_free_config;
1283	}
1284
1285	ret = iio_disable_buffers(indio_dev);
1286	if (ret)
1287		goto err_deactivate_all;
1288
1289	if (remove_buffer)
1290		iio_buffer_deactivate(remove_buffer);
1291	if (insert_buffer)
1292		iio_buffer_activate(indio_dev, insert_buffer);
1293
1294	/* If no buffers in list, we are done */
1295	if (list_empty(&iio_dev_opaque->buffer_list))
1296		return 0;
1297
1298	ret = iio_enable_buffers(indio_dev, &new_config);
1299	if (ret)
1300		goto err_deactivate_all;
1301
1302	return 0;
1303
1304err_deactivate_all:
1305	/*
1306	 * We've already verified that the config is valid earlier. If things go
1307	 * wrong in either enable or disable the most likely reason is an IO
1308	 * error from the device. In this case there is no good recovery
1309	 * strategy. Just make sure to disable everything and leave the device
1310	 * in a sane state.  With a bit of luck the device might come back to
1311	 * life again later and userspace can try again.
1312	 */
1313	iio_buffer_deactivate_all(indio_dev);
1314
1315err_free_config:
1316	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1317	return ret;
1318}
1319
1320int iio_update_buffers(struct iio_dev *indio_dev,
1321		       struct iio_buffer *insert_buffer,
1322		       struct iio_buffer *remove_buffer)
1323{
1324	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1325
1326	if (insert_buffer == remove_buffer)
1327		return 0;
1328
1329	if (insert_buffer &&
1330	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
1331		return -EINVAL;
1332
1333	guard(mutex)(&iio_dev_opaque->info_exist_lock);
1334	guard(mutex)(&iio_dev_opaque->mlock);
1335
1336	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1337		insert_buffer = NULL;
1338
1339	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1340		remove_buffer = NULL;
1341
1342	if (!insert_buffer && !remove_buffer)
1343		return 0;
1344
1345	if (!indio_dev->info)
1346		return -ENODEV;
1347
1348	return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1349}
1350EXPORT_SYMBOL_GPL(iio_update_buffers);
1351
1352void iio_disable_all_buffers(struct iio_dev *indio_dev)
1353{
1354	iio_disable_buffers(indio_dev);
1355	iio_buffer_deactivate_all(indio_dev);
1356}
1357
1358static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1359			    const char *buf, size_t len)
1360{
1361	int ret;
1362	bool requested_state;
1363	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1364	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1365	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1366	bool inlist;
1367
1368	ret = kstrtobool(buf, &requested_state);
1369	if (ret < 0)
1370		return ret;
1371
1372	guard(mutex)(&iio_dev_opaque->mlock);
1373
1374	/* Find out if it is in the list */
1375	inlist = iio_buffer_is_active(buffer);
1376	/* Already in desired state */
1377	if (inlist == requested_state)
1378		return len;
1379
1380	if (requested_state)
1381		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1382	else
1383		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1384	if (ret)
1385		return ret;
1386
1387	return len;
1388}
1389
1390static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
1391			      char *buf)
1392{
1393	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1394
1395	return sysfs_emit(buf, "%u\n", buffer->watermark);
1396}
1397
1398static ssize_t watermark_store(struct device *dev,
1399			       struct device_attribute *attr,
1400			       const char *buf, size_t len)
1401{
1402	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1403	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1404	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1405	unsigned int val;
1406	int ret;
1407
1408	ret = kstrtouint(buf, 10, &val);
1409	if (ret)
1410		return ret;
1411	if (!val)
1412		return -EINVAL;
1413
1414	guard(mutex)(&iio_dev_opaque->mlock);
1415
1416	if (val > buffer->length)
1417		return -EINVAL;
1418
1419	if (iio_buffer_is_active(buffer))
1420		return -EBUSY;
1421
1422	buffer->watermark = val;
1423
1424	return len;
1425}
1426
1427static ssize_t data_available_show(struct device *dev,
1428				   struct device_attribute *attr, char *buf)
1429{
1430	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1431
1432	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1433}
1434
1435static ssize_t direction_show(struct device *dev,
1436			      struct device_attribute *attr,
1437			      char *buf)
1438{
1439	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1440
1441	switch (buffer->direction) {
1442	case IIO_BUFFER_DIRECTION_IN:
1443		return sysfs_emit(buf, "in\n");
1444	case IIO_BUFFER_DIRECTION_OUT:
1445		return sysfs_emit(buf, "out\n");
1446	default:
1447		return -EINVAL;
1448	}
1449}
1450
1451static DEVICE_ATTR_RW(length);
1452static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
1453static DEVICE_ATTR_RW(enable);
1454static DEVICE_ATTR_RW(watermark);
1455static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
1456static DEVICE_ATTR_RO(data_available);
1457static DEVICE_ATTR_RO(direction);
1458
1459/*
 1460 * When adding new attributes here, put them at the end, at least until
 1461 * the code that handles the length/length_ro & watermark/watermark_ro
 1462 * assignments gets cleaned up. Otherwise these can create some weird
 1463 * duplicate attribute errors under some setups.
1464 */
1465static struct attribute *iio_buffer_attrs[] = {
1466	&dev_attr_length.attr,
1467	&dev_attr_enable.attr,
1468	&dev_attr_watermark.attr,
1469	&dev_attr_data_available.attr,
1470	&dev_attr_direction.attr,
1471};
1472
1473#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1474
1475static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1476					      struct attribute *attr)
1477{
1478	struct device_attribute *dattr = to_dev_attr(attr);
1479	struct iio_dev_attr *iio_attr;
1480
1481	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1482	if (!iio_attr)
1483		return NULL;
1484
1485	iio_attr->buffer = buffer;
1486	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1487	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1488	if (!iio_attr->dev_attr.attr.name) {
1489		kfree(iio_attr);
1490		return NULL;
1491	}
1492
1493	sysfs_attr_init(&iio_attr->dev_attr.attr);
1494
1495	list_add(&iio_attr->l, &buffer->buffer_attr_list);
1496
1497	return &iio_attr->dev_attr.attr;
1498}
1499
1500static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1501						   struct attribute **buffer_attrs,
1502						   int buffer_attrcount,
1503						   int scan_el_attrcount)
1504{
1505	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1506	struct attribute_group *group;
1507	struct attribute **attrs;
1508	int ret;
1509
1510	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1511	if (!attrs)
1512		return -ENOMEM;
1513
1514	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1515
1516	group = &iio_dev_opaque->legacy_buffer_group;
1517	group->attrs = attrs;
1518	group->name = "buffer";
1519
1520	ret = iio_device_register_sysfs_group(indio_dev, group);
1521	if (ret)
1522		goto error_free_buffer_attrs;
1523
1524	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1525	if (!attrs) {
1526		ret = -ENOMEM;
1527		goto error_free_buffer_attrs;
1528	}
1529
1530	memcpy(attrs, &buffer_attrs[buffer_attrcount],
1531	       scan_el_attrcount * sizeof(*attrs));
1532
1533	group = &iio_dev_opaque->legacy_scan_el_group;
1534	group->attrs = attrs;
1535	group->name = "scan_elements";
1536
1537	ret = iio_device_register_sysfs_group(indio_dev, group);
1538	if (ret)
1539		goto error_free_scan_el_attrs;
1540
1541	return 0;
1542
1543error_free_scan_el_attrs:
1544	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1545error_free_buffer_attrs:
1546	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1547
1548	return ret;
1549}
1550
1551static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1552{
1553	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1554
1555	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1556	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1557}
1558
1559static void iio_buffer_dmabuf_release(struct kref *ref)
1560{
1561	struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref);
1562	struct dma_buf_attachment *attach = priv->attach;
1563	struct iio_buffer *buffer = priv->buffer;
1564	struct dma_buf *dmabuf = attach->dmabuf;
1565
1566	dma_resv_lock(dmabuf->resv, NULL);
1567	dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
1568	dma_resv_unlock(dmabuf->resv);
1569
1570	buffer->access->detach_dmabuf(buffer, priv->block);
1571
1572	dma_buf_detach(attach->dmabuf, attach);
1573	dma_buf_put(dmabuf);
1574	kfree(priv);
1575}
1576
1577static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach)
1578{
1579	struct iio_dmabuf_priv *priv = attach->importer_priv;
1580
1581	kref_get(&priv->ref);
1582}
1583
1584static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach)
1585{
1586	struct iio_dmabuf_priv *priv = attach->importer_priv;
1587
1588	kref_put(&priv->ref, iio_buffer_dmabuf_release);
1589}
1590
1591static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1592{
1593	struct iio_dev_buffer_pair *ib = filep->private_data;
1594	struct iio_dev *indio_dev = ib->indio_dev;
1595	struct iio_buffer *buffer = ib->buffer;
1596	struct iio_dmabuf_priv *priv, *tmp;
1597
1598	wake_up(&buffer->pollq);
1599
1600	guard(mutex)(&buffer->dmabufs_mutex);
1601
1602	/* Close all attached DMABUFs */
1603	list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) {
1604		list_del_init(&priv->entry);
1605		iio_buffer_dmabuf_put(priv->attach);
1606	}
1607
1608	kfree(ib);
1609	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1610	iio_device_put(indio_dev);
1611
1612	return 0;
1613}
1614
1615static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
1616{
1617	if (!nonblock)
1618		return dma_resv_lock_interruptible(dmabuf->resv, NULL);
1619
1620	if (!dma_resv_trylock(dmabuf->resv))
1621		return -EBUSY;
1622
1623	return 0;
1624}
1625
1626static struct dma_buf_attachment *
1627iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
1628			   struct dma_buf *dmabuf, bool nonblock)
1629{
1630	struct device *dev = ib->indio_dev->dev.parent;
1631	struct iio_buffer *buffer = ib->buffer;
1632	struct dma_buf_attachment *attach = NULL;
1633	struct iio_dmabuf_priv *priv;
1634
1635	guard(mutex)(&buffer->dmabufs_mutex);
1636
1637	list_for_each_entry(priv, &buffer->dmabufs, entry) {
1638		if (priv->attach->dev == dev
1639		    && priv->attach->dmabuf == dmabuf) {
1640			attach = priv->attach;
1641			break;
1642		}
1643	}
1644
1645	if (attach)
1646		iio_buffer_dmabuf_get(attach);
1647
1648	return attach ?: ERR_PTR(-EPERM);
1649}
1650
1651static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
1652				    int __user *user_fd, bool nonblock)
1653{
1654	struct iio_dev *indio_dev = ib->indio_dev;
1655	struct iio_buffer *buffer = ib->buffer;
1656	struct dma_buf_attachment *attach;
1657	struct iio_dmabuf_priv *priv, *each;
1658	struct dma_buf *dmabuf;
1659	int err, fd;
1660
1661	if (!buffer->access->attach_dmabuf
1662	    || !buffer->access->detach_dmabuf
1663	    || !buffer->access->enqueue_dmabuf)
1664		return -EPERM;
1665
1666	if (copy_from_user(&fd, user_fd, sizeof(fd)))
1667		return -EFAULT;
1668
1669	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1670	if (!priv)
1671		return -ENOMEM;
1672
1673	spin_lock_init(&priv->lock);
1674	priv->context = dma_fence_context_alloc(1);
1675
1676	dmabuf = dma_buf_get(fd);
1677	if (IS_ERR(dmabuf)) {
1678		err = PTR_ERR(dmabuf);
1679		goto err_free_priv;
1680	}
1681
1682	attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
1683	if (IS_ERR(attach)) {
1684		err = PTR_ERR(attach);
1685		goto err_dmabuf_put;
1686	}
1687
1688	err = iio_dma_resv_lock(dmabuf, nonblock);
1689	if (err)
1690		goto err_dmabuf_detach;
1691
1692	priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN
1693		? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1694
1695	priv->sgt = dma_buf_map_attachment(attach, priv->dir);
1696	if (IS_ERR(priv->sgt)) {
1697		err = PTR_ERR(priv->sgt);
1698		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err);
1699		goto err_resv_unlock;
1700	}
1701
1702	kref_init(&priv->ref);
1703	priv->buffer = buffer;
1704	priv->attach = attach;
1705	attach->importer_priv = priv;
1706
1707	priv->block = buffer->access->attach_dmabuf(buffer, attach);
1708	if (IS_ERR(priv->block)) {
1709		err = PTR_ERR(priv->block);
1710		goto err_dmabuf_unmap_attachment;
1711	}
1712
1713	dma_resv_unlock(dmabuf->resv);
1714
1715	mutex_lock(&buffer->dmabufs_mutex);
1716
1717	/*
1718	 * Check whether we already have an attachment for this driver/DMABUF
1719	 * combo. If we do, refuse to attach.
1720	 */
1721	list_for_each_entry(each, &buffer->dmabufs, entry) {
1722		if (each->attach->dev == indio_dev->dev.parent
1723		    && each->attach->dmabuf == dmabuf) {
1724			/*
1725			 * We unlocked the reservation object, so going through
1726			 * the cleanup code would mean re-locking it first.
1727			 * At this stage it is simpler to free the attachment
 1728			 * using iio_buffer_dmabuf_put().
1729			 */
1730			mutex_unlock(&buffer->dmabufs_mutex);
1731			iio_buffer_dmabuf_put(attach);
1732			return -EBUSY;
1733		}
1734	}
1735
1736	/* Otherwise, add the new attachment to our dmabufs list. */
1737	list_add(&priv->entry, &buffer->dmabufs);
1738	mutex_unlock(&buffer->dmabufs_mutex);
1739
1740	return 0;
1741
1742err_dmabuf_unmap_attachment:
1743	dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
1744err_resv_unlock:
1745	dma_resv_unlock(dmabuf->resv);
1746err_dmabuf_detach:
1747	dma_buf_detach(dmabuf, attach);
1748err_dmabuf_put:
1749	dma_buf_put(dmabuf);
1750err_free_priv:
1751	kfree(priv);
1752
1753	return err;
1754}
1755
1756static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
1757				    int __user *user_req, bool nonblock)
1758{
1759	struct iio_buffer *buffer = ib->buffer;
1760	struct iio_dev *indio_dev = ib->indio_dev;
1761	struct iio_dmabuf_priv *priv;
1762	struct dma_buf *dmabuf;
1763	int dmabuf_fd, ret = -EPERM;
1764
1765	if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
1766		return -EFAULT;
1767
1768	dmabuf = dma_buf_get(dmabuf_fd);
1769	if (IS_ERR(dmabuf))
1770		return PTR_ERR(dmabuf);
1771
1772	guard(mutex)(&buffer->dmabufs_mutex);
1773
1774	list_for_each_entry(priv, &buffer->dmabufs, entry) {
1775		if (priv->attach->dev == indio_dev->dev.parent
1776		    && priv->attach->dmabuf == dmabuf) {
1777			list_del(&priv->entry);
1778
1779			/* Unref the reference from iio_buffer_attach_dmabuf() */
1780			iio_buffer_dmabuf_put(priv->attach);
1781			ret = 0;
1782			break;
1783		}
1784	}
1785
1786	dma_buf_put(dmabuf);
1787
1788	return ret;
1789}
1790
1791static const char *
1792iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
1793{
1794	return "iio";
1795}
1796
1797static void iio_buffer_dma_fence_release(struct dma_fence *fence)
1798{
1799	struct iio_dma_fence *iio_fence =
1800		container_of(fence, struct iio_dma_fence, base);
1801
1802	kfree(iio_fence);
1803}
1804
1805static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
1806	.get_driver_name	= iio_buffer_dma_fence_get_driver_name,
1807	.get_timeline_name	= iio_buffer_dma_fence_get_driver_name,
1808	.release		= iio_buffer_dma_fence_release,
1809};
1810
1811static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
1812				     struct iio_dmabuf __user *iio_dmabuf_req,
1813				     bool nonblock)
1814{
1815	struct iio_buffer *buffer = ib->buffer;
1816	struct iio_dmabuf iio_dmabuf;
1817	struct dma_buf_attachment *attach;
1818	struct iio_dmabuf_priv *priv;
1819	struct iio_dma_fence *fence;
1820	struct dma_buf *dmabuf;
1821	unsigned long timeout;
1822	bool cookie, cyclic, dma_to_ram;
1823	long retl;
1824	u32 seqno;
1825	int ret;
1826
1827	if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
1828		return -EFAULT;
1829
1830	if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
1831		return -EINVAL;
1832
1833	cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
1834
1835	/* Cyclic flag is only supported on output buffers */
1836	if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
1837		return -EINVAL;
1838
1839	dmabuf = dma_buf_get(iio_dmabuf.fd);
1840	if (IS_ERR(dmabuf))
1841		return PTR_ERR(dmabuf);
1842
1843	if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
1844		ret = -EINVAL;
1845		goto err_dmabuf_put;
1846	}
1847
1848	attach = iio_buffer_find_attachment(ib, dmabuf, nonblock);
1849	if (IS_ERR(attach)) {
1850		ret = PTR_ERR(attach);
1851		goto err_dmabuf_put;
1852	}
1853
1854	priv = attach->importer_priv;
1855
1856	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
1857	if (!fence) {
1858		ret = -ENOMEM;
1859		goto err_attachment_put;
1860	}
1861
1862	fence->priv = priv;
1863
1864	seqno = atomic_add_return(1, &priv->seqno);
1865
1866	/*
1867	 * The transfers are guaranteed to be processed in the order they are
1868	 * enqueued, so we can use a simple incrementing sequence number for
1869	 * the dma_fence.
1870	 */
1871	dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
1872		       &priv->lock, priv->context, seqno);
1873
1874	ret = iio_dma_resv_lock(dmabuf, nonblock);
1875	if (ret)
1876		goto err_fence_put;
1877
1878	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
1879	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
1880
1881	/* Make sure we don't have writers */
1882	retl = dma_resv_wait_timeout(dmabuf->resv,
1883				     dma_resv_usage_rw(dma_to_ram),
1884				     true, timeout);
1885	if (retl == 0)
1886		retl = -EBUSY;
1887	if (retl < 0) {
1888		ret = (int)retl;
1889		goto err_resv_unlock;
1890	}
1891
1892	if (buffer->access->lock_queue)
1893		buffer->access->lock_queue(buffer);
1894
1895	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
1896	if (ret)
1897		goto err_queue_unlock;
1898
1899	dma_resv_add_fence(dmabuf->resv, &fence->base,
1900			   dma_to_ram ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
1901	dma_resv_unlock(dmabuf->resv);
1902
1903	cookie = dma_fence_begin_signalling();
1904
1905	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base,
1906					     priv->sgt, iio_dmabuf.bytes_used,
1907					     cyclic);
1908	if (ret) {
1909		/*
1910		 * DMABUF enqueue failed, but we already added the fence.
1911		 * Signal the error through the fence completion mechanism.
1912		 */
1913		iio_buffer_signal_dmabuf_done(&fence->base, ret);
1914	}
1915
1916	if (buffer->access->unlock_queue)
1917		buffer->access->unlock_queue(buffer);
1918
1919	dma_fence_end_signalling(cookie);
1920	dma_buf_put(dmabuf);
1921
1922	return ret;
1923
1924err_queue_unlock:
1925	if (buffer->access->unlock_queue)
1926		buffer->access->unlock_queue(buffer);
1927err_resv_unlock:
1928	dma_resv_unlock(dmabuf->resv);
1929err_fence_put:
1930	dma_fence_put(&fence->base);
1931err_attachment_put:
1932	iio_buffer_dmabuf_put(attach);
1933err_dmabuf_put:
1934	dma_buf_put(dmabuf);
1935
1936	return ret;
1937}
1938
1939static void iio_buffer_cleanup(struct work_struct *work)
1940{
1941	struct iio_dma_fence *fence =
1942		container_of(work, struct iio_dma_fence, work);
1943	struct iio_dmabuf_priv *priv = fence->priv;
1944	struct dma_buf_attachment *attach = priv->attach;
1945
1946	dma_fence_put(&fence->base);
1947	iio_buffer_dmabuf_put(attach);
1948}
1949
1950void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret)
1951{
1952	struct iio_dma_fence *iio_fence =
1953		container_of(fence, struct iio_dma_fence, base);
1954	bool cookie = dma_fence_begin_signalling();
1955
1956	/*
1957	 * Get a reference to the fence, so that it's not freed as soon as
1958	 * it's signaled.
1959	 */
1960	dma_fence_get(fence);
1961
1962	fence->error = ret;
1963	dma_fence_signal(fence);
1964	dma_fence_end_signalling(cookie);
1965
1966	/*
1967	 * The fence will be unref'd in iio_buffer_cleanup.
1968	 * It can't be done here, as the unref functions might try to lock the
1969	 * resv object, which can deadlock.
1970	 */
1971	INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
1972	schedule_work(&iio_fence->work);
1973}
1974EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
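
/*
 * A minimal sketch of how a buffer implementation is expected to use the
 * helper above, assuming its DMA completion callback was handed back the
 * dma_fence that was passed to ->enqueue_dmabuf() (names are hypothetical):
 *
 *	static void example_dma_done(void *data, int status)
 *	{
 *		struct dma_fence *fence = data;
 *
 *		iio_buffer_signal_dmabuf_done(fence, status);
 *	}
 *
 * The fence is signalled with the transfer status, and the attachment
 * reference is dropped later from the work item scheduled above.
 */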
1975
1976static long iio_buffer_chrdev_ioctl(struct file *filp,
1977				    unsigned int cmd, unsigned long arg)
1978{
1979	struct iio_dev_buffer_pair *ib = filp->private_data;
1980	void __user *_arg = (void __user *)arg;
1981	bool nonblock = filp->f_flags & O_NONBLOCK;
1982
1983	switch (cmd) {
1984	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
1985		return iio_buffer_attach_dmabuf(ib, _arg, nonblock);
1986	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
1987		return iio_buffer_detach_dmabuf(ib, _arg, nonblock);
1988	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
1989		return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock);
1990	default:
1991		return -EINVAL;
1992	}
1993}
1994
1995static const struct file_operations iio_buffer_chrdev_fileops = {
1996	.owner = THIS_MODULE,
1997	.llseek = noop_llseek,
1998	.read = iio_buffer_read,
1999	.write = iio_buffer_write,
2000	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
2001	.compat_ioctl = compat_ptr_ioctl,
2002	.poll = iio_buffer_poll,
2003	.release = iio_buffer_chrdev_release,
2004};
2005
2006static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
2007{
2008	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2009	int __user *ival = (int __user *)arg;
2010	struct iio_dev_buffer_pair *ib;
2011	struct iio_buffer *buffer;
2012	int fd, idx, ret;
2013
2014	if (copy_from_user(&idx, ival, sizeof(idx)))
2015		return -EFAULT;
2016
2017	if (idx >= iio_dev_opaque->attached_buffers_cnt)
2018		return -ENODEV;
2019
2020	iio_device_get(indio_dev);
2021
2022	buffer = iio_dev_opaque->attached_buffers[idx];
2023
2024	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
2025		ret = -EBUSY;
2026		goto error_iio_dev_put;
2027	}
2028
2029	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
2030	if (!ib) {
2031		ret = -ENOMEM;
2032		goto error_clear_busy_bit;
2033	}
2034
2035	ib->indio_dev = indio_dev;
2036	ib->buffer = buffer;
2037
2038	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
2039			      ib, O_RDWR | O_CLOEXEC);
2040	if (fd < 0) {
2041		ret = fd;
2042		goto error_free_ib;
2043	}
2044
2045	if (copy_to_user(ival, &fd, sizeof(fd))) {
2046		/*
2047		 * "Leak" the fd, as there's not much we can do about this
2048		 * anyway. 'fd' might have been closed already, as
2049		 * anon_inode_getfd() called fd_install() on it, which made
2050		 * it reachable by userland.
2051		 *
2052		 * Instead of allowing a malicious user to play tricks with
2053		 * us, rely on the process exit path to do any necessary
2054		 * cleanup, as in releasing the file, if still needed.
2055		 */
2056		return -EFAULT;
2057	}
2058
2059	return 0;
2060
2061error_free_ib:
2062	kfree(ib);
2063error_clear_busy_bit:
2064	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
2065error_iio_dev_put:
2066	iio_device_put(indio_dev);
2067	return ret;
2068}
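
/*
 * Userspace counterpart of the ioctl above, with 'dev_fd' an open
 * /dev/iio:deviceX file descriptor (placeholder names, no error handling):
 *
 *	char scan[64];
 *	int fd = 0;				// index of the buffer to open
 *
 *	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &fd) == 0)
 *		read(fd, scan, sizeof(scan));	// fd now refers to buffer 0
 */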
2069
2070static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
2071				    unsigned int cmd, unsigned long arg)
2072{
2073	switch (cmd) {
2074	case IIO_BUFFER_GET_FD_IOCTL:
2075		return iio_device_buffer_getfd(indio_dev, arg);
2076	default:
2077		return IIO_IOCTL_UNHANDLED;
2078	}
2079}
2080
2081static int iio_channel_validate_scan_type(struct device *dev, int ch,
2082					  const struct iio_scan_type *scan_type)
2083{
2084	/* Verify that sample bits fit into storage */
2085	if (scan_type->storagebits < scan_type->realbits + scan_type->shift) {
2086		dev_err(dev,
2087			"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
2088			ch, scan_type->storagebits,
2089			scan_type->realbits,
2090			scan_type->shift);
2091		return -EINVAL;
2092	}
2093
2094	return 0;
2095}
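
/*
 * As an illustration, a channel declared with
 *
 *	.scan_type = {
 *		.sign = 's',
 *		.realbits = 12,
 *		.storagebits = 16,
 *		.shift = 4,
 *		.endianness = IIO_BE,
 *	},
 *
 * passes the check above (12 + 4 <= 16), while e.g. .shift = 8 would be
 * rejected because the shifted sample no longer fits in the storage word.
 */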
2096
2097static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
2098					     struct iio_dev *indio_dev,
2099					     int index)
2100{
2101	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2102	unsigned int masklength = iio_get_masklength(indio_dev);
2103	struct iio_dev_attr *p;
2104	const struct iio_dev_attr *id_attr;
2105	struct attribute **attr;
2106	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
2107	const struct iio_chan_spec *channels;
2108
2109	buffer_attrcount = 0;
2110	if (buffer->attrs) {
2111		while (buffer->attrs[buffer_attrcount])
2112			buffer_attrcount++;
2113	}
2114	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
2115
2116	scan_el_attrcount = 0;
2117	INIT_LIST_HEAD(&buffer->buffer_attr_list);
2118	channels = indio_dev->channels;
2119	if (channels) {
2120		/* Validate scan types and create scan element attributes for each channel */
2121		for (i = 0; i < indio_dev->num_channels; i++) {
2122			const struct iio_scan_type *scan_type;
2123
2124			if (channels[i].scan_index < 0)
2125				continue;
2126
2127			if (channels[i].has_ext_scan_type) {
2128				int j;
2129
2130				/*
2131				 * get_current_scan_type is required when using
2132				 * extended scan types.
2133				 */
2134				if (!indio_dev->info->get_current_scan_type) {
2135					ret = -EINVAL;
2136					goto error_cleanup_dynamic;
2137				}
2138
2139				for (j = 0; j < channels[i].num_ext_scan_type; j++) {
2140					scan_type = &channels[i].ext_scan_type[j];
2141
2142					ret = iio_channel_validate_scan_type(
2143						&indio_dev->dev, i, scan_type);
2144					if (ret)
2145						goto error_cleanup_dynamic;
2146				}
2147			} else {
2148				scan_type = &channels[i].scan_type;
2149
2150				ret = iio_channel_validate_scan_type(
2151						&indio_dev->dev, i, scan_type);
2152				if (ret)
2153					goto error_cleanup_dynamic;
2154			}
2155
2156			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
2157							   &channels[i]);
2158			if (ret < 0)
2159				goto error_cleanup_dynamic;
2160			scan_el_attrcount += ret;
2161			if (channels[i].type == IIO_TIMESTAMP)
2162				iio_dev_opaque->scan_index_timestamp =
2163					channels[i].scan_index;
2164		}
2165		if (masklength && !buffer->scan_mask) {
2166			buffer->scan_mask = bitmap_zalloc(masklength,
2167							  GFP_KERNEL);
2168			if (!buffer->scan_mask) {
2169				ret = -ENOMEM;
2170				goto error_cleanup_dynamic;
2171			}
2172		}
2173	}
2174
2175	attrn = buffer_attrcount + scan_el_attrcount;
2176	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
2177	if (!attr) {
2178		ret = -ENOMEM;
2179		goto error_free_scan_mask;
2180	}
2181
2182	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
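
	/*
	 * Buffers that cannot be resized, or that have a fixed watermark,
	 * get the read-only variants of the corresponding attributes in
	 * place of the default writable ones copied above.
	 */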
2183	if (!buffer->access->set_length)
2184		attr[0] = &dev_attr_length_ro.attr;
2185
2186	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
2187		attr[2] = &dev_attr_watermark_ro.attr;
2188
2189	if (buffer->attrs)
2190		for (i = 0; (id_attr = buffer->attrs[i]); i++)
2192			attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
2193				(struct attribute *)&id_attr->dev_attr.attr;
2194
2195	buffer->buffer_group.attrs = attr;
2196
2197	for (i = 0; i < buffer_attrcount; i++) {
2198		struct attribute *wrapped;
2199
2200		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
2201		if (!wrapped) {
2202			ret = -ENOMEM;
2203			goto error_free_buffer_attrs;
2204		}
2205		attr[i] = wrapped;
2206	}
2207
2208	attrn = 0;
2209	list_for_each_entry(p, &buffer->buffer_attr_list, l)
2210		attr[attrn++] = &p->dev_attr.attr;
2211
2212	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
2213	if (!buffer->buffer_group.name) {
2214		ret = -ENOMEM;
2215		goto error_free_buffer_attrs;
2216	}
2217
2218	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
2219	if (ret)
2220		goto error_free_buffer_attr_group_name;
2221
2222	/* we only need to register the legacy groups for the first buffer */
2223	if (index > 0)
2224		return 0;
2225
2226	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
2227						      buffer_attrcount,
2228						      scan_el_attrcount);
2229	if (ret)
2230		goto error_free_buffer_attr_group_name;
2231
2232	return 0;
2233
2234error_free_buffer_attr_group_name:
2235	kfree(buffer->buffer_group.name);
2236error_free_buffer_attrs:
2237	kfree(buffer->buffer_group.attrs);
2238error_free_scan_mask:
2239	bitmap_free(buffer->scan_mask);
2240error_cleanup_dynamic:
2241	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
2242
2243	return ret;
2244}
2245
2246static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
2247					     struct iio_dev *indio_dev,
2248					     int index)
2249{
2250	if (index == 0)
2251		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
2252	bitmap_free(buffer->scan_mask);
2253	kfree(buffer->buffer_group.name);
2254	kfree(buffer->buffer_group.attrs);
2255	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
2256}
2257
2258int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
2259{
2260	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2261	const struct iio_chan_spec *channels;
2262	struct iio_buffer *buffer;
2263	int ret, i, idx;
2264	size_t sz;
2265
2266	channels = indio_dev->channels;
2267	if (channels) {
2268		int ml = 0;
2269
2270		for (i = 0; i < indio_dev->num_channels; i++)
2271			ml = max(ml, channels[i].scan_index + 1);
2272		ACCESS_PRIVATE(indio_dev, masklength) = ml;
2273	}
2274
2275	if (!iio_dev_opaque->attached_buffers_cnt)
2276		return 0;
2277
2278	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
2279		buffer = iio_dev_opaque->attached_buffers[idx];
2280		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
2281		if (ret)
2282			goto error_unwind_sysfs_and_mask;
2283	}
2284
2285	sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
2286	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
2287	if (!iio_dev_opaque->buffer_ioctl_handler) {
2288		ret = -ENOMEM;
2289		goto error_unwind_sysfs_and_mask;
2290	}
2291
2292	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
2293	iio_device_ioctl_handler_register(indio_dev,
2294					  iio_dev_opaque->buffer_ioctl_handler);
2295
2296	return 0;
2297
2298error_unwind_sysfs_and_mask:
2299	while (idx--) {
2300		buffer = iio_dev_opaque->attached_buffers[idx];
2301		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
2302	}
2303	return ret;
2304}
2305
2306void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
2307{
2308	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2309	struct iio_buffer *buffer;
2310	int i;
2311
2312	if (!iio_dev_opaque->attached_buffers_cnt)
2313		return;
2314
2315	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
2316	kfree(iio_dev_opaque->buffer_ioctl_handler);
2317
2318	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
2319		buffer = iio_dev_opaque->attached_buffers[i];
2320		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
2321	}
2322}
2323
2324/**
2325 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
2326 * @indio_dev: the iio device
2327 * @mask: scan mask to be checked
2328 *
2329 * Return true if exactly one bit is set in the scan mask, false otherwise. It
2330 * can be used for devices where only one channel can be active for sampling at
2331 * a time.
2332 */
2333bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
2334				   const unsigned long *mask)
2335{
2336	return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1;
2337}
2338EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
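
/*
 * Typical use, assuming the driver wires this helper through the
 * validate_scan_mask() hook of its struct iio_info (example_* names are
 * hypothetical):
 *
 *	static const struct iio_info example_info = {
 *		.read_raw = example_read_raw,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */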
2339
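/*
 * Each iio_demux_table entry on buffer->demux_list describes a contiguous
 * byte range to copy from the full device scan ('from') into this buffer's
 * bounce area ('to'), so a client only receives the channels it enabled.
 */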
2340static const void *iio_demux(struct iio_buffer *buffer,
2341			     const void *datain)
2342{
2343	struct iio_demux_table *t;
2344
2345	if (list_empty(&buffer->demux_list))
2346		return datain;
2347	list_for_each_entry(t, &buffer->demux_list, l)
2348		memcpy(buffer->demux_bounce + t->to,
2349		       datain + t->from, t->length);
2350
2351	return buffer->demux_bounce;
2352}
2353
2354static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
2355{
2356	const void *dataout = iio_demux(buffer, data);
2357	int ret;
2358
2359	ret = buffer->access->store_to(buffer, dataout);
2360	if (ret)
2361		return ret;
2362
2363	/*
2364	 * We can't just test for watermark to decide if we wake the poll queue
2365	 * because a read may request fewer samples than the watermark.
2366	 */
2367	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
2368	return 0;
2369}
2370
2371/**
2372 * iio_push_to_buffers() - push to a registered buffer.
2373 * @indio_dev:		iio_dev structure for device.
2374 * @data:		Full scan, i.e. sample data for all enabled channels.
2375 */
2376int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
2377{
2378	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2379	int ret;
2380	struct iio_buffer *buf;
2381
2382	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
2383		ret = iio_push_to_buffer(buf, data);
2384		if (ret < 0)
2385			return ret;
2386	}
2387
2388	return 0;
2389}
2390EXPORT_SYMBOL_GPL(iio_push_to_buffers);
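
/*
 * A common caller is a driver's trigger handler, which assembles one full
 * scan and hands it to the core. A sketch, with hypothetical example_* names:
 *
 *	static irqreturn_t example_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct example_state *st = iio_priv(indio_dev);
 *
 *		example_read_scan(st, st->scan);
 *		iio_push_to_buffers(indio_dev, st->scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */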
2391
2392/**
2393 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
2394 *    no alignment or space requirements.
2395 * @indio_dev:		iio_dev structure for device.
2396 * @data:		channel data excluding the timestamp.
2397 * @data_sz:		size of @data, in bytes.
2398 * @timestamp:		timestamp for the sample data.
2399 *
2400 * This special variant of iio_push_to_buffers_with_timestamp() does
2401 * not require space for the timestamp, nor 8-byte alignment of the data.
2402 * It does, however, require an allocation on the first call and an extra
2403 * copy on every call, so it should be avoided where possible.
2404 */
2405int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
2406					  const void *data,
2407					  size_t data_sz,
2408					  int64_t timestamp)
2409{
2410	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2411
2412	/*
2413	 * Conservative estimate - it is always safe to copy the smaller of the
2414	 * data provided and the length of the destination buffer.
2415	 * This relaxed limit allows calling drivers to be lax about tracking
2416	 * the size of the data they push, at the cost of copying some
2417	 * padding unnecessarily.
2418	 */
2419	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
2420	if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
2421		void *bb;
2422
2423		bb = devm_krealloc(&indio_dev->dev,
2424				   iio_dev_opaque->bounce_buffer,
2425				   indio_dev->scan_bytes, GFP_KERNEL);
2426		if (!bb)
2427			return -ENOMEM;
2428		iio_dev_opaque->bounce_buffer = bb;
2429		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
2430	}
2431	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
2432	return iio_push_to_buffers_with_timestamp(indio_dev,
2433						  iio_dev_opaque->bounce_buffer,
2434						  timestamp);
2435}
2436EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
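
/*
 * A sketch of the intended use, for a driver whose hardware produces packed
 * samples that are not 8-byte aligned and leave no room for the timestamp
 * (hypothetical names):
 *
 *	u8 raw[3];	// e.g. one packed 18-bit sample
 *
 *	example_read_sample(st, raw);
 *	iio_push_to_buffers_with_ts_unaligned(indio_dev, raw, sizeof(raw),
 *					      iio_get_time_ns(indio_dev));
 */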
2437
2438/**
2439 * iio_buffer_release() - Free a buffer's resources
2440 * @ref: Pointer to the kref embedded in the iio_buffer struct
2441 *
2442 * This function is called when the last reference to the buffer has been
2443 * dropped. It will typically free all resources allocated by the buffer. Do not
2444 * call this function manually, always use iio_buffer_put() when done using a
2445 * call this function manually; always use iio_buffer_put() when done using a
2446 */
2447static void iio_buffer_release(struct kref *ref)
2448{
2449	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
2450
2451	mutex_destroy(&buffer->dmabufs_mutex);
2452	buffer->access->release(buffer);
2453}
2454
2455/**
2456 * iio_buffer_get() - Grab a reference to the buffer
2457 * @buffer: The buffer to grab a reference for, may be NULL
2458 *
2459 * Returns the pointer to the buffer that was passed into the function.
2460 */
2461struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
2462{
2463	if (buffer)
2464		kref_get(&buffer->ref);
2465
2466	return buffer;
2467}
2468EXPORT_SYMBOL_GPL(iio_buffer_get);
2469
2470/**
2471 * iio_buffer_put() - Release the reference to the buffer
2472 * @buffer: The buffer to release the reference for, may be NULL
2473 */
2474void iio_buffer_put(struct iio_buffer *buffer)
2475{
2476	if (buffer)
2477		kref_put(&buffer->ref, iio_buffer_release);
2478}
2479EXPORT_SYMBOL_GPL(iio_buffer_put);
2480
2481/**
2482 * iio_device_attach_buffer - Attach a buffer to an IIO device
2483 * @indio_dev: The device the buffer should be attached to
2484 * @buffer: The buffer to attach to the device
2485 *
2486 * Return: 0 on success, negative error code on failure.
2487 *
2488 * This function attaches a buffer to an IIO device. The buffer stays attached
2489 * to the device until the device is freed. For legacy reasons, the first
2490 * attached buffer will also be assigned to 'indio_dev->buffer'.
2491 * The array allocated here will be freed via the iio_device_detach_buffers()
2492 * call, which is handled by iio_device_free().
2493 */
2494int iio_device_attach_buffer(struct iio_dev *indio_dev,
2495			     struct iio_buffer *buffer)
2496{
2497	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2498	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
2499	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
2500
2501	cnt++;
2502
2503	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
2504	if (!new)
2505		return -ENOMEM;
2506	iio_dev_opaque->attached_buffers = new;
2507
2508	buffer = iio_buffer_get(buffer);
2509
2510	/* first buffer is legacy; attach it to the IIO device directly */
2511	if (!indio_dev->buffer)
2512		indio_dev->buffer = buffer;
2513
2514	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
2515	iio_dev_opaque->attached_buffers_cnt = cnt;
2516
2517	return 0;
2518}
2519EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
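
/*
 * Most drivers reach this function through a buffer setup helper. A direct
 * sketch, assuming a kfifo buffer from iio_kfifo_allocate() (error handling
 * abbreviated):
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *
 * The allocation reference stays with the caller and is dropped separately
 * (e.g. with iio_kfifo_free() on the teardown path); the reference taken
 * here is released when the device itself is freed.
 */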