   1/*
   2 * ispqueue.c
   3 *
   4 * TI OMAP3 ISP - Video buffers queue handling
   5 *
   6 * Copyright (C) 2010 Nokia Corporation
   7 *
   8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
   9 *	     Sakari Ailus <sakari.ailus@iki.fi>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License version 2 as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23 * 02110-1301 USA
  24 */
  25
  26#include <asm/cacheflush.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mm.h>
  29#include <linux/pagemap.h>
  30#include <linux/poll.h>
  31#include <linux/scatterlist.h>
  32#include <linux/sched.h>
  33#include <linux/slab.h>
  34#include <linux/vmalloc.h>
  35
  36#include "ispqueue.h"
  37
  38/* -----------------------------------------------------------------------------
  39 * Video buffers management
  40 */
  41
   42/*
   43 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
   44 *
   45 * The typical operation required here is cache invalidation across the
   46 * (userspace) buffer address range, and it _must_ be done at QBUF time
   47 * (and *only* at QBUF).
   48 *
   49 * We try to use the optimal cache invalidation function:
   50 * - dmac_map_area:
   51 *    - used when the number of pages is _low_.
   52 *    - becomes quite slow as the number of pages increases.
   53 *       - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
   54 *       - for a 5 Mpix buffer (2491 pages) it takes 25-50 ms.
   55 *
   56 * - flush_cache_all:
   57 *    - used when the number of pages is _high_.
   58 *    - time taken is in the 500-900 us range.
   59 *    - has a higher penalty, as the whole dcache + icache is invalidated.
   60 */
  61/*
  62 * FIXME: dmac_inv_range crashes randomly on the user space buffer
  63 *        address. Fall back to flush_cache_all for now.
  64 */
  65#define ISP_CACHE_FLUSH_PAGES_MAX       0
  66
  67static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
  68{
  69	if (buf->skip_cache)
  70		return;
  71
  72	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
  73	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
  74		flush_cache_all();
  75	else {
  76		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
  77			      DMA_FROM_DEVICE);
  78		outer_inv_range(buf->vbuf.m.userptr,
  79				buf->vbuf.m.userptr + buf->vbuf.length);
  80	}
  81}
  82
  83/*
  84 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
  85 *
   86 * Lock the VMAs underlying the given buffer into memory. This prevents the
  87 * userspace buffer mapping from being swapped out, making VIPT cache handling
  88 * easier.
  89 *
   90 * Note that the pages will not be freed, as the buffers have been locked to
   91 * memory by a call to get_user_pages(), but the userspace mapping could
  92 * still disappear if the VMAs are not locked. This is caused by the memory
  93 * management code trying to be as lock-less as possible, which results in the
  94 * userspace mapping manager not finding out that the pages are locked under
  95 * some conditions.
  96 */
  97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
  98{
  99	struct vm_area_struct *vma;
 100	unsigned long start;
 101	unsigned long end;
 102	int ret = 0;
 103
 104	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
 105		return 0;
 106
  107	/* We can be called from workqueue context to unlock the VMAs when the
  108	 * task that owns them dies. In that case there's no current memory
  109	 * management context, so unlocking can't be performed, but the VMAs have
  110	 * been or are being destroyed anyway, so it doesn't really matter.
  111	 */
 112	if (!current || !current->mm)
 113		return lock ? -EINVAL : 0;
 114
 115	start = buf->vbuf.m.userptr;
 116	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 117
 118	down_write(&current->mm->mmap_sem);
 119	spin_lock(&current->mm->page_table_lock);
 120
 121	do {
 122		vma = find_vma(current->mm, start);
 123		if (vma == NULL) {
 124			ret = -EFAULT;
 125			goto out;
 126		}
 127
 128		if (lock)
 129			vma->vm_flags |= VM_LOCKED;
 130		else
 131			vma->vm_flags &= ~VM_LOCKED;
 132
 133		start = vma->vm_end + 1;
 134	} while (vma->vm_end < end);
 135
 136	if (lock)
 137		buf->vm_flags |= VM_LOCKED;
 138	else
 139		buf->vm_flags &= ~VM_LOCKED;
 140
 141out:
 142	spin_unlock(&current->mm->page_table_lock);
 143	up_write(&current->mm->mmap_sem);
 144	return ret;
 145}
 146
 147/*
 148 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
 149 *
 150 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 151 * page.
 152 */
 153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
 154{
 155	struct scatterlist *sglist;
 156	unsigned int npages;
 157	unsigned int i;
 158	void *addr;
 159
 160	addr = buf->vaddr;
 161	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
 162
 163	sglist = vmalloc(npages * sizeof(*sglist));
 164	if (sglist == NULL)
 165		return -ENOMEM;
 166
 167	sg_init_table(sglist, npages);
 168
 169	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
 170		struct page *page = vmalloc_to_page(addr);
 171
 172		if (page == NULL || PageHighMem(page)) {
 173			vfree(sglist);
 174			return -EINVAL;
 175		}
 176
 177		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
 178	}
 179
 180	buf->sglen = npages;
 181	buf->sglist = sglist;
 182
 183	return 0;
 184}
 185
 186/*
 187 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
 188 *
 189 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
 190 */
 191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
 192{
 193	struct scatterlist *sglist;
 194	unsigned int offset = buf->offset;
 195	unsigned int i;
 196
 197	sglist = vmalloc(buf->npages * sizeof(*sglist));
 198	if (sglist == NULL)
 199		return -ENOMEM;
 200
 201	sg_init_table(sglist, buf->npages);
 202
 203	for (i = 0; i < buf->npages; ++i) {
 204		if (PageHighMem(buf->pages[i])) {
 205			vfree(sglist);
 206			return -EINVAL;
 207		}
 208
 209		sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
 210			    offset);
 211		offset = 0;
 212	}
 213
 214	buf->sglen = buf->npages;
 215	buf->sglist = sglist;
 216
 217	return 0;
 218}
 219
 220/*
 221 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
 222 *
 223 * Create a scatter list of physically contiguous pages starting at the buffer
 224 * memory physical address.
 225 */
 226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
 227{
 228	struct scatterlist *sglist;
 229	unsigned int offset = buf->offset;
 230	unsigned long pfn = buf->paddr >> PAGE_SHIFT;
 231	unsigned int i;
 232
 233	sglist = vmalloc(buf->npages * sizeof(*sglist));
 234	if (sglist == NULL)
 235		return -ENOMEM;
 236
 237	sg_init_table(sglist, buf->npages);
 238
 239	for (i = 0; i < buf->npages; ++i, ++pfn) {
 240		sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
 241			    offset);
 242		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
 243		 * manually.
 244		 */
 245		sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
 246		offset = 0;
 247	}
 248
 249	buf->sglen = buf->npages;
 250	buf->sglist = sglist;
 251
 252	return 0;
 253}
 254
 255/*
 256 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 257 *
  258 * Release pages locked by a call to isp_video_buffer_prepare_user and free the
 259 * pages table.
 260 */
 261static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
 262{
 263	enum dma_data_direction direction;
 264	unsigned int i;
 265
 266	if (buf->queue->ops->buffer_cleanup)
 267		buf->queue->ops->buffer_cleanup(buf);
 268
 269	if (!(buf->vm_flags & VM_PFNMAP)) {
 270		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
 271			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 272		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
 273			     direction);
 274	}
 275
 276	vfree(buf->sglist);
 277	buf->sglist = NULL;
 278	buf->sglen = 0;
 279
 280	if (buf->pages != NULL) {
 281		isp_video_buffer_lock_vma(buf, 0);
 282
 283		for (i = 0; i < buf->npages; ++i)
 284			page_cache_release(buf->pages[i]);
 285
 286		vfree(buf->pages);
 287		buf->pages = NULL;
 288	}
 289
 290	buf->npages = 0;
 291	buf->skip_cache = false;
 292}
 293
 294/*
 295 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
 296 *
 297 * This function creates a list of pages for a userspace VMA. The number of
 298 * pages is first computed based on the buffer size, and pages are then
 299 * retrieved by a call to get_user_pages.
 300 *
 301 * Pages are pinned to memory by get_user_pages, making them available for DMA
  302 * transfers. However, due to memory management optimizations, get_user_pages
  303 * doesn't seem to guarantee that the pinned pages will not be written
 304 * to swap and removed from the userspace mapping(s). When this happens, a page
 305 * fault can be generated when accessing those unmapped pages.
 306 *
 307 * If the fault is triggered by a page table walk caused by VIPT cache
 308 * management operations, the page fault handler might oops if the MM semaphore
 309 * is held, as it can't handle kernel page faults in that case. To fix that, a
 310 * fixup entry needs to be added to the cache management code, or the userspace
 311 * VMA must be locked to avoid removing pages from the userspace mapping in the
 312 * first place.
 313 *
 314 * If the number of pages retrieved is smaller than the number required by the
 315 * buffer size, the function returns -EFAULT.
 316 */
 317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
 318{
 319	unsigned long data;
 320	unsigned int first;
 321	unsigned int last;
 322	int ret;
 323
 324	data = buf->vbuf.m.userptr;
 325	first = (data & PAGE_MASK) >> PAGE_SHIFT;
 326	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
 327
 328	buf->offset = data & ~PAGE_MASK;
 329	buf->npages = last - first + 1;
 330	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
 331	if (buf->pages == NULL)
 332		return -ENOMEM;
 333
 334	down_read(&current->mm->mmap_sem);
 335	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
 336			     buf->npages,
 337			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
 338			     buf->pages, NULL);
 339	up_read(&current->mm->mmap_sem);
 340
 341	if (ret != buf->npages) {
 342		buf->npages = ret < 0 ? 0 : ret;
 343		isp_video_buffer_cleanup(buf);
 344		return -EFAULT;
 345	}
 346
 347	ret = isp_video_buffer_lock_vma(buf, 1);
 348	if (ret < 0)
 349		isp_video_buffer_cleanup(buf);
 350
 351	return ret;
 352}
 353
 354/*
 355 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 356 *
 357 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 358 * memory and if they span a single VMA.
 359 *
 360 * Return 0 if the buffer is valid, or -EFAULT otherwise.
 361 */
 362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
 363{
 364	struct vm_area_struct *vma;
 365	unsigned long prev_pfn;
 366	unsigned long this_pfn;
 367	unsigned long start;
 368	unsigned long end;
 369	dma_addr_t pa;
 370	int ret = -EFAULT;
 371
 372	start = buf->vbuf.m.userptr;
 373	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 374
 375	buf->offset = start & ~PAGE_MASK;
 376	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
 377	buf->pages = NULL;
 378
 379	down_read(&current->mm->mmap_sem);
 380	vma = find_vma(current->mm, start);
 381	if (vma == NULL || vma->vm_end < end)
 382		goto done;
 383
 384	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
 385		ret = follow_pfn(vma, start, &this_pfn);
 386		if (ret)
 387			goto done;
 388
 389		if (prev_pfn == 0)
 390			pa = this_pfn << PAGE_SHIFT;
 391		else if (this_pfn != prev_pfn + 1) {
 392			ret = -EFAULT;
 393			goto done;
 394		}
 395
 396		prev_pfn = this_pfn;
 397	}
 398
 399	buf->paddr = pa + buf->offset;
 400	ret = 0;
 401
 402done:
 403	up_read(&current->mm->mmap_sem);
 404	return ret;
 405}
 406
 407/*
 408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 409 *
 410 * This function locates the VMAs for the buffer's userspace address and checks
 411 * that their flags match. The only flag that we need to care for at the moment
 412 * is VM_PFNMAP.
 413 *
 414 * The buffer vm_flags field is set to the first VMA flags.
 415 *
 416 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 417 * have incompatible flags.
 418 */
 419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
 420{
 421	struct vm_area_struct *vma;
 422	pgprot_t vm_page_prot;
 423	unsigned long start;
 424	unsigned long end;
 425	int ret = -EFAULT;
 426
 427	start = buf->vbuf.m.userptr;
 428	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 429
 430	down_read(&current->mm->mmap_sem);
 431
 432	do {
 433		vma = find_vma(current->mm, start);
 434		if (vma == NULL)
 435			goto done;
 436
 437		if (start == buf->vbuf.m.userptr) {
 438			buf->vm_flags = vma->vm_flags;
 439			vm_page_prot = vma->vm_page_prot;
 440		}
 441
 442		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
 443			goto done;
 444
 445		if (vm_page_prot != vma->vm_page_prot)
 446			goto done;
 447
 448		start = vma->vm_end + 1;
 449	} while (vma->vm_end < end);
 450
 451	/* Skip cache management to enhance performances for non-cached or
 452	 * write-combining buffers.
 453	 */
 454	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
 455	    vm_page_prot == pgprot_writecombine(vm_page_prot))
 456		buf->skip_cache = true;
 457
 458	ret = 0;
 459
 460done:
 461	up_read(&current->mm->mmap_sem);
 462	return ret;
 463}
 464
 465/*
 466 * isp_video_buffer_prepare - Make a buffer ready for operation
 467 *
 468 * Preparing a buffer involves:
 469 *
 470 * - validating VMAs (userspace buffers only)
 471 * - locking pages and VMAs into memory (userspace buffers only)
 472 * - building page and scatter-gather lists
 473 * - mapping buffers for DMA operation
 474 * - performing driver-specific preparation
 475 *
 476 * The function must be called in userspace context with a valid mm context
 477 * (this excludes cleanup paths such as sys_close when the userspace process
 478 * segfaults).
 479 */
 480static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
 481{
 482	enum dma_data_direction direction;
 483	int ret;
 484
 485	switch (buf->vbuf.memory) {
 486	case V4L2_MEMORY_MMAP:
 487		ret = isp_video_buffer_sglist_kernel(buf);
 488		break;
 489
 490	case V4L2_MEMORY_USERPTR:
 491		ret = isp_video_buffer_prepare_vm_flags(buf);
 492		if (ret < 0)
 493			return ret;
 494
 495		if (buf->vm_flags & VM_PFNMAP) {
 496			ret = isp_video_buffer_prepare_pfnmap(buf);
 497			if (ret < 0)
 498				return ret;
 499
 500			ret = isp_video_buffer_sglist_pfnmap(buf);
 501		} else {
 502			ret = isp_video_buffer_prepare_user(buf);
 503			if (ret < 0)
 504				return ret;
 505
 506			ret = isp_video_buffer_sglist_user(buf);
 507		}
 508		break;
 509
 510	default:
 511		return -EINVAL;
 512	}
 513
 514	if (ret < 0)
 515		goto done;
 516
 517	if (!(buf->vm_flags & VM_PFNMAP)) {
 518		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
 519			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 520		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
 521				 direction);
 522		if (ret != buf->sglen) {
 523			ret = -EFAULT;
 524			goto done;
 525		}
 526	}
 527
 528	if (buf->queue->ops->buffer_prepare)
 529		ret = buf->queue->ops->buffer_prepare(buf);
 530
 531done:
 532	if (ret < 0) {
 533		isp_video_buffer_cleanup(buf);
 534		return ret;
 535	}
 536
 537	return ret;
 538}
 539
 540/*
  541 * isp_video_buffer_query - Query the status of a given buffer
 542 *
 543 * Locking: must be called with the queue lock held.
 544 */
 545static void isp_video_buffer_query(struct isp_video_buffer *buf,
 546				   struct v4l2_buffer *vbuf)
 547{
 548	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
 549
 550	if (buf->vma_use_count)
 551		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
 552
 553	switch (buf->state) {
 554	case ISP_BUF_STATE_ERROR:
 555		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
 556	case ISP_BUF_STATE_DONE:
 557		vbuf->flags |= V4L2_BUF_FLAG_DONE;
 558	case ISP_BUF_STATE_QUEUED:
 559	case ISP_BUF_STATE_ACTIVE:
 560		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
 561		break;
 562	case ISP_BUF_STATE_IDLE:
 563	default:
 564		break;
 565	}
 566}
 567
 568/*
 569 * isp_video_buffer_wait - Wait for a buffer to be ready
 570 *
 571 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 572 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 573 *
 574 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
 575 * queue using the same condition.
 576 */
 577static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
 578{
 579	if (nonblocking) {
 580		return (buf->state != ISP_BUF_STATE_QUEUED &&
 581			buf->state != ISP_BUF_STATE_ACTIVE)
 582			? 0 : -EAGAIN;
 583	}
 584
 585	return wait_event_interruptible(buf->wait,
 586		buf->state != ISP_BUF_STATE_QUEUED &&
 587		buf->state != ISP_BUF_STATE_ACTIVE);
 588}
 589
 590/* -----------------------------------------------------------------------------
 591 * Queue management
 592 */
 593
 594/*
 595 * isp_video_queue_free - Free video buffers memory
 596 *
 597 * Buffers can only be freed if the queue isn't streaming and if no buffer is
  598 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 599 *
 600 * This function must be called with the queue lock held.
 601 */
 602static int isp_video_queue_free(struct isp_video_queue *queue)
 603{
 604	unsigned int i;
 605
 606	if (queue->streaming)
 607		return -EBUSY;
 608
 609	for (i = 0; i < queue->count; ++i) {
 610		if (queue->buffers[i]->vma_use_count != 0)
 611			return -EBUSY;
 612	}
 613
 614	for (i = 0; i < queue->count; ++i) {
 615		struct isp_video_buffer *buf = queue->buffers[i];
 616
 617		isp_video_buffer_cleanup(buf);
 618
 619		vfree(buf->vaddr);
 620		buf->vaddr = NULL;
 621
 622		kfree(buf);
 623		queue->buffers[i] = NULL;
 624	}
 625
 626	INIT_LIST_HEAD(&queue->queue);
 627	queue->count = 0;
 628	return 0;
 629}
 630
 631/*
 632 * isp_video_queue_alloc - Allocate video buffers memory
 633 *
 634 * This function must be called with the queue lock held.
 635 */
 636static int isp_video_queue_alloc(struct isp_video_queue *queue,
 637				 unsigned int nbuffers,
 638				 unsigned int size, enum v4l2_memory memory)
 639{
 640	struct isp_video_buffer *buf;
 641	unsigned int i;
 642	void *mem;
 643	int ret;
 644
 645	/* Start by freeing the buffers. */
 646	ret = isp_video_queue_free(queue);
 647	if (ret < 0)
 648		return ret;
 649
  650	/* Bail out if no buffers should be allocated. */
 651	if (nbuffers == 0)
 652		return 0;
 653
 654	/* Initialize the allocated buffers. */
 655	for (i = 0; i < nbuffers; ++i) {
 656		buf = kzalloc(queue->bufsize, GFP_KERNEL);
 657		if (buf == NULL)
 658			break;
 659
 660		if (memory == V4L2_MEMORY_MMAP) {
 661			/* Allocate video buffers memory for mmap mode. Align
 662			 * the size to the page size.
 663			 */
 664			mem = vmalloc_32_user(PAGE_ALIGN(size));
 665			if (mem == NULL) {
 666				kfree(buf);
 667				break;
 668			}
 669
 670			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
 671			buf->vaddr = mem;
 672		}
 673
 674		buf->vbuf.index = i;
 675		buf->vbuf.length = size;
 676		buf->vbuf.type = queue->type;
 677		buf->vbuf.field = V4L2_FIELD_NONE;
 678		buf->vbuf.memory = memory;
 679
 680		buf->queue = queue;
 681		init_waitqueue_head(&buf->wait);
 682
 683		queue->buffers[i] = buf;
 684	}
 685
 686	if (i == 0)
 687		return -ENOMEM;
 688
 689	queue->count = i;
 690	return nbuffers;
 691}
 692
 693/**
 694 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 695 * @queue: Video buffers queue
 696 *
 697 * Free all allocated resources and clean up the video buffers queue. The queue
 698 * must not be busy (no ongoing video stream) and buffers must have been
 699 * unmapped.
 700 *
 701 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 702 * unmapped.
 703 */
 704int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
 705{
 706	return isp_video_queue_free(queue);
 707}
 708
 709/**
 710 * omap3isp_video_queue_init - Initialize the video buffers queue
 711 * @queue: Video buffers queue
 712 * @type: V4L2 buffer type (capture or output)
 713 * @ops: Driver-specific queue operations
 714 * @dev: Device used for DMA operations
 715 * @bufsize: Size of the driver-specific buffer structure
 716 *
 717 * Initialize the video buffers queue with the supplied parameters.
 718 *
 719 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 720 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 721 *
 722 * Buffer objects will be allocated using the given buffer size to allow room
 723 * for driver-specific fields. Driver-specific buffer structures must start
 724 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 725 * structure must pass the size of the isp_video_buffer structure in the bufsize
 726 * parameter.
 727 *
 728 * Return 0 on success.
 729 */
 730int omap3isp_video_queue_init(struct isp_video_queue *queue,
 731			      enum v4l2_buf_type type,
 732			      const struct isp_video_queue_operations *ops,
 733			      struct device *dev, unsigned int bufsize)
 734{
 735	INIT_LIST_HEAD(&queue->queue);
 736	mutex_init(&queue->lock);
 737	spin_lock_init(&queue->irqlock);
 738
 739	queue->type = type;
 740	queue->ops = ops;
 741	queue->dev = dev;
 742	queue->bufsize = bufsize;
 743
 744	return 0;
 745}
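/*
 * Illustrative usage sketch (not part of the original file): a typical caller
 * embeds struct isp_video_buffer at the start of its own buffer structure and
 * passes that structure's size to omap3isp_video_queue_init(). The names
 * my_buffer, my_queue_ops, my_* callbacks and my_dev below are hypothetical.
 *
 *	struct my_buffer {
 *		struct isp_video_buffer buffer;
 *		dma_addr_t isp_addr;
 *	};
 *
 *	static const struct isp_video_queue_operations my_queue_ops = {
 *		.queue_prepare	= my_queue_prepare,
 *		.buffer_prepare	= my_buffer_prepare,
 *		.buffer_queue	= my_buffer_queue,
 *		.buffer_cleanup	= my_buffer_cleanup,
 *	};
 *
 *	ret = omap3isp_video_queue_init(&queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *					&my_queue_ops, my_dev,
 *					sizeof(struct my_buffer));
 *
 * The isp_video_buffer member must come first so that the queue core and the
 * driver can convert between the two structures with a simple cast.
 */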
 746
 747/* -----------------------------------------------------------------------------
 748 * V4L2 operations
 749 */
 750
 751/**
 752 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 753 *
 754 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
  755 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 756 *
 757 * If the number of buffers is 0, all buffers are freed and the function returns
 758 * without performing any allocation.
 759 *
 760 * If the number of buffers is not 0, currently allocated buffers (if any) are
  761 * freed and the requested number of buffers is allocated. Depending on
  762 * driver-specific requirements and on memory availability, a number of buffers
  763 * smaller or larger than requested can be allocated. This isn't considered
  764 * an error.
 765 *
 766 * Return 0 on success or one of the following error codes:
 767 *
 768 * -EINVAL if the buffer type or index are invalid
 769 * -EBUSY if the queue is busy (streaming or buffers mapped)
 770 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 771 */
 772int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
 773				 struct v4l2_requestbuffers *rb)
 774{
 775	unsigned int nbuffers = rb->count;
 776	unsigned int size;
 777	int ret;
 778
 779	if (rb->type != queue->type)
 780		return -EINVAL;
 781
 782	queue->ops->queue_prepare(queue, &nbuffers, &size);
 783	if (size == 0)
 784		return -EINVAL;
 785
 786	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
 787
 788	mutex_lock(&queue->lock);
 789
 790	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
 791	if (ret < 0)
 792		goto done;
 793
 794	rb->count = ret;
 795	ret = 0;
 796
 797done:
 798	mutex_unlock(&queue->lock);
 799	return ret;
 800}
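/*
 * Illustrative sketch (not from this driver): the queue_prepare operation
 * called above is expected to clamp the requested buffer count and report the
 * buffer size in bytes, assuming the signature implied by the call site
 * (queue, pointer to count, pointer to size). The my_video type, its pix field
 * and the 4-buffer minimum are hypothetical.
 *
 *	static void my_queue_prepare(struct isp_video_queue *queue,
 *				     unsigned int *nbuffers, unsigned int *size)
 *	{
 *		struct my_video *video =
 *			container_of(queue, struct my_video, queue);
 *
 *		*size = video->pix.sizeimage;
 *		*nbuffers = max_t(unsigned int, *nbuffers, 4);
 *	}
 *
 * The VIDIOC_REQBUFS ioctl handler itself is then a thin wrapper around
 * omap3isp_video_queue_reqbufs(&video->queue, rb).
 */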
 801
 802/**
 803 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 804 *
 805 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 806 * returns the status of a given video buffer.
 807 *
 808 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 809 */
 810int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
 811				  struct v4l2_buffer *vbuf)
 812{
 813	struct isp_video_buffer *buf;
 814	int ret = 0;
 815
 816	if (vbuf->type != queue->type)
 817		return -EINVAL;
 818
 819	mutex_lock(&queue->lock);
 820
 821	if (vbuf->index >= queue->count) {
 822		ret = -EINVAL;
 823		goto done;
 824	}
 825
 826	buf = queue->buffers[vbuf->index];
 827	isp_video_buffer_query(buf, vbuf);
 828
 829done:
 830	mutex_unlock(&queue->lock);
 831	return ret;
 832}
 833
 834/**
 835 * omap3isp_video_queue_qbuf - Queue a buffer
 836 *
 837 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 838 *
 839 * The v4l2_buffer structure passed from userspace is first sanity tested. If
 840 * sane, the buffer is then processed and added to the main queue and, if the
 841 * queue is streaming, to the IRQ queue.
 842 *
 843 * Before being enqueued, USERPTR buffers are checked for address changes. If
 844 * the buffer has a different userspace address, the old memory area is unlocked
 845 * and the new memory area is locked.
 846 */
 847int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
 848			      struct v4l2_buffer *vbuf)
 849{
 850	struct isp_video_buffer *buf;
 851	unsigned long flags;
 852	int ret = -EINVAL;
 853
 854	if (vbuf->type != queue->type)
 855		goto done;
 856
 857	mutex_lock(&queue->lock);
 858
 859	if (vbuf->index >= queue->count)
 860		goto done;
 861
 862	buf = queue->buffers[vbuf->index];
 863
 864	if (vbuf->memory != buf->vbuf.memory)
 865		goto done;
 866
 867	if (buf->state != ISP_BUF_STATE_IDLE)
 868		goto done;
 869
 870	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
 871	    vbuf->length < buf->vbuf.length)
 872		goto done;
 873
 874	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
 875	    vbuf->m.userptr != buf->vbuf.m.userptr) {
 876		isp_video_buffer_cleanup(buf);
 877		buf->vbuf.m.userptr = vbuf->m.userptr;
 878		buf->prepared = 0;
 879	}
 880
 881	if (!buf->prepared) {
 882		ret = isp_video_buffer_prepare(buf);
 883		if (ret < 0)
 884			goto done;
 885		buf->prepared = 1;
 886	}
 887
 888	isp_video_buffer_cache_sync(buf);
 889
 890	buf->state = ISP_BUF_STATE_QUEUED;
 891	list_add_tail(&buf->stream, &queue->queue);
 892
 893	if (queue->streaming) {
 894		spin_lock_irqsave(&queue->irqlock, flags);
 895		queue->ops->buffer_queue(buf);
 896		spin_unlock_irqrestore(&queue->irqlock, flags);
 897	}
 898
 899	ret = 0;
 900
 901done:
 902	mutex_unlock(&queue->lock);
 903	return ret;
 904}
 905
 906/**
 907 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 908 *
 909 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 910 *
  911 * The buffer at the front of the queue is waited upon, in interruptible mode
  912 * unless non-blocking operation has been requested, until it leaves the
  913 * QUEUED and ACTIVE states. It is then removed from the queue and its status
  914 * is copied to the v4l2_buffer structure passed from userspace.
  915 *
  916 * Return 0 on success, -EINVAL if no buffer is on the queue, or -EAGAIN in
  917 * non-blocking mode if the buffer at the front of the queue isn't ready yet.
 918 */
 919int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
 920			       struct v4l2_buffer *vbuf, int nonblocking)
 921{
 922	struct isp_video_buffer *buf;
 923	int ret;
 924
 925	if (vbuf->type != queue->type)
 926		return -EINVAL;
 927
 928	mutex_lock(&queue->lock);
 929
 930	if (list_empty(&queue->queue)) {
 931		ret = -EINVAL;
 932		goto done;
 933	}
 934
 935	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
 936	ret = isp_video_buffer_wait(buf, nonblocking);
 937	if (ret < 0)
 938		goto done;
 939
 940	list_del(&buf->stream);
 941
 942	isp_video_buffer_query(buf, vbuf);
 943	buf->state = ISP_BUF_STATE_IDLE;
 944	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
 945
 946done:
 947	mutex_unlock(&queue->lock);
 948	return ret;
 949}
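/*
 * Illustrative sketch (not from this driver): VIDIOC_QBUF and VIDIOC_DQBUF
 * handlers are thin wrappers around the two functions above, with the
 * nonblocking argument normally derived from the file's O_NONBLOCK flag. The
 * my_video type is hypothetical.
 *
 *	static int my_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
 *	{
 *		struct my_video *video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_qbuf(&video->queue, b);
 *	}
 *
 *	static int my_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
 *	{
 *		struct my_video *video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_dqbuf(&video->queue, b,
 *						  file->f_flags & O_NONBLOCK);
 *	}
 */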
 950
 951/**
 952 * omap3isp_video_queue_streamon - Start streaming
 953 *
 954 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 955 * starts streaming on the queue and calls the buffer_queue operation for all
 956 * queued buffers.
 957 *
 958 * Return 0 on success.
 959 */
 960int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
 961{
 962	struct isp_video_buffer *buf;
 963	unsigned long flags;
 964
 965	mutex_lock(&queue->lock);
 966
 967	if (queue->streaming)
 968		goto done;
 969
 970	queue->streaming = 1;
 971
 972	spin_lock_irqsave(&queue->irqlock, flags);
 973	list_for_each_entry(buf, &queue->queue, stream)
 974		queue->ops->buffer_queue(buf);
 975	spin_unlock_irqrestore(&queue->irqlock, flags);
 976
 977done:
 978	mutex_unlock(&queue->lock);
 979	return 0;
 980}
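/*
 * Illustrative sketch (not from this driver): a VIDIOC_STREAMON handler would
 * typically call the function above first, so that already-queued buffers
 * reach the buffer_queue operation, and only then start the hardware. The
 * my_hw_start() helper is hypothetical.
 *
 *	ret = omap3isp_video_queue_streamon(&video->queue);
 *	if (ret < 0)
 *		return ret;
 *
 *	my_hw_start(video);
 */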
 981
 982/**
 983 * omap3isp_video_queue_streamoff - Stop streaming
 984 *
 985 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 986 * stops streaming on the queue and wakes up all the buffers.
 987 *
 988 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  989 * delayed work before calling this function to make sure no buffer will be
 990 * touched by the driver and/or hardware.
 991 */
 992void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
 993{
 994	struct isp_video_buffer *buf;
 995	unsigned long flags;
 996	unsigned int i;
 997
 998	mutex_lock(&queue->lock);
 999
1000	if (!queue->streaming)
1001		goto done;
1002
1003	queue->streaming = 0;
1004
1005	spin_lock_irqsave(&queue->irqlock, flags);
1006	for (i = 0; i < queue->count; ++i) {
1007		buf = queue->buffers[i];
1008
1009		if (buf->state == ISP_BUF_STATE_ACTIVE)
1010			wake_up(&buf->wait);
1011
1012		buf->state = ISP_BUF_STATE_IDLE;
1013	}
1014	spin_unlock_irqrestore(&queue->irqlock, flags);
1015
1016	INIT_LIST_HEAD(&queue->queue);
1017
1018done:
1019	mutex_unlock(&queue->lock);
1020}
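/*
 * Illustrative sketch (not from this driver) of the ordering required by the
 * documentation above: make the hardware idle first, then tear the queue down.
 * The my_hw_stop() helper and the irq and restart_work fields are
 * hypothetical.
 *
 *	my_hw_stop(video);
 *	synchronize_irq(video->irq);
 *	cancel_work_sync(&video->restart_work);
 *	omap3isp_video_queue_streamoff(&video->queue);
 */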
1021
1022/**
1023 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
1024 *
1025 * This function is intended to be used with suspend/resume operations. It
1026 * discards all 'done' buffers as they would be too old to be requested after
1027 * resume.
1028 *
1029 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 1030 * delayed work before calling this function to make sure no buffer will be
1031 * touched by the driver and/or hardware.
1032 */
1033void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1034{
1035	struct isp_video_buffer *buf;
1036	unsigned int i;
1037
1038	mutex_lock(&queue->lock);
1039
1040	if (!queue->streaming)
1041		goto done;
1042
1043	for (i = 0; i < queue->count; ++i) {
1044		buf = queue->buffers[i];
1045
1046		if (buf->state == ISP_BUF_STATE_DONE)
1047			buf->state = ISP_BUF_STATE_ERROR;
1048	}
1049
1050done:
1051	mutex_unlock(&queue->lock);
1052}
1053
1054static void isp_video_queue_vm_open(struct vm_area_struct *vma)
1055{
1056	struct isp_video_buffer *buf = vma->vm_private_data;
1057
1058	buf->vma_use_count++;
1059}
1060
1061static void isp_video_queue_vm_close(struct vm_area_struct *vma)
1062{
1063	struct isp_video_buffer *buf = vma->vm_private_data;
1064
1065	buf->vma_use_count--;
1066}
1067
1068static const struct vm_operations_struct isp_video_queue_vm_ops = {
1069	.open = isp_video_queue_vm_open,
1070	.close = isp_video_queue_vm_close,
1071};
1072
1073/**
1074 * omap3isp_video_queue_mmap - Map buffers to userspace
1075 *
1076 * This function is intended to be used as an mmap() file operation handler. It
1077 * maps a buffer to userspace based on the VMA offset.
1078 *
1079 * Only buffers of memory type MMAP are supported.
1080 */
1081int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
1082			 struct vm_area_struct *vma)
1083{
1084	struct isp_video_buffer *uninitialized_var(buf);
1085	unsigned long size;
1086	unsigned int i;
1087	int ret = 0;
1088
1089	mutex_lock(&queue->lock);
1090
1091	for (i = 0; i < queue->count; ++i) {
1092		buf = queue->buffers[i];
1093		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1094			break;
1095	}
1096
1097	if (i == queue->count) {
1098		ret = -EINVAL;
1099		goto done;
1100	}
1101
1102	size = vma->vm_end - vma->vm_start;
1103
1104	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
1105	    size != PAGE_ALIGN(buf->vbuf.length)) {
1106		ret = -EINVAL;
1107		goto done;
1108	}
1109
1110	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
1111	if (ret < 0)
1112		goto done;
1113
1114	vma->vm_ops = &isp_video_queue_vm_ops;
1115	vma->vm_private_data = buf;
1116	isp_video_queue_vm_open(vma);
1117
1118done:
1119	mutex_unlock(&queue->lock);
1120	return ret;
1121}
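/*
 * Illustrative sketch (not from this driver): omap3isp_video_queue_mmap() is
 * meant to back the mmap file operation of the V4L2 device node, and
 * omap3isp_video_queue_poll() below is wired up the same way for poll. The
 * my_video type is hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_video *video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_mmap(&video->queue, vma);
 *	}
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_video *video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_poll(&video->queue, file, wait);
 *	}
 */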
1122
1123/**
1124 * omap3isp_video_queue_poll - Poll video queue state
1125 *
1126 * This function is intended to be used as a poll() file operation handler. It
1127 * polls the state of the video buffer at the front of the queue and returns an
1128 * events mask.
1129 *
1130 * If no buffer is present at the front of the queue, POLLERR is returned.
1131 */
1132unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1133				       struct file *file, poll_table *wait)
1134{
1135	struct isp_video_buffer *buf;
1136	unsigned int mask = 0;
1137
1138	mutex_lock(&queue->lock);
1139	if (list_empty(&queue->queue)) {
1140		mask |= POLLERR;
1141		goto done;
1142	}
1143	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1144
1145	poll_wait(file, &buf->wait, wait);
1146	if (buf->state == ISP_BUF_STATE_DONE ||
1147	    buf->state == ISP_BUF_STATE_ERROR) {
1148		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1149			mask |= POLLIN | POLLRDNORM;
1150		else
1151			mask |= POLLOUT | POLLWRNORM;
1152	}
1153
1154done:
1155	mutex_unlock(&queue->lock);
1156	return mask;
1157}