   1/*
   2 * ispstat.c
   3 *
   4 * TI OMAP3 ISP - Statistics core
   5 *
   6 * Copyright (C) 2010 Nokia Corporation
   7 * Copyright (C) 2009 Texas Instruments, Inc
   8 *
   9 * Contacts: David Cohen <dacohen@gmail.com>
  10 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  11 *	     Sakari Ailus <sakari.ailus@iki.fi>
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License version 2 as
  15 * published by the Free Software Foundation.
  16 *
  17 * This program is distributed in the hope that it will be useful, but
  18 * WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 * General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  25 * 02110-1301 USA
  26 */
  27
  28#include <linux/dma-mapping.h>
  29#include <linux/slab.h>
  30#include <linux/uaccess.h>
  31
  32#include "isp.h"
  33
  34#define IS_COHERENT_BUF(stat)	((stat)->dma_ch >= 0)
  35
  36/*
  37 * MAGIC_SIZE must always be the greatest common divisor of
  38 * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
  39 */
  40#define MAGIC_SIZE		16
  41#define MAGIC_NUM		0x55
  42
   43/* HACK: The AF module seems to write one paxel more data than it should. */
  44#define AF_EXTRA_DATA		OMAP3ISP_AF_PAXEL_SIZE
  45
  46/*
   47 * HACK: H3A modules go to an invalid state after an SBL overflow. The next
   48 * buffer then starts to be written at the point where the overflow occurred
   49 * instead of at the configured address. The only known way to bring the
   50 * module back to a valid state is to let it process a valid buffer, which
   51 * requires at least a doubled buffer size to avoid accesses to an invalid
   52 * memory region. That alone does not fix everything, though: more than one
   53 * consecutive SBL overflow may happen, in which case it is unpredictable how
   54 * many buffers the allocated memory should fit. For that case, a recover
   55 * configuration was created. It produces the minimum buffer size for each
   56 * H3A module and decreases the chance of further SBL overflows. This recover
   57 * state is enabled every time an SBL overflow occurs. As the output buffer
   58 * size isn't big, it's possible to reserve extra space able to fit many
   59 * recover buffers, making an access to an invalid memory region extremely
   60 * unlikely.
  61 */
  62#define NUM_H3A_RECOVER_BUFS	10
  63
  64/*
   65 * HACK: Because of HW issues the generic layer sometimes needs to behave
   66 * differently for different statistic modules.
  67 */
  68#define IS_H3A_AF(stat)		((stat) == &(stat)->isp->isp_af)
  69#define IS_H3A_AEWB(stat)	((stat) == &(stat)->isp->isp_aewb)
  70#define IS_H3A(stat)		(IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
  71
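     /*
      * Sync only the two MAGIC_SIZE regions of a buffer (at its very start and
      * right after the statistics payload) between CPU and device, using the
      * dma_sync callback provided by the caller.
      */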
  72static void __isp_stat_buf_sync_magic(struct ispstat *stat,
  73				      struct ispstat_buffer *buf,
  74				      u32 buf_size, enum dma_data_direction dir,
  75				      void (*dma_sync)(struct device *,
  76					dma_addr_t, unsigned long, size_t,
  77					enum dma_data_direction))
  78{
  79	struct device *dev = stat->isp->dev;
  80	struct page *pg;
  81	dma_addr_t dma_addr;
  82	u32 offset;
  83
  84	/* Initial magic words */
  85	pg = vmalloc_to_page(buf->virt_addr);
  86	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
  87	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
  88
  89	/* Final magic words */
  90	pg = vmalloc_to_page(buf->virt_addr + buf_size);
  91	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
  92	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
  93	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
  94}
  95
  96static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
  97					       struct ispstat_buffer *buf,
  98					       u32 buf_size,
  99					       enum dma_data_direction dir)
 100{
 101	if (IS_COHERENT_BUF(stat))
 102		return;
 103
 104	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
 105				  dma_sync_single_range_for_device);
 106}
 107
 108static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
 109					    struct ispstat_buffer *buf,
 110					    u32 buf_size,
 111					    enum dma_data_direction dir)
 112{
 113	if (IS_COHERENT_BUF(stat))
 114		return;
 115
 116	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
 117				  dma_sync_single_range_for_cpu);
 118}
 119
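     /*
      * Verify that the initial magic words have been overwritten by the module
      * and that the magic words right after the payload are still intact, i.e.
      * that the engine wrote the expected amount of data and nothing more.
      */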
 120static int isp_stat_buf_check_magic(struct ispstat *stat,
 121				    struct ispstat_buffer *buf)
 122{
 123	const u32 buf_size = IS_H3A_AF(stat) ?
 124			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
 125	u8 *w;
 126	u8 *end;
 127	int ret = -EINVAL;
 128
 129	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
 130
 131	/* Checking initial magic numbers. They shouldn't be here anymore. */
 132	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
 133		if (likely(*w != MAGIC_NUM))
 134			ret = 0;
 135
 136	if (ret) {
 137		dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
 138					"match.\n", stat->subdev.name);
 139		return ret;
 140	}
 141
  142	/* Checking magic numbers at the end. They must still be here. */
 143	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
 144	     w < end; w++) {
 145		if (unlikely(*w != MAGIC_NUM)) {
  146			dev_dbg(stat->isp->dev, "%s: ending magic check does "
 147				"not match.\n", stat->subdev.name);
 148			return -EINVAL;
 149		}
 150	}
 151
 152	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
 153					   DMA_FROM_DEVICE);
 154
 155	return 0;
 156}
 157
 158static void isp_stat_buf_insert_magic(struct ispstat *stat,
 159				      struct ispstat_buffer *buf)
 160{
 161	const u32 buf_size = IS_H3A_AF(stat) ?
 162			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
 163
 164	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
 165
 166	/*
 167	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
 168	 * buf->buf_size is set only after the buffer is queued. For now the
  169	 * right buf_size for the current configuration is given by
 170	 * stat->buf_size.
 171	 */
 172	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
 173	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
 174
 175	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
 176					   DMA_BIDIRECTIONAL);
 177}
 178
 179static void isp_stat_buf_sync_for_device(struct ispstat *stat,
 180					 struct ispstat_buffer *buf)
 181{
 182	if (IS_COHERENT_BUF(stat))
 183		return;
 184
 185	dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
 186			       buf->iovm->sgt->nents, DMA_FROM_DEVICE);
 187}
 188
 189static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
 190				      struct ispstat_buffer *buf)
 191{
 192	if (IS_COHERENT_BUF(stat))
 193		return;
 194
 195	dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
 196			    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
 197}
 198
 199static void isp_stat_buf_clear(struct ispstat *stat)
 200{
 201	int i;
 202
 203	for (i = 0; i < STAT_MAX_BUFS; i++)
 204		stat->buf[i].empty = 1;
 205}
 206
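     /*
      * Find a reusable buffer: skip buffers locked for userspace or in use by
      * the module, return an empty buffer if look_empty is set and one exists,
      * and fall back to the oldest filled buffer otherwise.
      */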
 207static struct ispstat_buffer *
 208__isp_stat_buf_find(struct ispstat *stat, int look_empty)
 209{
 210	struct ispstat_buffer *found = NULL;
 211	int i;
 212
 213	for (i = 0; i < STAT_MAX_BUFS; i++) {
 214		struct ispstat_buffer *curr = &stat->buf[i];
 215
 216		/*
 217		 * Don't select the buffer which is being copied to
 218		 * userspace or used by the module.
 219		 */
 220		if (curr == stat->locked_buf || curr == stat->active_buf)
 221			continue;
 222
 223		/* Don't select uninitialised buffers if it's not required */
 224		if (!look_empty && curr->empty)
 225			continue;
 226
 227		/* Pick uninitialised buffer over anything else if look_empty */
 228		if (curr->empty) {
 229			found = curr;
 230			break;
 231		}
 232
 233		/* Choose the oldest buffer */
 234		if (!found ||
 235		    (s32)curr->frame_number - (s32)found->frame_number < 0)
 236			found = curr;
 237	}
 238
 239	return found;
 240}
 241
 242static inline struct ispstat_buffer *
 243isp_stat_buf_find_oldest(struct ispstat *stat)
 244{
 245	return __isp_stat_buf_find(stat, 0);
 246}
 247
 248static inline struct ispstat_buffer *
 249isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
 250{
 251	return __isp_stat_buf_find(stat, 1);
 252}
 253
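     /*
      * Timestamp the active buffer, check its magic words and queue it as a
      * completed statistics buffer. Returns STAT_BUF_DONE on success, or
      * STAT_NO_BUF if there is no active buffer or its data wasn't properly
      * written.
      */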
 254static int isp_stat_buf_queue(struct ispstat *stat)
 255{
 256	if (!stat->active_buf)
 257		return STAT_NO_BUF;
 258
 259	do_gettimeofday(&stat->active_buf->ts);
 260
 261	stat->active_buf->buf_size = stat->buf_size;
 262	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
 263		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
 264			stat->subdev.name);
 265		return STAT_NO_BUF;
 266	}
 267	stat->active_buf->config_counter = stat->config_counter;
 268	stat->active_buf->frame_number = stat->frame_number;
 269	stat->active_buf->empty = 0;
 270	stat->active_buf = NULL;
 271
 272	return STAT_BUF_DONE;
 273}
 274
 275/* Get next free buffer to write the statistics to and mark it active. */
 276static void isp_stat_buf_next(struct ispstat *stat)
 277{
 278	if (unlikely(stat->active_buf))
 279		/* Overwriting unused active buffer */
 280		dev_dbg(stat->isp->dev, "%s: new buffer requested without "
 281					"queuing active one.\n",
 282					stat->subdev.name);
 283	else
 284		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
 285}
 286
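     /* Release the buffer that was locked for userspace access. */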
 287static void isp_stat_buf_release(struct ispstat *stat)
 288{
 289	unsigned long flags;
 290
 291	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
 292	spin_lock_irqsave(&stat->isp->stat_lock, flags);
 293	stat->locked_buf = NULL;
 294	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 295}
 296
  297/* Get a buffer and copy its content to userspace. */
 298static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
 299					       struct omap3isp_stat_data *data)
 300{
 301	int rval = 0;
 302	unsigned long flags;
 303	struct ispstat_buffer *buf;
 304
 305	spin_lock_irqsave(&stat->isp->stat_lock, flags);
 306
 307	while (1) {
 308		buf = isp_stat_buf_find_oldest(stat);
 309		if (!buf) {
 310			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 311			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
 312				stat->subdev.name);
 313			return ERR_PTR(-EBUSY);
 314		}
 315		if (isp_stat_buf_check_magic(stat, buf)) {
 316			dev_dbg(stat->isp->dev, "%s: current buffer has "
  317				"corrupted data.\n", stat->subdev.name);
 318			/* Mark empty because it doesn't have valid data. */
 319			buf->empty = 1;
 320		} else {
 321			/* Buffer isn't corrupted. */
 322			break;
 323		}
 324	}
 325
 326	stat->locked_buf = buf;
 327
 328	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 329
 330	if (buf->buf_size > data->buf_size) {
  331		dev_warn(stat->isp->dev, "%s: userspace buffer size is "
  332					 "too small.\n", stat->subdev.name);
 333		isp_stat_buf_release(stat);
 334		return ERR_PTR(-EINVAL);
 335	}
 336
 337	isp_stat_buf_sync_for_cpu(stat, buf);
 338
 339	rval = copy_to_user(data->buf,
 340			    buf->virt_addr,
 341			    buf->buf_size);
 342
 343	if (rval) {
 344		dev_info(stat->isp->dev,
 345			 "%s: failed copying %d bytes of stat data\n",
 346			 stat->subdev.name, rval);
 347		buf = ERR_PTR(-EFAULT);
 348		isp_stat_buf_release(stat);
 349	}
 350
 351	return buf;
 352}
 353
 354static void isp_stat_bufs_free(struct ispstat *stat)
 355{
 356	struct isp_device *isp = stat->isp;
 357	int i;
 358
 359	for (i = 0; i < STAT_MAX_BUFS; i++) {
 360		struct ispstat_buffer *buf = &stat->buf[i];
 361
 362		if (!IS_COHERENT_BUF(stat)) {
 363			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
 364				continue;
 365			if (buf->iovm)
 366				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
 367					     buf->iovm->sgt->nents,
 368					     DMA_FROM_DEVICE);
 369			omap_iommu_vfree(isp->domain, isp->dev,
 370							buf->iommu_addr);
 371		} else {
 372			if (!buf->virt_addr)
 373				continue;
 374			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
 375					  buf->virt_addr, buf->dma_addr);
 376		}
 377		buf->iommu_addr = 0;
 378		buf->iovm = NULL;
 379		buf->dma_addr = 0;
 380		buf->virt_addr = NULL;
 381		buf->empty = 1;
 382	}
 383
 384	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
 385		stat->subdev.name);
 386
 387	stat->buf_alloc_size = 0;
 388	stat->active_buf = NULL;
 389}
 390
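     /*
      * Allocate all statistics buffers through the OMAP IOMMU and map them for
      * DMA_FROM_DEVICE transfers. This path is used for modules without a
      * coherent DMA buffer (see IS_COHERENT_BUF()).
      */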
 391static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
 392{
 393	struct isp_device *isp = stat->isp;
 394	int i;
 395
 396	stat->buf_alloc_size = size;
 397
 398	for (i = 0; i < STAT_MAX_BUFS; i++) {
 399		struct ispstat_buffer *buf = &stat->buf[i];
 400		struct iovm_struct *iovm;
 401
 402		WARN_ON(buf->dma_addr);
 403		buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
 404							size, IOMMU_FLAG);
 405		if (IS_ERR((void *)buf->iommu_addr)) {
 406			dev_err(stat->isp->dev,
 407				 "%s: Can't acquire memory for "
 408				 "buffer %d\n", stat->subdev.name, i);
 409			isp_stat_bufs_free(stat);
 410			return -ENOMEM;
 411		}
 412
 413		iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
 414		if (!iovm ||
 415		    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
 416				DMA_FROM_DEVICE)) {
 417			isp_stat_bufs_free(stat);
 418			return -ENOMEM;
 419		}
 420		buf->iovm = iovm;
 421
 422		buf->virt_addr = omap_da_to_va(stat->isp->dev,
 423					  (u32)buf->iommu_addr);
 424		buf->empty = 1;
  425		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. "
  426			"iommu_addr=0x%08lx virt_addr=0x%08lx\n",
 427			stat->subdev.name, i, buf->iommu_addr,
 428			(unsigned long)buf->virt_addr);
 429	}
 430
 431	return 0;
 432}
 433
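     /*
      * Allocate all statistics buffers from coherent DMA memory. This path is
      * used for modules that have a DMA channel (see IS_COHERENT_BUF()).
      */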
 434static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
 435{
 436	int i;
 437
 438	stat->buf_alloc_size = size;
 439
 440	for (i = 0; i < STAT_MAX_BUFS; i++) {
 441		struct ispstat_buffer *buf = &stat->buf[i];
 442
 443		WARN_ON(buf->iommu_addr);
 444		buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
 445					&buf->dma_addr, GFP_KERNEL | GFP_DMA);
 446
 447		if (!buf->virt_addr || !buf->dma_addr) {
 448			dev_info(stat->isp->dev,
 449				 "%s: Can't acquire memory for "
 450				 "DMA buffer %d\n", stat->subdev.name, i);
 451			isp_stat_bufs_free(stat);
 452			return -ENOMEM;
 453		}
 454		buf->empty = 1;
 455
  456		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. "
 457			"dma_addr=0x%08lx virt_addr=0x%08lx\n",
 458			stat->subdev.name, i, (unsigned long)buf->dma_addr,
 459			(unsigned long)buf->virt_addr);
 460	}
 461
 462	return 0;
 463}
 464
 465static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 466{
 467	unsigned long flags;
 468
 469	spin_lock_irqsave(&stat->isp->stat_lock, flags);
 470
 471	BUG_ON(stat->locked_buf != NULL);
 472
 473	/* Are the old buffers big enough? */
 474	if (stat->buf_alloc_size >= size) {
 475		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 476		return 0;
 477	}
 478
 479	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
 480		dev_info(stat->isp->dev,
 481			 "%s: trying to allocate memory when busy\n",
 482			 stat->subdev.name);
 483		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 484		return -EBUSY;
 485	}
 486
 487	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 488
 489	isp_stat_bufs_free(stat);
 490
 491	if (IS_COHERENT_BUF(stat))
 492		return isp_stat_bufs_alloc_dma(stat, size);
 493	else
 494		return isp_stat_bufs_alloc_iommu(stat, size);
 495}
 496
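     /*
      * Queue a V4L2 event on the subdev's device node to notify userspace that
      * a new statistics buffer is ready or that a buffer error occurred.
      */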
 497static void isp_stat_queue_event(struct ispstat *stat, int err)
 498{
 499	struct video_device *vdev = stat->subdev.devnode;
 500	struct v4l2_event event;
 501	struct omap3isp_stat_event_status *status = (void *)event.u.data;
 502
 503	memset(&event, 0, sizeof(event));
 504	if (!err) {
 505		status->frame_number = stat->frame_number;
 506		status->config_counter = stat->config_counter;
 507	} else {
 508		status->buf_err = 1;
 509	}
 510	event.type = stat->event_type;
 511	v4l2_event_queue(vdev, &event);
 512}
 513
 514
 515/*
 516 * omap3isp_stat_request_statistics - Request statistics.
 517 * @data: Pointer to return statistics data.
 518 *
 519 * Returns 0 if successful.
 520 */
 521int omap3isp_stat_request_statistics(struct ispstat *stat,
 522				     struct omap3isp_stat_data *data)
 523{
 524	struct ispstat_buffer *buf;
 525
 526	if (stat->state != ISPSTAT_ENABLED) {
 527		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
 528			stat->subdev.name);
 529		return -EINVAL;
 530	}
 531
 532	mutex_lock(&stat->ioctl_lock);
 533	buf = isp_stat_buf_get(stat, data);
 534	if (IS_ERR(buf)) {
 535		mutex_unlock(&stat->ioctl_lock);
 536		return PTR_ERR(buf);
 537	}
 538
 539	data->ts = buf->ts;
 540	data->config_counter = buf->config_counter;
 541	data->frame_number = buf->frame_number;
 542	data->buf_size = buf->buf_size;
 543
 544	buf->empty = 1;
 545	isp_stat_buf_release(stat);
 546	mutex_unlock(&stat->ioctl_lock);
 547
 548	return 0;
 549}
 550
 551/*
 552 * omap3isp_stat_config - Receives new statistic engine configuration.
 553 * @new_conf: Pointer to config structure.
 554 *
  555 * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM
  556 * if it was unable to allocate memory for the buffer, or other errors if the
  557 * parameters are invalid.
 558 */
 559int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
 560{
 561	int ret;
 562	unsigned long irqflags;
 563	struct ispstat_generic_config *user_cfg = new_conf;
  564	u32 buf_size;
  565
  566	if (!new_conf) {
  567		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
  568			stat->subdev.name);
  569		return -EINVAL;
  570	}
  571
  572	buf_size = user_cfg->buf_size;
  573	mutex_lock(&stat->ioctl_lock);
 574	dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
 575		"size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
 576
 577	ret = stat->ops->validate_params(stat, new_conf);
 578	if (ret) {
 579		mutex_unlock(&stat->ioctl_lock);
 580		dev_dbg(stat->isp->dev, "%s: configuration values are "
 581					"invalid.\n", stat->subdev.name);
 582		return ret;
 583	}
 584
 585	if (buf_size != user_cfg->buf_size)
 586		dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
 587			"request to 0x%08lx\n", stat->subdev.name,
 588			(unsigned long)user_cfg->buf_size);
 589
 590	/*
  591	 * Hack: H3A modules may need a doubled buffer size to avoid access
  592	 * to an invalid memory address after an SBL overflow.
  593	 * The buffer size is always PAGE_ALIGNED.
  594	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
  595	 * inserted at the end for data integrity check purposes.
  596	 * Hack 3: The AF module writes one paxel of data more than it should,
  597	 * so the buffer allocation must account for it to avoid invalid memory
  598	 * access.
  599	 * Hack 4: H3A needs to allocate extra space for the recover state.
 600	 */
 601	if (IS_H3A(stat)) {
 602		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
 603		if (IS_H3A_AF(stat))
 604			/*
  605			 * Add one extra paxel of data for each recover
  606			 * buffer plus the 2 regular ones.
 607			 */
 608			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
 609		if (stat->recover_priv) {
 610			struct ispstat_generic_config *recover_cfg =
 611				stat->recover_priv;
 612			buf_size += recover_cfg->buf_size *
 613				    NUM_H3A_RECOVER_BUFS;
 614		}
 615		buf_size = PAGE_ALIGN(buf_size);
 616	} else { /* Histogram */
 617		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
 618	}
 619
 620	ret = isp_stat_bufs_alloc(stat, buf_size);
 621	if (ret) {
 622		mutex_unlock(&stat->ioctl_lock);
 623		return ret;
 624	}
 625
 626	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 627	stat->ops->set_params(stat, new_conf);
 628	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 629
 630	/*
 631	 * Returning the right future config_counter for this setup, so
 632	 * userspace can *know* when it has been applied.
 633	 */
 634	user_cfg->config_counter = stat->config_counter + stat->inc_config;
 635
 636	/* Module has a valid configuration. */
 637	stat->configured = 1;
 638	dev_dbg(stat->isp->dev, "%s: module has been successfully "
 639		"configured.\n", stat->subdev.name);
 640
 641	mutex_unlock(&stat->ioctl_lock);
 642
 643	return 0;
 644}
 645
 646/*
 647 * isp_stat_buf_process - Process statistic buffers.
  648 * @buf_state: indicates whether the buffer is ready to be processed. It's
  649 *	       necessary because the histogram needs to copy the data from
  650 *	       internal memory before being able to process the buffer.
 651 */
 652static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
 653{
 654	int ret = STAT_NO_BUF;
 655
 656	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
 657	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
 658		ret = isp_stat_buf_queue(stat);
 659		isp_stat_buf_next(stat);
 660	}
 661
 662	return ret;
 663}
 664
 665int omap3isp_stat_pcr_busy(struct ispstat *stat)
 666{
 667	return stat->ops->busy(stat);
 668}
 669
 670int omap3isp_stat_busy(struct ispstat *stat)
 671{
 672	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
 673		(stat->state != ISPSTAT_DISABLED);
 674}
 675
 676/*
 677 * isp_stat_pcr_enable - Disables/Enables statistic engines.
 678 * @pcr_enable: 0/1 - Disables/Enables the engine.
 679 *
 680 * Must be called from ISP driver when the module is idle and synchronized
 681 * with CCDC.
 682 */
 683static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
 684{
 685	if ((stat->state != ISPSTAT_ENABLING &&
 686	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
 687		/* Userspace has disabled the module. Aborting. */
 688		return;
 689
 690	stat->ops->enable(stat, pcr_enable);
 691	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
 692		stat->state = ISPSTAT_DISABLED;
 693	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
 694		stat->state = ISPSTAT_ENABLED;
 695}
 696
 697void omap3isp_stat_suspend(struct ispstat *stat)
 698{
 699	unsigned long flags;
 700
 701	spin_lock_irqsave(&stat->isp->stat_lock, flags);
 702
 703	if (stat->state != ISPSTAT_DISABLED)
 704		stat->ops->enable(stat, 0);
 705	if (stat->state == ISPSTAT_ENABLED)
 706		stat->state = ISPSTAT_SUSPENDED;
 707
 708	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 709}
 710
 711void omap3isp_stat_resume(struct ispstat *stat)
 712{
 713	/* Module will be re-enabled with its pipeline */
 714	if (stat->state == ISPSTAT_SUSPENDED)
 715		stat->state = ISPSTAT_ENABLING;
 716}
 717
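     /*
      * Enable the statistics engine if userspace has requested it, buffers are
      * allocated and no buffer is currently being processed.
      */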
 718static void isp_stat_try_enable(struct ispstat *stat)
 719{
 720	unsigned long irqflags;
 721
 722	if (stat->priv == NULL)
 723		/* driver wasn't initialised */
 724		return;
 725
 726	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 727	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
 728	    stat->buf_alloc_size) {
 729		/*
  730		 * Userspace requested that the engine be enabled, but it hasn't
  731		 * been enabled yet. Let's do that now.
 732		 */
 733		stat->update = 1;
 734		isp_stat_buf_next(stat);
 735		stat->ops->setup_regs(stat, stat->priv);
 736		isp_stat_buf_insert_magic(stat, stat->active_buf);
 737
 738		/*
  739		 * The H3A module has some hw issues which force the driver to
  740		 * ignore the next buffers even if it was disabled in the meantime.
  741		 * On the other hand, the Histogram shouldn't ignore buffers
  742		 * anymore if it's being enabled.
 743		 */
 744		if (!IS_H3A(stat))
 745			atomic_set(&stat->buf_err, 0);
 746
 747		isp_stat_pcr_enable(stat, 1);
 748		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 749		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
 750			stat->subdev.name);
 751	} else {
 752		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 753	}
 754}
 755
 756void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
 757{
 758	isp_stat_try_enable(stat);
 759}
 760
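     /*
      * Handle an SBL overflow affecting this module: ignore the next two
      * buffers and, if a recover configuration exists, request that it be
      * applied on the next interrupt.
      */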
 761void omap3isp_stat_sbl_overflow(struct ispstat *stat)
 762{
 763	unsigned long irqflags;
 764
 765	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 766	/*
  767	 * Due to an H3A hw issue which prevents the next buffer from starting
  768	 * at the correct memory address, 2 buffers must be ignored.
 769	 */
 770	atomic_set(&stat->buf_err, 2);
 771
 772	/*
  773	 * If more than one SBL overflow happens in a row, the H3A module may
  774	 * access an invalid memory region.
  775	 * stat->sbl_ovl_recover is set to tell the driver to temporarily use a
  776	 * soft configuration which helps to avoid consecutive overflows.
 777	 */
 778	if (stat->recover_priv)
 779		stat->sbl_ovl_recover = 1;
 780	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 781}
 782
 783/*
 784 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
 785 * @enable: 0/1 - Disables/Enables the engine.
 786 *
 787 * Client should configure all the module registers before this.
 788 * This function can be called from a userspace request.
 789 */
 790int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
 791{
 792	unsigned long irqflags;
 793
 794	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
 795		stat->subdev.name, enable ? "enable" : "disable");
 796
 797	/* Prevent enabling while configuring */
 798	mutex_lock(&stat->ioctl_lock);
 799
 800	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 801
 802	if (!stat->configured && enable) {
 803		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 804		mutex_unlock(&stat->ioctl_lock);
  805		dev_dbg(stat->isp->dev, "%s: cannot enable module as it has "
  806			"never been successfully configured.\n",
 807			stat->subdev.name);
 808		return -EINVAL;
 809	}
 810
 811	if (enable) {
 812		if (stat->state == ISPSTAT_DISABLING)
 813			/* Previous disabling request wasn't done yet */
 814			stat->state = ISPSTAT_ENABLED;
 815		else if (stat->state == ISPSTAT_DISABLED)
 816			/* Module is now being enabled */
 817			stat->state = ISPSTAT_ENABLING;
 818	} else {
 819		if (stat->state == ISPSTAT_ENABLING) {
 820			/* Previous enabling request wasn't done yet */
 821			stat->state = ISPSTAT_DISABLED;
 822		} else if (stat->state == ISPSTAT_ENABLED) {
 823			/* Module is now being disabled */
 824			stat->state = ISPSTAT_DISABLING;
 825			isp_stat_buf_clear(stat);
 826		}
 827	}
 828
 829	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 830	mutex_unlock(&stat->ioctl_lock);
 831
 832	return 0;
 833}
 834
 835int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
 836{
 837	struct ispstat *stat = v4l2_get_subdevdata(subdev);
 838
 839	if (enable) {
 840		/*
  841		 * Only set the PCR enable bit if the module was previously
  842		 * enabled through ioctl.
 843		 */
 844		isp_stat_try_enable(stat);
 845	} else {
 846		unsigned long flags;
 847		/* Disable PCR bit and config enable field */
 848		omap3isp_stat_enable(stat, 0);
 849		spin_lock_irqsave(&stat->isp->stat_lock, flags);
 850		stat->ops->enable(stat, 0);
 851		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 852
 853		/*
  854		 * If the module isn't busy, a new interrupt may or may not come
  855		 * to set the state to DISABLED. As the Histogram needs to read
  856		 * its internal memory to clear it, let the interrupt handler be
  857		 * responsible for changing the state to DISABLED. If the last
  858		 * interrupt does come, it's still safe, as the handler will
  859		 * ignore the second call when the state is already DISABLED.
  860		 * It's necessary to synchronize the Histogram with streamoff,
  861		 * since the module might be considered idle before the last
  862		 * SDMA transfer starts if we simply returned here.
 863		 */
 864		if (!omap3isp_stat_pcr_busy(stat))
 865			omap3isp_stat_isr(stat);
 866
 867		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
 868			stat->subdev.name);
 869	}
 870
 871	return 0;
 872}
 873
 874/*
 875 * __stat_isr - Interrupt handler for statistic drivers
 876 */
 877static void __stat_isr(struct ispstat *stat, int from_dma)
 878{
 879	int ret = STAT_BUF_DONE;
 880	int buf_processing;
 881	unsigned long irqflags;
 882	struct isp_pipeline *pipe;
 883
 884	/*
  885	 * stat->buf_processing must be set before the module is disabled. It's
  886	 * necessary to avoid reporting too early that the buffers aren't busy
  887	 * in case SDMA is going to be used.
 888	 */
 889	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 890	if (stat->state == ISPSTAT_DISABLED) {
 891		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 892		return;
 893	}
 894	buf_processing = stat->buf_processing;
 895	stat->buf_processing = 1;
 896	stat->ops->enable(stat, 0);
 897
 898	if (buf_processing && !from_dma) {
 899		if (stat->state == ISPSTAT_ENABLED) {
 900			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 901			dev_err(stat->isp->dev,
 902				"%s: interrupt occurred when module was still "
 903				"processing a buffer.\n", stat->subdev.name);
 904			ret = STAT_NO_BUF;
 905			goto out;
 906		} else {
 907			/*
  908			 * The interrupt handler was called from streamoff when
  909			 * the module wasn't busy anymore, to ensure it is
  910			 * disabled after processing the last buffer. If such
  911			 * buffer processing has already started, no need to do
 912			 * anything else.
 913			 */
 914			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 915			return;
 916		}
 917	}
 918	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 919
 920	/* If it's busy we can't process this buffer anymore */
 921	if (!omap3isp_stat_pcr_busy(stat)) {
 922		if (!from_dma && stat->ops->buf_process)
  923			/* Module still needs to copy data to the buffer. */
 924			ret = stat->ops->buf_process(stat);
 925		if (ret == STAT_BUF_WAITING_DMA)
 926			/* Buffer is not ready yet */
 927			return;
 928
 929		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
 930
 931		/*
  932		 * The Histogram needs to read its internal memory to clear it
  933		 * before being disabled. For that reason, the common statistic
  934		 * layer can return only after calling the buf_process() operator.
 935		 */
 936		if (stat->state == ISPSTAT_DISABLING) {
 937			stat->state = ISPSTAT_DISABLED;
 938			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 939			stat->buf_processing = 0;
 940			return;
 941		}
 942		pipe = to_isp_pipeline(&stat->subdev.entity);
 943		stat->frame_number = atomic_read(&pipe->frame_number);
 944
 945		/*
 946		 * Before this point, 'ret' stores the buffer's status if it's
 947		 * ready to be processed. Afterwards, it holds the status if
 948		 * it was processed successfully.
 949		 */
 950		ret = isp_stat_buf_process(stat, ret);
 951
 952		if (likely(!stat->sbl_ovl_recover)) {
 953			stat->ops->setup_regs(stat, stat->priv);
 954		} else {
 955			/*
  956			 * Use the recover config to increase the chance of a
  957			 * good buffer processing and to bring the H3A module
  958			 * back to a valid state.
 959			 */
 960			stat->update = 1;
 961			stat->ops->setup_regs(stat, stat->recover_priv);
 962			stat->sbl_ovl_recover = 0;
 963
 964			/*
  965			 * Set 'update' again in case the module needs to use
  966			 * its regular configuration after the next buffer.
 967			 */
 968			stat->update = 1;
 969		}
 970
 971		isp_stat_buf_insert_magic(stat, stat->active_buf);
 972
 973		/*
  974		 * Hack: H3A modules may access an invalid memory address or send
  975		 * corrupted data to userspace if more than 1 SBL overflow
  976		 * happens in a row without re-writing the buffer's start memory
  977		 * address in the meantime. Such a situation is avoided if the
  978		 * module is not immediately re-enabled when the ISR misses the
  979		 * timing to process the buffer and to set up the registers.
  980		 * Because of that, pcr_enable(1) was moved inside this 'if'
  981		 * block. But the next interrupt will still happen, as during
  982		 * pcr_enable(0) the module was busy.
 983		 */
 984		isp_stat_pcr_enable(stat, 1);
 985		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 986	} else {
 987		/*
  988		 * If an SBL overflow occurs and the H3A driver misses the timing
  989		 * to process the buffer, stat->buf_err is set and won't be
  990		 * cleared now, so the next buffer will be correctly ignored.
  991		 * It's necessary due to a hw issue which makes the next H3A
  992		 * buffer start at the memory address where the previous one
  993		 * stopped, instead of at the configured address.
 994		 * Do not "stat->buf_err = 0" here.
 995		 */
 996
 997		if (stat->ops->buf_process)
 998			/*
  999			 * The driver may need to erase the current data prior
 1000			 * to processing a new buffer. If it misses the timing,
 1001			 * the next buffer might be wrong, so it should be
 1002			 * ignored. This happens only for the Histogram.
1003			 */
1004			atomic_set(&stat->buf_err, 1);
1005
1006		ret = STAT_NO_BUF;
1007		dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
1008					"device is busy.\n", stat->subdev.name);
1009	}
1010
1011out:
1012	stat->buf_processing = 0;
1013	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
1014}
1015
1016void omap3isp_stat_isr(struct ispstat *stat)
1017{
1018	__stat_isr(stat, 0);
1019}
1020
1021void omap3isp_stat_dma_isr(struct ispstat *stat)
1022{
1023	__stat_isr(stat, 1);
1024}
1025
1026int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
1027				  struct v4l2_fh *fh,
1028				  struct v4l2_event_subscription *sub)
1029{
1030	struct ispstat *stat = v4l2_get_subdevdata(subdev);
1031
1032	if (sub->type != stat->event_type)
1033		return -EINVAL;
1034
1035	return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
1036}
1037
1038int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
1039				    struct v4l2_fh *fh,
1040				    struct v4l2_event_subscription *sub)
1041{
1042	return v4l2_event_unsubscribe(fh, sub);
1043}
1044
1045void omap3isp_stat_unregister_entities(struct ispstat *stat)
1046{
1047	v4l2_device_unregister_subdev(&stat->subdev);
1048}
1049
1050int omap3isp_stat_register_entities(struct ispstat *stat,
1051				    struct v4l2_device *vdev)
1052{
1053	return v4l2_device_register_subdev(vdev, &stat->subdev);
1054}
1055
1056static int isp_stat_init_entities(struct ispstat *stat, const char *name,
1057				  const struct v4l2_subdev_ops *sd_ops)
1058{
1059	struct v4l2_subdev *subdev = &stat->subdev;
1060	struct media_entity *me = &subdev->entity;
1061
1062	v4l2_subdev_init(subdev, sd_ops);
1063	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
1064	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
1065	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
1066	v4l2_set_subdevdata(subdev, stat);
1067
1068	stat->pad.flags = MEDIA_PAD_FL_SINK;
1069	me->ops = NULL;
1070
1071	return media_entity_init(me, 1, &stat->pad, 0);
1072}
1073
1074int omap3isp_stat_init(struct ispstat *stat, const char *name,
1075		       const struct v4l2_subdev_ops *sd_ops)
1076{
1077	int ret;
1078
1079	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
1080	if (!stat->buf)
1081		return -ENOMEM;
1082
1083	isp_stat_buf_clear(stat);
1084	mutex_init(&stat->ioctl_lock);
1085	atomic_set(&stat->buf_err, 0);
1086
1087	ret = isp_stat_init_entities(stat, name, sd_ops);
1088	if (ret < 0) {
1089		mutex_destroy(&stat->ioctl_lock);
1090		kfree(stat->buf);
1091	}
1092
1093	return ret;
1094}
1095
1096void omap3isp_stat_cleanup(struct ispstat *stat)
1097{
1098	media_entity_cleanup(&stat->subdev.entity);
1099	mutex_destroy(&stat->ioctl_lock);
1100	isp_stat_bufs_free(stat);
1101	kfree(stat->buf);
1102}