drivers/gpu/drm/exynos/exynos_drm_ipp.c
v4.6
   1/*
   2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
   3 * Authors:
   4 *	Eunchul Kim <chulspro.kim@samsung.com>
   5 *	Jinyoung Jeon <jy0.jeon@samsung.com>
   6 *	Sangmin Lee <lsmin.lee@samsung.com>
   7 *
   8 * This program is free software; you can redistribute  it and/or modify it
   9 * under  the terms of  the GNU General  Public License as published by the
  10 * Free Software Foundation;  either version 2 of the  License, or (at your
  11 * option) any later version.
  12 *
  13 */
  14#include <linux/kernel.h>
  15#include <linux/platform_device.h>
  16#include <linux/types.h>
  17#include <linux/clk.h>
  18#include <linux/pm_runtime.h>
  19
  20#include <drm/drmP.h>
  21#include <drm/exynos_drm.h>
  22#include "exynos_drm_drv.h"
  23#include "exynos_drm_gem.h"
  24#include "exynos_drm_ipp.h"
  25#include "exynos_drm_iommu.h"
  26
   27/*
   28 * IPP stands for Image Post Processing and supports image
   29 * scaling/rotation and input/output DMA operations using
   30 * hardware blocks such as FIMC, GSC and the Rotator.
   31 * IPP is an integration driver for hardware blocks with these attributes.
   32 */
  33
  34/*
  35 * TODO
   36 * 1. expand command control id.
   37 * 2. integrate property and config.
   38 * 3. remove the send_event id check routine.
   39 * 4. compare send_event id if needed.
   40 * 5. free the subdrv_remove notifier callback list if needed.
   41 * 6. check subdrv_open with respect to multi-open.
   42 * 7. implement power and sysmmu control in power_on.
  43 */
  44
  45#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
   46#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)
  47
  48/*
  49 * A structure of event.
  50 *
  51 * @base: base of event.
  52 * @event: ipp event.
  53 */
  54struct drm_exynos_ipp_send_event {
  55	struct drm_pending_event	base;
  56	struct drm_exynos_ipp_event	event;
  57};
  58
  59/*
  60 * A structure of memory node.
  61 *
  62 * @list: list head to memory queue information.
  63 * @ops_id: id of operations.
  64 * @prop_id: id of property.
  65 * @buf_id: id of buffer.
  66 * @buf_info: gem objects and dma address, size.
  68 */
  69struct drm_exynos_ipp_mem_node {
  70	struct list_head	list;
  71	enum drm_exynos_ops_id	ops_id;
  72	u32	prop_id;
  73	u32	buf_id;
  74	struct drm_exynos_ipp_buf_info	buf_info;
  75};
  76
  77/*
  78 * A structure of ipp context.
  79 *
   80 * @subdrv: exynos drm sub-driver structure used to hook into the core.
  81 * @ipp_lock: lock for synchronization of access to ipp_idr.
  82 * @prop_lock: lock for synchronization of access to prop_idr.
  83 * @ipp_idr: ipp driver idr.
  84 * @prop_idr: property idr.
  85 * @event_workq: event work queue.
  86 * @cmd_workq: command work queue.
  87 */
  88struct ipp_context {
  89	struct exynos_drm_subdrv	subdrv;
  90	struct mutex	ipp_lock;
  91	struct mutex	prop_lock;
  92	struct idr	ipp_idr;
  93	struct idr	prop_idr;
  94	struct workqueue_struct	*event_workq;
  95	struct workqueue_struct	*cmd_workq;
  96};
  97
  98static LIST_HEAD(exynos_drm_ippdrv_list);
  99static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
 100static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
 101
 102int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
 103{
 104	mutex_lock(&exynos_drm_ippdrv_lock);
 105	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
 106	mutex_unlock(&exynos_drm_ippdrv_lock);
 107
 108	return 0;
 109}
 110
 111int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
 112{
 113	mutex_lock(&exynos_drm_ippdrv_lock);
 114	list_del(&ippdrv->drv_list);
 115	mutex_unlock(&exynos_drm_ippdrv_lock);
 116
 117	return 0;
 118}
 119
 120static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
 121{
 122	int ret;
 123
 124	mutex_lock(lock);
 125	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
 126	mutex_unlock(lock);
 127
 128	return ret;
 129}
 130
 131static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
 132{
 133	mutex_lock(lock);
 134	idr_remove(id_idr, id);
 135	mutex_unlock(lock);
 136}
 137
 138static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
 139{
 140	void *obj;
 141
 142	mutex_lock(lock);
 143	obj = idr_find(id_idr, id);
 144	mutex_unlock(lock);
 145
 146	return obj;
 147}
 148
 149static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
 150			    struct drm_exynos_ipp_property *property)
 151{
 152	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
 153				  !pm_runtime_suspended(ippdrv->dev)))
 154		return -EBUSY;
 155
 156	if (ippdrv->check_property &&
 157	    ippdrv->check_property(ippdrv->dev, property))
 158		return -EINVAL;
 159
 160	return 0;
 161}
 162
 163static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
 164		struct drm_exynos_ipp_property *property)
 165{
 166	struct exynos_drm_ippdrv *ippdrv;
 167	u32 ipp_id = property->ipp_id;
 168	int ret;
 169
 170	if (ipp_id) {
 171		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
 172		if (!ippdrv) {
 173			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
 174			return ERR_PTR(-ENODEV);
 175		}
 176
 177		ret = ipp_check_driver(ippdrv, property);
 178		if (ret < 0) {
 179			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
 180			return ERR_PTR(ret);
 181		}
 182
 183		return ippdrv;
 184	} else {
 185		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
 186			ret = ipp_check_driver(ippdrv, property);
 187			if (ret == 0)
 188				return ippdrv;
 189		}
 190
 191		DRM_DEBUG("cannot find driver suitable for given property.\n");
 192	}
 193
 194	return ERR_PTR(-ENODEV);
 195}
 196
 197static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
 198{
 199	struct exynos_drm_ippdrv *ippdrv;
 200	struct drm_exynos_ipp_cmd_node *c_node;
 201	int count = 0;
 202
 203	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
 204
  205	/*
  206	 * Search for the ipp driver that owns the given prop_id handle.
  207	 * The ipp subsystem sometimes needs to look a driver up by prop_id,
  208	 * e.g. in the PAUSE state, queue buf and command control paths.
  209	 */
 210	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
 211		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
 212
 213		mutex_lock(&ippdrv->cmd_lock);
 214		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
 215			if (c_node->property.prop_id == prop_id) {
 216				mutex_unlock(&ippdrv->cmd_lock);
 217				return ippdrv;
 218			}
 219		}
 220		mutex_unlock(&ippdrv->cmd_lock);
 221	}
 222
 223	return ERR_PTR(-ENODEV);
 224}
 225
 226int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
 227		struct drm_file *file)
 228{
 229	struct drm_exynos_file_private *file_priv = file->driver_priv;
 230	struct device *dev = file_priv->ipp_dev;
 231	struct ipp_context *ctx = get_ipp_context(dev);
 232	struct drm_exynos_ipp_prop_list *prop_list = data;
 233	struct exynos_drm_ippdrv *ippdrv;
 234	int count = 0;
 235
 236	if (!ctx) {
 237		DRM_ERROR("invalid context.\n");
 238		return -EINVAL;
 239	}
 240
 241	if (!prop_list) {
 242		DRM_ERROR("invalid property parameter.\n");
 243		return -EINVAL;
 244	}
 245
 246	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
 247
 248	if (!prop_list->ipp_id) {
 249		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
 250			count++;
 251
  252		/*
  253		 * Report the number of registered ipp drivers: in the first
  254		 * step the user application queries the driver count, and in
  255		 * the second step it fetches driver capabilities by ipp_id.
  256		 */
 257		prop_list->count = count;
 258	} else {
  259		/*
  260		 * Return the capabilities of the driver with the given ipp_id.
  261		 * Some devices do not support the writeback or output
  262		 * interfaces, so the user application uses this ioctl to
  263		 * pick a suitable ipp driver.
  264		 */
 265		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
 266						prop_list->ipp_id);
 267		if (!ippdrv) {
  268			DRM_ERROR("ipp%d driver not found.\n",
 269					prop_list->ipp_id);
 270			return -ENODEV;
 271		}
 272
 273		*prop_list = ippdrv->prop_list;
 274	}
 275
 276	return 0;
 277}
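/*
 * Illustrative two-step query from userspace (a sketch; assumes the
 * drm_exynos_ipp_prop_list layout from the UAPI header, error handling
 * elided):
 *
 *	struct drm_exynos_ipp_prop_list prop_list = { .ipp_id = 0 };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list);
 *	// prop_list.count now holds the number of registered drivers
 *
 *	prop_list.ipp_id = 1;	// query capabilities of driver #1
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list);
 */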
 278
 279static void ipp_print_property(struct drm_exynos_ipp_property *property,
 280		int idx)
 281{
 282	struct drm_exynos_ipp_config *config = &property->config[idx];
 283	struct drm_exynos_pos *pos = &config->pos;
 284	struct drm_exynos_sz *sz = &config->sz;
 285
 286	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
 287		property->prop_id, idx ? "dst" : "src", config->fmt);
 288
 289	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
 290		pos->x, pos->y, pos->w, pos->h,
 291		sz->hsize, sz->vsize, config->flip, config->degree);
 292}
 293
 294static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
 295{
 296	struct drm_exynos_ipp_cmd_work *cmd_work;
 297
 298	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
 299	if (!cmd_work)
 300		return ERR_PTR(-ENOMEM);
 301
 302	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
 303
 304	return cmd_work;
 305}
 306
 307static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
 308{
 309	struct drm_exynos_ipp_event_work *event_work;
 310
 311	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
 312	if (!event_work)
 313		return ERR_PTR(-ENOMEM);
 314
 315	INIT_WORK(&event_work->work, ipp_sched_event);
 316
 317	return event_work;
 318}
 319
 320int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 321		struct drm_file *file)
 322{
 323	struct drm_exynos_file_private *file_priv = file->driver_priv;
 324	struct device *dev = file_priv->ipp_dev;
 325	struct ipp_context *ctx = get_ipp_context(dev);
 326	struct drm_exynos_ipp_property *property = data;
 327	struct exynos_drm_ippdrv *ippdrv;
 328	struct drm_exynos_ipp_cmd_node *c_node;
 329	u32 prop_id;
 330	int ret, i;
 331
 332	if (!ctx) {
 333		DRM_ERROR("invalid context.\n");
 334		return -EINVAL;
 335	}
 336
 337	if (!property) {
 338		DRM_ERROR("invalid property parameter.\n");
 339		return -EINVAL;
 340	}
 341
 342	prop_id = property->prop_id;
 343
  344	/*
  345	 * Log the property that the user application has set;
  346	 * user applications may set various properties.
  347	 */
 348	for_each_ipp_ops(i)
 349		ipp_print_property(property, i);
 350
  351	/*
  352	 * If prop_id is not zero, try to update an existing property.
  353	 */
 354	if (prop_id) {
 355		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
 356
 357		if (!c_node || c_node->filp != file) {
 358			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
 359			return -EINVAL;
 360		}
 361
 362		if (c_node->state != IPP_STATE_STOP) {
 363			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
 364			return -EINVAL;
 365		}
 366
 367		c_node->property = *property;
 368
 369		return 0;
 370	}
 371
 372	/* find ipp driver using ipp id */
 373	ippdrv = ipp_find_driver(ctx, property);
 374	if (IS_ERR(ippdrv)) {
 375		DRM_ERROR("failed to get ipp driver.\n");
 376		return -EINVAL;
 377	}
 378
 379	/* allocate command node */
 380	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
 381	if (!c_node)
 382		return -ENOMEM;
 383
 384	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
 385	if (ret < 0) {
 386		DRM_ERROR("failed to create id.\n");
 387		goto err_clear;
 388	}
 389	property->prop_id = ret;
 390
 391	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
 392		property->prop_id, property->cmd, ippdrv);
 393
 394	/* stored property information and ippdrv in private data */
 395	c_node->property = *property;
 396	c_node->state = IPP_STATE_IDLE;
 397	c_node->filp = file;
 398
 399	c_node->start_work = ipp_create_cmd_work();
 400	if (IS_ERR(c_node->start_work)) {
 401		DRM_ERROR("failed to create start work.\n");
 402		ret = PTR_ERR(c_node->start_work);
 403		goto err_remove_id;
 404	}
 405
 406	c_node->stop_work = ipp_create_cmd_work();
 407	if (IS_ERR(c_node->stop_work)) {
 408		DRM_ERROR("failed to create stop work.\n");
 409		ret = PTR_ERR(c_node->stop_work);
 410		goto err_free_start;
 411	}
 412
 413	c_node->event_work = ipp_create_event_work();
 414	if (IS_ERR(c_node->event_work)) {
 415		DRM_ERROR("failed to create event work.\n");
 416		ret = PTR_ERR(c_node->event_work);
 417		goto err_free_stop;
 418	}
 419
 420	mutex_init(&c_node->lock);
 421	mutex_init(&c_node->mem_lock);
 422	mutex_init(&c_node->event_lock);
 423
 424	init_completion(&c_node->start_complete);
 425	init_completion(&c_node->stop_complete);
 426
 427	for_each_ipp_ops(i)
 428		INIT_LIST_HEAD(&c_node->mem_list[i]);
 429
 430	INIT_LIST_HEAD(&c_node->event_list);
 431	mutex_lock(&ippdrv->cmd_lock);
 432	list_add_tail(&c_node->list, &ippdrv->cmd_list);
 433	mutex_unlock(&ippdrv->cmd_lock);
 434
 435	/* make dedicated state without m2m */
 436	if (!ipp_is_m2m_cmd(property->cmd))
 437		ippdrv->dedicated = true;
 438
 439	return 0;
 440
 441err_free_stop:
 442	kfree(c_node->stop_work);
 443err_free_start:
 444	kfree(c_node->start_work);
 445err_remove_id:
 446	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
 447err_clear:
 448	kfree(c_node);
 449	return ret;
 450}
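/*
 * Illustrative property setup from userspace (a sketch; the field names
 * follow struct drm_exynos_ipp_property from the UAPI header, the values
 * are arbitrary examples):
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,
 *		.ipp_id = 0,	// 0: let the kernel pick a suitable driver
 *		.config[EXYNOS_DRM_OPS_SRC] = {
 *			.ops_id = EXYNOS_DRM_OPS_SRC,
 *			.fmt = DRM_FORMAT_XRGB8888,
 *			.sz = { .hsize = 1280, .vsize = 720 },
 *			.pos = { .w = 1280, .h = 720 },
 *		},
 *		// .config[EXYNOS_DRM_OPS_DST] filled in the same way
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	// prop.prop_id now holds the handle for QUEUE_BUF and CMD_CTRL
 */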
 451
 452static int ipp_validate_mem_node(struct drm_device *drm_dev,
 453				 struct drm_exynos_ipp_mem_node *m_node,
 454				 struct drm_exynos_ipp_cmd_node *c_node)
 455{
 456	struct drm_exynos_ipp_config *ipp_cfg;
 457	unsigned int num_plane;
 458	unsigned long size, buf_size = 0, plane_size, img_size = 0;
 459	unsigned int bpp, width, height;
 460	int i;
 461
 462	ipp_cfg = &c_node->property.config[m_node->ops_id];
 463	num_plane = drm_format_num_planes(ipp_cfg->fmt);
 464
  465	/*
  466	 * This is a rather simplified validation of a memory node.
  467	 * It basically verifies the provided gem object handles
  468	 * and the buffer sizes with respect to the current configuration.
  469	 * This is not the best that can be done,
  470	 * but it seems more than enough.
  471	 */
 472	for (i = 0; i < num_plane; ++i) {
 473		width = ipp_cfg->sz.hsize;
 474		height = ipp_cfg->sz.vsize;
 475		bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
 476
 477		/*
 478		 * The result of drm_format_plane_cpp() for chroma planes must
  479		 * be combined with drm_format_xxxx_chroma_subsampling() for
  480		 * a correct result.
 481		 */
 482		if (i > 0) {
 483			width /= drm_format_horz_chroma_subsampling(
 484								ipp_cfg->fmt);
 485			height /= drm_format_vert_chroma_subsampling(
 486								ipp_cfg->fmt);
 487		}
 488		plane_size = width * height * bpp;
 489		img_size += plane_size;
 490
 491		if (m_node->buf_info.handles[i]) {
 492			size = exynos_drm_gem_get_size(drm_dev,
 493					m_node->buf_info.handles[i],
 494					c_node->filp);
 495			if (plane_size > size) {
 496				DRM_ERROR(
 497					"buffer %d is smaller than required\n",
 498					i);
 499				return -EINVAL;
 500			}
 501
 502			buf_size += size;
 503		}
 504	}
 505
 506	if (buf_size < img_size) {
 507		DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n",
 508			buf_size, img_size);
 509		return -EINVAL;
 510	}
 511
 512	return 0;
 513}
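/*
 * Worked example of the check above (illustrative): for a 1280x720 NV12
 * image, drm_format_num_planes() reports two planes. Plane 0 (luma, cpp 1)
 * needs 1280 * 720 * 1 = 921600 bytes; plane 1 (interleaved CbCr, cpp 2)
 * is subsampled 2x2 and needs (1280 / 2) * (720 / 2) * 2 = 460800 bytes,
 * so the supplied GEM buffers must cover img_size = 1382400 bytes in total.
 */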
 514
 515static int ipp_put_mem_node(struct drm_device *drm_dev,
 516		struct drm_exynos_ipp_cmd_node *c_node,
 517		struct drm_exynos_ipp_mem_node *m_node)
 518{
 519	int i;
 520
 521	DRM_DEBUG_KMS("node[%p]\n", m_node);
 522
 523	if (!m_node) {
 524		DRM_ERROR("invalid dequeue node.\n");
 525		return -EFAULT;
 526	}
 527
 528	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
 529
 530	/* put gem buffer */
 531	for_each_ipp_planar(i) {
 532		unsigned long handle = m_node->buf_info.handles[i];
 533		if (handle)
 534			exynos_drm_gem_put_dma_addr(drm_dev, handle,
 535							c_node->filp);
 536	}
 537
 538	list_del(&m_node->list);
 539	kfree(m_node);
 540
 541	return 0;
 542}
 543
 544static struct drm_exynos_ipp_mem_node
 545		*ipp_get_mem_node(struct drm_device *drm_dev,
 546		struct drm_exynos_ipp_cmd_node *c_node,
 547		struct drm_exynos_ipp_queue_buf *qbuf)
 548{
 549	struct drm_exynos_ipp_mem_node *m_node;
 550	struct drm_exynos_ipp_buf_info *buf_info;
 551	int i;
 552
 553	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
 554	if (!m_node)
 555		return ERR_PTR(-ENOMEM);
 556
 557	buf_info = &m_node->buf_info;
 558
 559	/* operations, buffer id */
 560	m_node->ops_id = qbuf->ops_id;
 561	m_node->prop_id = qbuf->prop_id;
 562	m_node->buf_id = qbuf->buf_id;
 563	INIT_LIST_HEAD(&m_node->list);
 564
 565	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
 566	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
 567
 568	for_each_ipp_planar(i) {
 569		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
 570
 571		/* get dma address by handle */
 572		if (qbuf->handle[i]) {
 573			dma_addr_t *addr;
 574
 575			addr = exynos_drm_gem_get_dma_addr(drm_dev,
 576					qbuf->handle[i], c_node->filp);
 577			if (IS_ERR(addr)) {
 578				DRM_ERROR("failed to get addr.\n");
 579				ipp_put_mem_node(drm_dev, c_node, m_node);
 580				return ERR_PTR(-EFAULT);
 581			}
 582
 583			buf_info->handles[i] = qbuf->handle[i];
 584			buf_info->base[i] = *addr;
 585			DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
 586				      &buf_info->base[i], buf_info->handles[i]);
 587		}
 588	}
 589
 590	mutex_lock(&c_node->mem_lock);
 591	if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
 592		ipp_put_mem_node(drm_dev, c_node, m_node);
 593		mutex_unlock(&c_node->mem_lock);
 594		return ERR_PTR(-EFAULT);
 595	}
 596	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
 597	mutex_unlock(&c_node->mem_lock);
 598
 599	return m_node;
 600}
 601
 602static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
 603			       struct drm_exynos_ipp_cmd_node *c_node, int ops)
 604{
 605	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
 606	struct list_head *head = &c_node->mem_list[ops];
 607
 608	mutex_lock(&c_node->mem_lock);
 609
 610	list_for_each_entry_safe(m_node, tm_node, head, list) {
 611		int ret;
 612
 613		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
 614		if (ret)
 615			DRM_ERROR("failed to put m_node.\n");
 616	}
 617
 618	mutex_unlock(&c_node->mem_lock);
 619}
 620
 621static int ipp_get_event(struct drm_device *drm_dev,
 622		struct drm_exynos_ipp_cmd_node *c_node,
 623		struct drm_exynos_ipp_queue_buf *qbuf)
 624{
 625	struct drm_exynos_ipp_send_event *e;
 626	int ret;
 627
 628	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
 629
 630	e = kzalloc(sizeof(*e), GFP_KERNEL);
 631	if (!e)
 632		return -ENOMEM;
 633
 634	/* make event */
 635	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
 636	e->event.base.length = sizeof(e->event);
 637	e->event.user_data = qbuf->user_data;
 638	e->event.prop_id = qbuf->prop_id;
 639	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
 640
 641	ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
 642	if (ret) {
 643		kfree(e);
 644		return ret;
 645	}
 646
 647	mutex_lock(&c_node->event_lock);
 648	list_add_tail(&e->base.link, &c_node->event_list);
 649	mutex_unlock(&c_node->event_lock);
 650
 651	return 0;
 652}
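/*
 * The event reserved above is completed in ipp_send_event() and delivered
 * through the standard DRM event queue. Userspace would pick it up roughly
 * like this (illustrative sketch; a robust reader would parse the generic
 * struct drm_event headers from the byte stream):
 *
 *	struct drm_exynos_ipp_event ev;
 *
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    ev.base.type == DRM_EXYNOS_IPP_EVENT)
 *		handle_completed_buffer(ev.prop_id, ev.buf_id[1]);
 */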
 653
 654static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
 655		struct drm_exynos_ipp_queue_buf *qbuf)
 656{
 657	struct drm_exynos_ipp_send_event *e, *te;
 658	int count = 0;
 659
 660	mutex_lock(&c_node->event_lock);
 661	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
 662		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
 663
  664		/*
  665		 * qbuf == NULL means delete all events: the stop path
  666		 * wants the whole event list removed. Otherwise only the
  667		 * event with the matching buf id is deleted.
  668		 */
 669		if (!qbuf) {
 670			/* delete list */
 671			list_del(&e->base.link);
 672			kfree(e);
 673		}
 674
 675		/* compare buffer id */
 676		if (qbuf && (qbuf->buf_id ==
 677		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
 678			/* delete list */
 679			list_del(&e->base.link);
 680			kfree(e);
 681			goto out_unlock;
 682		}
 683	}
 684
 685out_unlock:
 686	mutex_unlock(&c_node->event_lock);
 687	return;
 688}
 689
 690static void ipp_clean_cmd_node(struct ipp_context *ctx,
 691				struct drm_exynos_ipp_cmd_node *c_node)
 692{
 693	int i;
 694
 695	/* cancel works */
 696	cancel_work_sync(&c_node->start_work->work);
 697	cancel_work_sync(&c_node->stop_work->work);
 698	cancel_work_sync(&c_node->event_work->work);
 699
 700	/* put event */
 701	ipp_put_event(c_node, NULL);
 702
 703	for_each_ipp_ops(i)
 704		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
 705
 706	/* delete list */
 707	list_del(&c_node->list);
 708
 709	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
 710			c_node->property.prop_id);
 711
 712	/* destroy mutex */
 713	mutex_destroy(&c_node->lock);
 714	mutex_destroy(&c_node->mem_lock);
 715	mutex_destroy(&c_node->event_lock);
 716
 717	/* free command node */
 718	kfree(c_node->start_work);
 719	kfree(c_node->stop_work);
 720	kfree(c_node->event_work);
 721	kfree(c_node);
 722}
 723
 724static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
 725{
 726	switch (c_node->property.cmd) {
 727	case IPP_CMD_WB:
 728		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
 729	case IPP_CMD_OUTPUT:
 730		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
 731	case IPP_CMD_M2M:
 732	default:
 733		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
 734		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
 735	}
 736}
 737
 738static struct drm_exynos_ipp_mem_node
 739		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
 740		struct drm_exynos_ipp_queue_buf *qbuf)
 741{
 742	struct drm_exynos_ipp_mem_node *m_node;
 743	struct list_head *head;
 744	int count = 0;
 745
 746	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
 747
 748	/* source/destination memory list */
 749	head = &c_node->mem_list[qbuf->ops_id];
 750
 751	/* find memory node from memory list */
 752	list_for_each_entry(m_node, head, list) {
 753		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
 754
 755		/* compare buffer id */
 756		if (m_node->buf_id == qbuf->buf_id)
 757			return m_node;
 758	}
 759
 760	return NULL;
 761}
 762
 763static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
 764		struct drm_exynos_ipp_cmd_node *c_node,
 765		struct drm_exynos_ipp_mem_node *m_node)
 766{
 767	struct exynos_drm_ipp_ops *ops = NULL;
 768	int ret = 0;
 769
 770	DRM_DEBUG_KMS("node[%p]\n", m_node);
 771
 772	if (!m_node) {
 773		DRM_ERROR("invalid queue node.\n");
 774		return -EFAULT;
 775	}
 776
 777	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
 778
 779	/* get operations callback */
 780	ops = ippdrv->ops[m_node->ops_id];
 781	if (!ops) {
  782		DRM_ERROR("ops not supported.\n");
 783		return -EFAULT;
 784	}
 785
 786	/* set address and enable irq */
 787	if (ops->set_addr) {
 788		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
 789			m_node->buf_id, IPP_BUF_ENQUEUE);
 790		if (ret) {
 791			DRM_ERROR("failed to set addr.\n");
 792			return ret;
 793		}
 794	}
 795
 796	return ret;
 797}
 798
 799static void ipp_handle_cmd_work(struct device *dev,
 800		struct exynos_drm_ippdrv *ippdrv,
 801		struct drm_exynos_ipp_cmd_work *cmd_work,
 802		struct drm_exynos_ipp_cmd_node *c_node)
 803{
 804	struct ipp_context *ctx = get_ipp_context(dev);
 805
 806	cmd_work->ippdrv = ippdrv;
 807	cmd_work->c_node = c_node;
 808	queue_work(ctx->cmd_workq, &cmd_work->work);
 809}
 810
 811static int ipp_queue_buf_with_run(struct device *dev,
 812		struct drm_exynos_ipp_cmd_node *c_node,
 813		struct drm_exynos_ipp_mem_node *m_node,
 814		struct drm_exynos_ipp_queue_buf *qbuf)
 815{
 816	struct exynos_drm_ippdrv *ippdrv;
 817	struct drm_exynos_ipp_property *property;
 818	struct exynos_drm_ipp_ops *ops;
 819	int ret;
 820
 821	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
 822	if (IS_ERR(ippdrv)) {
 823		DRM_ERROR("failed to get ipp driver.\n");
 824		return -EFAULT;
 825	}
 826
 827	ops = ippdrv->ops[qbuf->ops_id];
 828	if (!ops) {
 829		DRM_ERROR("failed to get ops.\n");
 830		return -EFAULT;
 831	}
 832
 833	property = &c_node->property;
 834
 835	if (c_node->state != IPP_STATE_START) {
 836		DRM_DEBUG_KMS("bypass for invalid state.\n");
 837		return 0;
 838	}
 839
 840	mutex_lock(&c_node->mem_lock);
 841	if (!ipp_check_mem_list(c_node)) {
 842		mutex_unlock(&c_node->mem_lock);
 843		DRM_DEBUG_KMS("empty memory.\n");
 844		return 0;
 845	}
 846
  847	/*
  848	 * Once the destination buffer is set and the clock enabled,
  849	 * m2m operations need to be started from queue_buf.
  850	 */
 851	if (ipp_is_m2m_cmd(property->cmd)) {
 852		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
 853
 854		cmd_work->ctrl = IPP_CTRL_PLAY;
 855		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
 856	} else {
 857		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
 858		if (ret) {
 859			mutex_unlock(&c_node->mem_lock);
 860			DRM_ERROR("failed to set m node.\n");
 861			return ret;
 862		}
 863	}
 864	mutex_unlock(&c_node->mem_lock);
 865
 866	return 0;
 867}
 868
 869static void ipp_clean_queue_buf(struct drm_device *drm_dev,
 870		struct drm_exynos_ipp_cmd_node *c_node,
 871		struct drm_exynos_ipp_queue_buf *qbuf)
 872{
 873	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
 874
 875	/* delete list */
 876	mutex_lock(&c_node->mem_lock);
 877	list_for_each_entry_safe(m_node, tm_node,
 878		&c_node->mem_list[qbuf->ops_id], list) {
 879		if (m_node->buf_id == qbuf->buf_id &&
 880		    m_node->ops_id == qbuf->ops_id)
 881			ipp_put_mem_node(drm_dev, c_node, m_node);
 882	}
 883	mutex_unlock(&c_node->mem_lock);
 884}
 885
 886int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
 887		struct drm_file *file)
 888{
 889	struct drm_exynos_file_private *file_priv = file->driver_priv;
 890	struct device *dev = file_priv->ipp_dev;
 891	struct ipp_context *ctx = get_ipp_context(dev);
 892	struct drm_exynos_ipp_queue_buf *qbuf = data;
 893	struct drm_exynos_ipp_cmd_node *c_node;
 894	struct drm_exynos_ipp_mem_node *m_node;
 895	int ret;
 896
 897	if (!qbuf) {
 898		DRM_ERROR("invalid buf parameter.\n");
 899		return -EINVAL;
 900	}
 901
 902	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
 903		DRM_ERROR("invalid ops parameter.\n");
 904		return -EINVAL;
 905	}
 906
 907	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
 908		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
 909		qbuf->buf_id, qbuf->buf_type);
 910
 911	/* find command node */
 912	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
 913		qbuf->prop_id);
 914	if (!c_node || c_node->filp != file) {
 915		DRM_ERROR("failed to get command node.\n");
 916		return -ENODEV;
 917	}
 918
 919	/* buffer control */
 920	switch (qbuf->buf_type) {
 921	case IPP_BUF_ENQUEUE:
 922		/* get memory node */
 923		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
 924		if (IS_ERR(m_node)) {
 925			DRM_ERROR("failed to get m_node.\n");
 926			return PTR_ERR(m_node);
 927		}
 928
  929		/*
  930		 * First get an event for the destination buffer, and then,
  931		 * in the M2M case, run the operation with that destination
  932		 * buffer if needed.
  933		 */
 934		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
 935			/* get event for destination buffer */
 936			ret = ipp_get_event(drm_dev, c_node, qbuf);
 937			if (ret) {
 938				DRM_ERROR("failed to get event.\n");
 939				goto err_clean_node;
 940			}
 941
  942			/*
  943			 * In the M2M case run the play control for streaming;
  944			 * otherwise just set the address and wait.
  945			 */
 946			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
 947			if (ret) {
 948				DRM_ERROR("failed to run command.\n");
 949				goto err_clean_node;
 950			}
 951		}
 952		break;
 953	case IPP_BUF_DEQUEUE:
 954		mutex_lock(&c_node->lock);
 955
 956		/* put event for destination buffer */
 957		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
 958			ipp_put_event(c_node, qbuf);
 959
 960		ipp_clean_queue_buf(drm_dev, c_node, qbuf);
 961
 962		mutex_unlock(&c_node->lock);
 963		break;
 964	default:
 965		DRM_ERROR("invalid buffer control.\n");
 966		return -EINVAL;
 967	}
 968
 969	return 0;
 970
 971err_clean_node:
 972	DRM_ERROR("clean memory nodes.\n");
 973
 974	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
 975	return ret;
 976}
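/*
 * Illustrative destination-buffer enqueue from userspace (a sketch; the
 * GEM handle is assumed to have been created beforehand, error handling
 * elided):
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_DST,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *		.handle = { gem_handle },
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */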
 977
 978static bool exynos_drm_ipp_check_valid(struct device *dev,
 979		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
 980{
 981	if (ctrl != IPP_CTRL_PLAY) {
 982		if (pm_runtime_suspended(dev)) {
 983			DRM_ERROR("pm:runtime_suspended.\n");
 984			goto err_status;
 985		}
 986	}
 987
 988	switch (ctrl) {
 989	case IPP_CTRL_PLAY:
 990		if (state != IPP_STATE_IDLE)
 991			goto err_status;
 992		break;
 993	case IPP_CTRL_STOP:
 994		if (state == IPP_STATE_STOP)
 995			goto err_status;
 996		break;
 997	case IPP_CTRL_PAUSE:
 998		if (state != IPP_STATE_START)
 999			goto err_status;
1000		break;
1001	case IPP_CTRL_RESUME:
1002		if (state != IPP_STATE_STOP)
1003			goto err_status;
1004		break;
1005	default:
1006		DRM_ERROR("invalid state.\n");
1007		goto err_status;
1008	}
1009
1010	return true;
1011
1012err_status:
1013	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1014	return false;
1015}
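/*
 * Summary of the transitions accepted above:
 *
 *	ctrl		required current state
 *	PLAY		IDLE
 *	STOP		any state except STOP
 *	PAUSE		START
 *	RESUME		STOP
 *
 * Every control except PLAY additionally requires that the device is not
 * runtime-suspended.
 */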
1016
1017int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1018		struct drm_file *file)
1019{
1020	struct drm_exynos_file_private *file_priv = file->driver_priv;
1021	struct exynos_drm_ippdrv *ippdrv = NULL;
1022	struct device *dev = file_priv->ipp_dev;
1023	struct ipp_context *ctx = get_ipp_context(dev);
1024	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1025	struct drm_exynos_ipp_cmd_work *cmd_work;
1026	struct drm_exynos_ipp_cmd_node *c_node;
1027
1028	if (!ctx) {
1029		DRM_ERROR("invalid context.\n");
1030		return -EINVAL;
1031	}
1032
1033	if (!cmd_ctrl) {
1034		DRM_ERROR("invalid control parameter.\n");
1035		return -EINVAL;
1036	}
1037
1038	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
1039		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1040
1041	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1042	if (IS_ERR(ippdrv)) {
1043		DRM_ERROR("failed to get ipp driver.\n");
1044		return PTR_ERR(ippdrv);
1045	}
1046
1047	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1048		cmd_ctrl->prop_id);
1049	if (!c_node || c_node->filp != file) {
1050		DRM_ERROR("invalid command node list.\n");
1051		return -ENODEV;
1052	}
1053
1054	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1055	    c_node->state)) {
1056		DRM_ERROR("invalid state.\n");
1057		return -EINVAL;
1058	}
1059
1060	switch (cmd_ctrl->ctrl) {
1061	case IPP_CTRL_PLAY:
1062		if (pm_runtime_suspended(ippdrv->dev))
1063			pm_runtime_get_sync(ippdrv->dev);
1064
1065		c_node->state = IPP_STATE_START;
1066
1067		cmd_work = c_node->start_work;
1068		cmd_work->ctrl = cmd_ctrl->ctrl;
1069		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1070		break;
1071	case IPP_CTRL_STOP:
1072		cmd_work = c_node->stop_work;
1073		cmd_work->ctrl = cmd_ctrl->ctrl;
1074		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1075
1076		if (!wait_for_completion_timeout(&c_node->stop_complete,
1077		    msecs_to_jiffies(300))) {
1078			DRM_ERROR("timeout stop:prop_id[%d]\n",
1079				c_node->property.prop_id);
1080		}
1081
1082		c_node->state = IPP_STATE_STOP;
1083		ippdrv->dedicated = false;
1084		mutex_lock(&ippdrv->cmd_lock);
1085		ipp_clean_cmd_node(ctx, c_node);
1086
1087		if (list_empty(&ippdrv->cmd_list))
1088			pm_runtime_put_sync(ippdrv->dev);
1089		mutex_unlock(&ippdrv->cmd_lock);
1090		break;
1091	case IPP_CTRL_PAUSE:
1092		cmd_work = c_node->stop_work;
1093		cmd_work->ctrl = cmd_ctrl->ctrl;
1094		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1095
1096		if (!wait_for_completion_timeout(&c_node->stop_complete,
1097		    msecs_to_jiffies(200))) {
1098			DRM_ERROR("timeout stop:prop_id[%d]\n",
1099				c_node->property.prop_id);
1100		}
1101
1102		c_node->state = IPP_STATE_STOP;
1103		break;
1104	case IPP_CTRL_RESUME:
1105		c_node->state = IPP_STATE_START;
1106		cmd_work = c_node->start_work;
1107		cmd_work->ctrl = cmd_ctrl->ctrl;
1108		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1109		break;
1110	default:
 1111		DRM_ERROR("unsupported control type.\n");
1112		return -EINVAL;
1113	}
1114
1115	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
1116		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1117
1118	return 0;
1119}
1120
1121int exynos_drm_ippnb_register(struct notifier_block *nb)
1122{
1123	return blocking_notifier_chain_register(
1124		&exynos_drm_ippnb_list, nb);
1125}
1126
1127int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1128{
1129	return blocking_notifier_chain_unregister(
1130		&exynos_drm_ippnb_list, nb);
1131}
1132
1133int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1134{
1135	return blocking_notifier_call_chain(
1136		&exynos_drm_ippnb_list, val, v);
1137}
1138
1139static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1140		struct drm_exynos_ipp_property *property)
1141{
1142	struct exynos_drm_ipp_ops *ops = NULL;
1143	bool swap = false;
1144	int ret, i;
1145
1146	if (!property) {
1147		DRM_ERROR("invalid property parameter.\n");
1148		return -EINVAL;
1149	}
1150
1151	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1152
1153	/* reset h/w block */
1154	if (ippdrv->reset &&
1155	    ippdrv->reset(ippdrv->dev)) {
1156		return -EINVAL;
1157	}
1158
1159	/* set source,destination operations */
1160	for_each_ipp_ops(i) {
1161		struct drm_exynos_ipp_config *config =
1162			&property->config[i];
1163
1164		ops = ippdrv->ops[i];
1165		if (!ops || !config) {
 1166			DRM_ERROR("ops or config not supported.\n");
1167			return -EINVAL;
1168		}
1169
1170		/* set format */
1171		if (ops->set_fmt) {
1172			ret = ops->set_fmt(ippdrv->dev, config->fmt);
1173			if (ret)
1174				return ret;
1175		}
1176
1177		/* set transform for rotation, flip */
1178		if (ops->set_transf) {
1179			ret = ops->set_transf(ippdrv->dev, config->degree,
1180				config->flip, &swap);
1181			if (ret)
1182				return ret;
1183		}
1184
1185		/* set size */
1186		if (ops->set_size) {
1187			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1188				&config->sz);
1189			if (ret)
1190				return ret;
1191		}
1192	}
1193
1194	return 0;
1195}
1196
1197static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1198		struct drm_exynos_ipp_cmd_node *c_node)
1199{
1200	struct drm_exynos_ipp_mem_node *m_node;
1201	struct drm_exynos_ipp_property *property = &c_node->property;
1202	struct list_head *head;
1203	int ret, i;
1204
1205	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1206
1207	/* store command info in ippdrv */
1208	ippdrv->c_node = c_node;
1209
1210	mutex_lock(&c_node->mem_lock);
1211	if (!ipp_check_mem_list(c_node)) {
1212		DRM_DEBUG_KMS("empty memory.\n");
1213		ret = -ENOMEM;
1214		goto err_unlock;
1215	}
1216
1217	/* set current property in ippdrv */
1218	ret = ipp_set_property(ippdrv, property);
1219	if (ret) {
1220		DRM_ERROR("failed to set property.\n");
1221		ippdrv->c_node = NULL;
1222		goto err_unlock;
1223	}
1224
1225	/* check command */
1226	switch (property->cmd) {
1227	case IPP_CMD_M2M:
1228		for_each_ipp_ops(i) {
1229			/* source/destination memory list */
1230			head = &c_node->mem_list[i];
1231
1232			m_node = list_first_entry(head,
1233				struct drm_exynos_ipp_mem_node, list);
1234
1235			DRM_DEBUG_KMS("m_node[%p]\n", m_node);
1236
1237			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1238			if (ret) {
1239				DRM_ERROR("failed to set m node.\n");
1240				goto err_unlock;
1241			}
1242		}
1243		break;
1244	case IPP_CMD_WB:
1245		/* destination memory list */
1246		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1247
1248		list_for_each_entry(m_node, head, list) {
1249			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1250			if (ret) {
1251				DRM_ERROR("failed to set m node.\n");
1252				goto err_unlock;
1253			}
1254		}
1255		break;
1256	case IPP_CMD_OUTPUT:
1257		/* source memory list */
1258		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1259
1260		list_for_each_entry(m_node, head, list) {
1261			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1262			if (ret) {
1263				DRM_ERROR("failed to set m node.\n");
1264				goto err_unlock;
1265			}
1266		}
1267		break;
1268	default:
1269		DRM_ERROR("invalid operations.\n");
1270		ret = -EINVAL;
1271		goto err_unlock;
1272	}
1273	mutex_unlock(&c_node->mem_lock);
1274
1275	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
1276
1277	/* start operations */
1278	if (ippdrv->start) {
1279		ret = ippdrv->start(ippdrv->dev, property->cmd);
1280		if (ret) {
1281			DRM_ERROR("failed to start ops.\n");
1282			ippdrv->c_node = NULL;
1283			return ret;
1284		}
1285	}
1286
1287	return 0;
1288
1289err_unlock:
1290	mutex_unlock(&c_node->mem_lock);
1291	ippdrv->c_node = NULL;
1292	return ret;
1293}
1294
1295static int ipp_stop_property(struct drm_device *drm_dev,
1296		struct exynos_drm_ippdrv *ippdrv,
1297		struct drm_exynos_ipp_cmd_node *c_node)
1298{
1299	struct drm_exynos_ipp_property *property = &c_node->property;
1300	int i;
1301
1302	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1303
1304	/* stop operations */
1305	if (ippdrv->stop)
1306		ippdrv->stop(ippdrv->dev, property->cmd);
1307
1308	/* check command */
1309	switch (property->cmd) {
1310	case IPP_CMD_M2M:
1311		for_each_ipp_ops(i)
1312			ipp_clean_mem_nodes(drm_dev, c_node, i);
1313		break;
1314	case IPP_CMD_WB:
1315		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
1316		break;
1317	case IPP_CMD_OUTPUT:
1318		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
1319		break;
1320	default:
1321		DRM_ERROR("invalid operations.\n");
1322		return -EINVAL;
1323	}
1324
1325	return 0;
1326}
1327
1328void ipp_sched_cmd(struct work_struct *work)
1329{
1330	struct drm_exynos_ipp_cmd_work *cmd_work =
1331		container_of(work, struct drm_exynos_ipp_cmd_work, work);
1332	struct exynos_drm_ippdrv *ippdrv;
1333	struct drm_exynos_ipp_cmd_node *c_node;
1334	struct drm_exynos_ipp_property *property;
1335	int ret;
1336
1337	ippdrv = cmd_work->ippdrv;
1338	if (!ippdrv) {
1339		DRM_ERROR("invalid ippdrv list.\n");
1340		return;
1341	}
1342
1343	c_node = cmd_work->c_node;
1344	if (!c_node) {
1345		DRM_ERROR("invalid command node list.\n");
1346		return;
1347	}
1348
1349	mutex_lock(&c_node->lock);
1350
1351	property = &c_node->property;
1352
1353	switch (cmd_work->ctrl) {
1354	case IPP_CTRL_PLAY:
1355	case IPP_CTRL_RESUME:
1356		ret = ipp_start_property(ippdrv, c_node);
1357		if (ret) {
1358			DRM_ERROR("failed to start property:prop_id[%d]\n",
1359				c_node->property.prop_id);
1360			goto err_unlock;
1361		}
1362
 1363		/*
 1364		 * The M2M case waits for completion of the transfer:
 1365		 * it performs a single unit of work per queued buffer,
 1366		 * so each data transfer must complete before the next
 1367		 * one can be scheduled.
 1368		 */
1369		if (ipp_is_m2m_cmd(property->cmd)) {
1370			if (!wait_for_completion_timeout
1371			    (&c_node->start_complete, msecs_to_jiffies(200))) {
1372				DRM_ERROR("timeout event:prop_id[%d]\n",
1373					c_node->property.prop_id);
1374				goto err_unlock;
1375			}
1376		}
1377		break;
1378	case IPP_CTRL_STOP:
1379	case IPP_CTRL_PAUSE:
1380		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1381			c_node);
1382		if (ret) {
1383			DRM_ERROR("failed to stop property.\n");
1384			goto err_unlock;
1385		}
1386
1387		complete(&c_node->stop_complete);
1388		break;
1389	default:
1390		DRM_ERROR("unknown control type\n");
1391		break;
1392	}
1393
1394	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
1395
1396err_unlock:
1397	mutex_unlock(&c_node->lock);
1398}
1399
1400static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1401		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1402{
1403	struct drm_device *drm_dev = ippdrv->drm_dev;
1404	struct drm_exynos_ipp_property *property = &c_node->property;
1405	struct drm_exynos_ipp_mem_node *m_node;
1406	struct drm_exynos_ipp_queue_buf qbuf;
1407	struct drm_exynos_ipp_send_event *e;
1408	struct list_head *head;
1409	struct timeval now;
1410	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1411	int ret, i;
1412
1413	for_each_ipp_ops(i)
1414		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
1415
1416	if (!drm_dev) {
1417		DRM_ERROR("failed to get drm_dev.\n");
1418		return -EINVAL;
1419	}
1420
1421	if (!property) {
1422		DRM_ERROR("failed to get property.\n");
1423		return -EINVAL;
1424	}
1425
1426	mutex_lock(&c_node->event_lock);
1427	if (list_empty(&c_node->event_list)) {
1428		DRM_DEBUG_KMS("event list is empty.\n");
1429		ret = 0;
1430		goto err_event_unlock;
1431	}
1432
1433	mutex_lock(&c_node->mem_lock);
1434	if (!ipp_check_mem_list(c_node)) {
1435		DRM_DEBUG_KMS("empty memory.\n");
1436		ret = 0;
1437		goto err_mem_unlock;
1438	}
1439
1440	/* check command */
1441	switch (property->cmd) {
1442	case IPP_CMD_M2M:
1443		for_each_ipp_ops(i) {
1444			/* source/destination memory list */
1445			head = &c_node->mem_list[i];
1446
1447			m_node = list_first_entry(head,
1448				struct drm_exynos_ipp_mem_node, list);
1449
1450			tbuf_id[i] = m_node->buf_id;
1451			DRM_DEBUG_KMS("%s buf_id[%d]\n",
1452				i ? "dst" : "src", tbuf_id[i]);
1453
1454			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1455			if (ret)
1456				DRM_ERROR("failed to put m_node.\n");
1457		}
1458		break;
1459	case IPP_CMD_WB:
1460		/* clear buf for finding */
1461		memset(&qbuf, 0x0, sizeof(qbuf));
1462		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1463		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1464
1465		/* get memory node entry */
1466		m_node = ipp_find_mem_node(c_node, &qbuf);
1467		if (!m_node) {
1468			DRM_ERROR("empty memory node.\n");
1469			ret = -ENOMEM;
1470			goto err_mem_unlock;
1471		}
1472
1473		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1474
1475		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1476		if (ret)
1477			DRM_ERROR("failed to put m_node.\n");
1478		break;
1479	case IPP_CMD_OUTPUT:
1480		/* source memory list */
1481		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1482
1483		m_node = list_first_entry(head,
1484			struct drm_exynos_ipp_mem_node, list);
1485
1486		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1487
1488		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1489		if (ret)
1490			DRM_ERROR("failed to put m_node.\n");
1491		break;
1492	default:
1493		DRM_ERROR("invalid operations.\n");
1494		ret = -EINVAL;
1495		goto err_mem_unlock;
1496	}
1497	mutex_unlock(&c_node->mem_lock);
1498
1499	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1500		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1501			tbuf_id[1], buf_id[1], property->prop_id);
1502
 1503	/*
 1504	 * The command node keeps an event list for destination buffers.
 1505	 * When a destination buffer is enqueued to the mem list, an event
 1506	 * is created and linked to the tail of the event list, so the
 1507	 * first event corresponds to the first enqueued buffer.
 1508	 */
1509	e = list_first_entry(&c_node->event_list,
1510		struct drm_exynos_ipp_send_event, base.link);
1511
1512	do_gettimeofday(&now);
1513	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
1514	e->event.tv_sec = now.tv_sec;
1515	e->event.tv_usec = now.tv_usec;
1516	e->event.prop_id = property->prop_id;
1517
1518	/* set buffer id about source destination */
1519	for_each_ipp_ops(i)
1520		e->event.buf_id[i] = tbuf_id[i];
1521
1522	drm_send_event(drm_dev, &e->base);
1523	mutex_unlock(&c_node->event_lock);
1524
1525	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
1526		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1527
1528	return 0;
1529
1530err_mem_unlock:
1531	mutex_unlock(&c_node->mem_lock);
1532err_event_unlock:
1533	mutex_unlock(&c_node->event_lock);
1534	return ret;
1535}
1536
1537void ipp_sched_event(struct work_struct *work)
1538{
1539	struct drm_exynos_ipp_event_work *event_work =
1540		container_of(work, struct drm_exynos_ipp_event_work, work);
1541	struct exynos_drm_ippdrv *ippdrv;
1542	struct drm_exynos_ipp_cmd_node *c_node;
1543	int ret;
1544
1545	if (!event_work) {
1546		DRM_ERROR("failed to get event_work.\n");
1547		return;
1548	}
1549
1550	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1551
1552	ippdrv = event_work->ippdrv;
1553	if (!ippdrv) {
1554		DRM_ERROR("failed to get ipp driver.\n");
1555		return;
1556	}
1557
1558	c_node = ippdrv->c_node;
1559	if (!c_node) {
1560		DRM_ERROR("failed to get command node.\n");
1561		return;
1562	}
1563
 1564	/*
 1565	 * IPP synchronizes the command thread with the event thread.
 1566	 * If userland closes the device immediately, we still have to
 1567	 * synchronize with the command thread, so complete the event
 1568	 * before bailing out.
 1569	 */
1570	if (c_node->state != IPP_STATE_START) {
1571		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1572			c_node->state, c_node->property.prop_id);
1573		goto err_completion;
1574	}
1575
1576	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1577	if (ret) {
1578		DRM_ERROR("failed to send event.\n");
1579		goto err_completion;
1580	}
1581
1582err_completion:
1583	if (ipp_is_m2m_cmd(c_node->property.cmd))
1584		complete(&c_node->start_complete);
1585}
1586
1587static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1588{
1589	struct ipp_context *ctx = get_ipp_context(dev);
1590	struct exynos_drm_ippdrv *ippdrv;
1591	int ret, count = 0;
1592
1593	/* get ipp driver entry */
1594	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1595		ippdrv->drm_dev = drm_dev;
1596
1597		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
1598		if (ret < 0) {
1599			DRM_ERROR("failed to create id.\n");
1600			goto err;
1601		}
1602		ippdrv->prop_list.ipp_id = ret;
1603
1604		DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
1605			count++, ippdrv, ret);
1606
1607		/* store parent device for node */
1608		ippdrv->parent_dev = dev;
1609
1610		/* store event work queue and handler */
1611		ippdrv->event_workq = ctx->event_workq;
1612		ippdrv->sched_event = ipp_sched_event;
1613		INIT_LIST_HEAD(&ippdrv->cmd_list);
1614		mutex_init(&ippdrv->cmd_lock);
1615
1616		ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1617		if (ret) {
1618			DRM_ERROR("failed to activate iommu\n");
1619			goto err;
1620		}
1621	}
1622
1623	return 0;
1624
1625err:
1626	/* get ipp driver entry */
1627	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
1628						drv_list) {
1629		drm_iommu_detach_device(drm_dev, ippdrv->dev);
1630
1631		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1632				ippdrv->prop_list.ipp_id);
1633	}
1634
1635	return ret;
1636}
1637
1638static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1639{
1640	struct exynos_drm_ippdrv *ippdrv, *t;
1641	struct ipp_context *ctx = get_ipp_context(dev);
1642
1643	/* get ipp driver entry */
1644	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
1645		drm_iommu_detach_device(drm_dev, ippdrv->dev);
1646
1647		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1648				ippdrv->prop_list.ipp_id);
1649
1650		ippdrv->drm_dev = NULL;
1651		exynos_drm_ippdrv_unregister(ippdrv);
1652	}
1653}
1654
1655static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1656		struct drm_file *file)
1657{
1658	struct drm_exynos_file_private *file_priv = file->driver_priv;
1659
1660	file_priv->ipp_dev = dev;
1661
 1662	DRM_DEBUG_KMS("done dev[%p]\n", dev);
1663
1664	return 0;
1665}
1666
1667static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1668		struct drm_file *file)
1669{
1670	struct exynos_drm_ippdrv *ippdrv = NULL;
1671	struct ipp_context *ctx = get_ipp_context(dev);
1672	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1673	int count = 0;
1674
1675	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1676		mutex_lock(&ippdrv->cmd_lock);
1677		list_for_each_entry_safe(c_node, tc_node,
1678			&ippdrv->cmd_list, list) {
1679			DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
1680				count++, ippdrv);
1681
1682			if (c_node->filp == file) {
 1683				/*
 1684				 * Userland went into an abnormal state, e.g.
 1685				 * the process was killed and the file closed,
 1686				 * so the stop cmd ctrl was never issued.
 1687				 * Perform the stop operation here instead.
 1688				 */
1689				if (c_node->state == IPP_STATE_START) {
1690					ipp_stop_property(drm_dev, ippdrv,
1691						c_node);
1692					c_node->state = IPP_STATE_STOP;
1693				}
1694
1695				ippdrv->dedicated = false;
1696				ipp_clean_cmd_node(ctx, c_node);
1697				if (list_empty(&ippdrv->cmd_list))
1698					pm_runtime_put_sync(ippdrv->dev);
1699			}
1700		}
1701		mutex_unlock(&ippdrv->cmd_lock);
1702	}
1703
1704	return;
1705}
1706
1707static int ipp_probe(struct platform_device *pdev)
1708{
1709	struct device *dev = &pdev->dev;
1710	struct ipp_context *ctx;
1711	struct exynos_drm_subdrv *subdrv;
1712	int ret;
1713
1714	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1715	if (!ctx)
1716		return -ENOMEM;
1717
1718	mutex_init(&ctx->ipp_lock);
1719	mutex_init(&ctx->prop_lock);
1720
1721	idr_init(&ctx->ipp_idr);
1722	idr_init(&ctx->prop_idr);
1723
 1724	/*
 1725	 * Create a single-threaded workqueue for ipp events.
 1726	 * IPP drivers send their event_work to this thread,
 1727	 * and the event thread delivers the events to the
 1728	 * user process.
 1729	 */
1730	ctx->event_workq = create_singlethread_workqueue("ipp_event");
1731	if (!ctx->event_workq) {
1732		dev_err(dev, "failed to create event workqueue\n");
1733		return -EINVAL;
1734	}
1735
 1736	/*
 1737	 * Create a single-threaded workqueue for ipp commands.
 1738	 * The user process creates a command node using the set
 1739	 * property ioctl, then builds a start_work and sends this
 1740	 * work to the command thread, which in turn starts the
 1741	 * property.
 1742	 */
1743	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1744	if (!ctx->cmd_workq) {
1745		dev_err(dev, "failed to create cmd workqueue\n");
1746		ret = -EINVAL;
1747		goto err_event_workq;
1748	}
1749
 1750	/* set sub driver information */
1751	subdrv = &ctx->subdrv;
1752	subdrv->dev = dev;
1753	subdrv->probe = ipp_subdrv_probe;
1754	subdrv->remove = ipp_subdrv_remove;
1755	subdrv->open = ipp_subdrv_open;
1756	subdrv->close = ipp_subdrv_close;
1757
1758	platform_set_drvdata(pdev, ctx);
1759
1760	ret = exynos_drm_subdrv_register(subdrv);
1761	if (ret < 0) {
1762		DRM_ERROR("failed to register drm ipp device.\n");
1763		goto err_cmd_workq;
1764	}
1765
1766	dev_info(dev, "drm ipp registered successfully.\n");
1767
1768	return 0;
1769
1770err_cmd_workq:
1771	destroy_workqueue(ctx->cmd_workq);
1772err_event_workq:
1773	destroy_workqueue(ctx->event_workq);
1774	return ret;
1775}
1776
1777static int ipp_remove(struct platform_device *pdev)
1778{
1779	struct ipp_context *ctx = platform_get_drvdata(pdev);
1780
1781	/* unregister sub driver */
1782	exynos_drm_subdrv_unregister(&ctx->subdrv);
1783
 1784	/* remove and destroy the ipp idrs */
1785	idr_destroy(&ctx->ipp_idr);
1786	idr_destroy(&ctx->prop_idr);
1787
1788	mutex_destroy(&ctx->ipp_lock);
1789	mutex_destroy(&ctx->prop_lock);
1790
1791	/* destroy command, event work queue */
1792	destroy_workqueue(ctx->cmd_workq);
1793	destroy_workqueue(ctx->event_workq);
1794
1795	return 0;
1796}
1797
1798struct platform_driver ipp_driver = {
1799	.probe		= ipp_probe,
1800	.remove		= ipp_remove,
1801	.driver		= {
1802		.name	= "exynos-drm-ipp",
1803		.owner	= THIS_MODULE,
1804	},
1805};
1806
v5.14.15
  1/*
  2 * Copyright (C) 2017 Samsung Electronics Co.Ltd
  3 * Authors:
  4 *	Marek Szyprowski <m.szyprowski@samsung.com>
  5 *
  6 * Exynos DRM Image Post Processing (IPP) related functions
  7 *
  8 * Permission is hereby granted, free of charge, to any person obtaining a
  9 * copy of this software and associated documentation files (the "Software"),
 10 * to deal in the Software without restriction, including without limitation
 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 12 * and/or sell copies of the Software, and to permit persons to whom the
 13 * Software is furnished to do so, subject to the following conditions:
 14 *
 15 * The above copyright notice and this permission notice shall be included in
 16 * all copies or substantial portions of the Software.
 17 */
 18
 19#include <linux/uaccess.h>
 20
 21#include <drm/drm_file.h>
 22#include <drm/drm_fourcc.h>
 23#include <drm/drm_mode.h>
 24#include <drm/exynos_drm.h>
 25
 26#include "exynos_drm_drv.h"
 27#include "exynos_drm_gem.h"
 28#include "exynos_drm_ipp.h"
 29
 30static int num_ipp;
 31static LIST_HEAD(ipp_list);
 32
 33/**
 34 * exynos_drm_ipp_register - Register a new picture processor hardware module
 35 * @dev: DRM device
 36 * @ipp: ipp module to init
 37 * @funcs: callbacks for the new ipp object
 38 * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
 39 * @formats: array of supported formats
 40 * @num_formats: size of the supported formats array
 41 * @name: name (for debugging purposes)
 42 *
  43 * Initializes an ipp module.
 44 *
 45 * Returns:
 46 * Zero on success, error code on failure.
 47 */
 48int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
 49		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
 50		const struct exynos_drm_ipp_formats *formats,
 51		unsigned int num_formats, const char *name)
 52{
 53	WARN_ON(!ipp);
 54	WARN_ON(!funcs);
 55	WARN_ON(!formats);
 56	WARN_ON(!num_formats);
 57
 58	spin_lock_init(&ipp->lock);
 59	INIT_LIST_HEAD(&ipp->todo_list);
 60	init_waitqueue_head(&ipp->done_wq);
 61	ipp->dev = dev;
 62	ipp->funcs = funcs;
 63	ipp->capabilities = caps;
 64	ipp->name = name;
 65	ipp->formats = formats;
 66	ipp->num_formats = num_formats;
 67
 68	/* ipp_list modification is serialized by component framework */
 69	list_add_tail(&ipp->head, &ipp_list);
 70	ipp->id = num_ipp++;
 71
 72	DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
 73
 74	return 0;
 75}
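/*
 * Illustrative registration from a hardware driver's bind() callback
 * (a sketch; the funcs table, capability mask and format array shown
 * here are assumptions, not taken from this file):
 *
 *	static const struct exynos_drm_ipp_funcs my_ipp_funcs = {
 *		.commit = my_commit,
 *		.abort = my_abort,
 *	};
 *
 *	exynos_drm_ipp_register(dev, &priv->ipp, &my_ipp_funcs,
 *				DRM_EXYNOS_IPP_CAP_CROP |
 *				DRM_EXYNOS_IPP_CAP_ROTATE,
 *				my_formats, ARRAY_SIZE(my_formats),
 *				"my-ipp");
 */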
 76
 77/**
 78 * exynos_drm_ipp_unregister - Unregister the picture processor module
 79 * @dev: DRM device
 80 * @ipp: ipp module
 81 */
 82void exynos_drm_ipp_unregister(struct device *dev,
 83			       struct exynos_drm_ipp *ipp)
 84{
 85	WARN_ON(ipp->task);
 86	WARN_ON(!list_empty(&ipp->todo_list));
 87	list_del(&ipp->head);
 88}
 89
 90/**
 91 * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
 92 * @dev: DRM device
 93 * @data: ioctl data
 94 * @file_priv: DRM file info
 95 *
 96 * Construct a list of ipp ids.
 97 *
 98 * Called by the user via ioctl.
 99 *
100 * Returns:
101 * Zero on success, negative errno on failure.
102 */
103int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
104				 struct drm_file *file_priv)
105{
106	struct drm_exynos_ioctl_ipp_get_res *resp = data;
107	struct exynos_drm_ipp *ipp;
108	uint32_t __user *ipp_ptr = (uint32_t __user *)
109						(unsigned long)resp->ipp_id_ptr;
110	unsigned int count = num_ipp, copied = 0;
111
112	/*
113	 * This ioctl is called twice, once to determine how much space is
114	 * needed, and the 2nd time to fill it.
115	 */
116	if (count && resp->count_ipps >= count) {
117		list_for_each_entry(ipp, &ipp_list, head) {
118			if (put_user(ipp->id, ipp_ptr + copied))
119				return -EFAULT;
120			copied++;
121		}
122	}
123	resp->count_ipps = count;
124
125	return 0;
126}
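/*
 * Illustrative two-call usage from userspace (a sketch; the ioctl macro
 * name is assumed from the UAPI header, error handling elided):
 *
 *	struct drm_exynos_ioctl_ipp_get_res res = { 0 };
 *	uint32_t *ids;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);	// get count
 *	ids = calloc(res.count_ipps, sizeof(*ids));
 *	res.ipp_id_ptr = (uintptr_t)ids;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);	// fill ids
 */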
127
128static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
129{
130	struct exynos_drm_ipp *ipp;
131
132	list_for_each_entry(ipp, &ipp_list, head)
133		if (ipp->id == id)
134			return ipp;
135	return NULL;
136}
137
138/**
139 * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
140 * @dev: DRM device
141 * @data: ioctl data
142 * @file_priv: DRM file info
143 *
144 * Construct a structure describing ipp module capabilities.
145 *
146 * Called by the user via ioctl.
147 *
148 * Returns:
149 * Zero on success, negative errno on failure.
150 */
151int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
152				  struct drm_file *file_priv)
153{
154	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
155	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
156	struct exynos_drm_ipp *ipp;
157	int i;
158
159	ipp = __ipp_get(resp->ipp_id);
160	if (!ipp)
161		return -ENOENT;
162
163	resp->ipp_id = ipp->id;
164	resp->capabilities = ipp->capabilities;
165
166	/*
167	 * This ioctl is called twice, once to determine how much space is
168	 * needed, and the 2nd time to fill it.
169	 */
170	if (resp->formats_count >= ipp->num_formats) {
171		for (i = 0; i < ipp->num_formats; i++) {
172			struct drm_exynos_ipp_format tmp = {
173				.fourcc = ipp->formats[i].fourcc,
174				.type = ipp->formats[i].type,
175				.modifier = ipp->formats[i].modifier,
176			};
177
178			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
179				return -EFAULT;
180			ptr += sizeof(tmp);
181		}
182	}
183	resp->formats_count = ipp->num_formats;
184
185	return 0;
186}
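
/*
 * Querying capabilities and formats follows the same two-pass pattern
 * (sketch; DRM_IOCTL_EXYNOS_IPP_GET_CAPS is assumed from the uapi header):
 *
 *	struct drm_exynos_ioctl_ipp_get_caps caps = { .ipp_id = id };
 *	struct drm_exynos_ipp_format *fmts;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_CAPS, &caps);
 *	fmts = calloc(caps.formats_count, sizeof(*fmts));
 *	caps.formats_ptr = (uintptr_t)fmts;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_CAPS, &caps);
 */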
187
188static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
189				struct exynos_drm_ipp *ipp, uint32_t fourcc,
190				uint64_t mod, unsigned int type)
191{
192	int i;
193
194	for (i = 0; i < ipp->num_formats; i++) {
195		if ((ipp->formats[i].type & type) &&
196		    ipp->formats[i].fourcc == fourcc &&
197		    ipp->formats[i].modifier == mod)
198			return &ipp->formats[i];
199	}
200	return NULL;
201}
202
203/**
204 * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
205 * @dev: DRM device
206 * @data: ioctl data
207 * @file_priv: DRM file info
208 *
209 * Construct a structure describing ipp module limitations for provided
210 * picture format.
211 *
212 * Called by the user via ioctl.
213 *
214 * Returns:
215 * Zero on success, negative errno on failure.
216 */
217int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
218				    struct drm_file *file_priv)
219{
220	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
221	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
222	const struct exynos_drm_ipp_formats *format;
223	struct exynos_drm_ipp *ipp;
224
225	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
226	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
227		return -EINVAL;
228
229	ipp = __ipp_get(resp->ipp_id);
230	if (!ipp)
231		return -ENOENT;
232
233	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
234				  resp->type);
235	if (!format)
236		return -EINVAL;
237
238	/*
239	 * This ioctl is called twice, once to determine how much space is
 240	 * needed, and a second time to fill it.
241	 */
242	if (format->num_limits && resp->limits_count >= format->num_limits)
243		if (copy_to_user((void __user *)ptr, format->limits,
244				 sizeof(*format->limits) * format->num_limits))
245			return -EFAULT;
246	resp->limits_count = format->num_limits;
247
248	return 0;
249}
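
/*
 * Example limits query for one format (sketch; DRM_IOCTL_EXYNOS_IPP_GET_LIMITS
 * and DRM_FORMAT_XRGB8888 are assumed from the uapi headers). The second call,
 * with limits_ptr and limits_count set, fetches the actual entries:
 *
 *	struct drm_exynos_ioctl_ipp_get_limits lim = {
 *		.ipp_id = id,
 *		.fourcc = DRM_FORMAT_XRGB8888,
 *		.type = DRM_EXYNOS_IPP_FORMAT_SOURCE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_LIMITS, &lim);
 */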
250
251struct drm_pending_exynos_ipp_event {
252	struct drm_pending_event base;
253	struct drm_exynos_ipp_event event;
254};
255
256static inline struct exynos_drm_ipp_task *
257			exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
258{
259	struct exynos_drm_ipp_task *task;
260
261	task = kzalloc(sizeof(*task), GFP_KERNEL);
262	if (!task)
263		return NULL;
264
265	task->dev = ipp->dev;
266	task->ipp = ipp;
267
268	/* some defaults */
269	task->src.rect.w = task->dst.rect.w = UINT_MAX;
270	task->src.rect.h = task->dst.rect.h = UINT_MAX;
271	task->transform.rotation = DRM_MODE_ROTATE_0;
272
273	DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
274
275	return task;
276}
277
278static const struct exynos_drm_param_map {
279	unsigned int id;
280	unsigned int size;
281	unsigned int offset;
282} exynos_drm_ipp_params_maps[] = {
283	{
284		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
285		sizeof(struct drm_exynos_ipp_task_buffer),
286		offsetof(struct exynos_drm_ipp_task, src.buf),
287	}, {
288		DRM_EXYNOS_IPP_TASK_BUFFER |
289			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
290		sizeof(struct drm_exynos_ipp_task_buffer),
291		offsetof(struct exynos_drm_ipp_task, dst.buf),
292	}, {
293		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
294		sizeof(struct drm_exynos_ipp_task_rect),
295		offsetof(struct exynos_drm_ipp_task, src.rect),
296	}, {
297		DRM_EXYNOS_IPP_TASK_RECTANGLE |
298			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
299		sizeof(struct drm_exynos_ipp_task_rect),
300		offsetof(struct exynos_drm_ipp_task, dst.rect),
301	}, {
302		DRM_EXYNOS_IPP_TASK_TRANSFORM,
303		sizeof(struct drm_exynos_ipp_task_transform),
304		offsetof(struct exynos_drm_ipp_task, transform),
305	}, {
306		DRM_EXYNOS_IPP_TASK_ALPHA,
307		sizeof(struct drm_exynos_ipp_task_alpha),
308		offsetof(struct exynos_drm_ipp_task, alpha),
309	},
310};
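
/*
 * exynos_drm_ipp_task_set() below consumes the params blob as a packed
 * sequence of these tagged structures, each beginning with its u32 id.
 * A userspace sketch of one entry (only a few fields shown, the rest
 * elided; field names assume the drm_exynos_ipp_task_buffer uapi layout):
 *
 *	struct drm_exynos_ipp_task_buffer src = {
 *		.id = DRM_EXYNOS_IPP_TASK_BUFFER |
 *		      DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
 *		.fourcc = DRM_FORMAT_XRGB8888,
 *		.width = 1920, .height = 1080,
 *	};
 */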
311
312static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
313				   struct drm_exynos_ioctl_ipp_commit *arg)
314{
315	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
316	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
317	unsigned int size = arg->params_size;
318	uint32_t id;
319	int i;
320
321	while (size) {
322		if (get_user(id, (uint32_t __user *)params))
323			return -EFAULT;
324
325		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
326			if (map[i].id == id)
327				break;
328		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
329		    map[i].size > size)
330			return -EINVAL;
331
332		if (copy_from_user((void *)task + map[i].offset, params,
333				   map[i].size))
334			return -EFAULT;
335
336		params += map[i].size;
337		size -= map[i].size;
338	}
339
340	DRM_DEV_DEBUG_DRIVER(task->dev,
341			     "Got task %pK configuration from userspace\n",
342			     task);
343	return 0;
344}
345
346static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
347					    struct drm_file *filp)
348{
349	int ret = 0;
350	int i;
351
352	/* get GEM buffers and check their size */
353	for (i = 0; i < buf->format->num_planes; i++) {
354		unsigned int height = (i == 0) ? buf->buf.height :
355			     DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
356		unsigned long size = height * buf->buf.pitch[i];
357		struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
358							    buf->buf.gem_id[i]);
359		if (!gem) {
360			ret = -ENOENT;
361			goto gem_free;
362		}
363		buf->exynos_gem[i] = gem;
364
365		if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
366			i++;
367			ret = -EINVAL;
368			goto gem_free;
369		}
370		buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
371				   buf->buf.offset[i];
372	}
373
374	return 0;
375gem_free:
376	while (i--) {
377		exynos_drm_gem_put(buf->exynos_gem[i]);
378		buf->exynos_gem[i] = NULL;
379	}
380	return ret;
381}
382
383static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
384{
385	int i;
386
387	if (!buf->exynos_gem[0])
388		return;
389	for (i = 0; i < buf->format->num_planes; i++)
390		exynos_drm_gem_put(buf->exynos_gem[i]);
391}
392
393static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
394				 struct exynos_drm_ipp_task *task)
395{
396	DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
397
398	exynos_drm_ipp_task_release_buf(&task->src);
399	exynos_drm_ipp_task_release_buf(&task->dst);
400	if (task->event)
401		drm_event_cancel_free(ipp->drm_dev, &task->event->base);
402	kfree(task);
403}
404
405struct drm_ipp_limit {
406	struct drm_exynos_ipp_limit_val h;
407	struct drm_exynos_ipp_limit_val v;
408};
409
410enum drm_ipp_size_id {
411	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
412};
413
414static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
415	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
416	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
417				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
418	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
419				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
420				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
421};
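
/*
 * __get_size_limit() below walks these fallback chains in order: when
 * checking, e.g., IPP_LIMIT_ROTATED, any field left unset (zero) by a
 * SIZE_ROTATED limit is filled from a SIZE_AREA limit and finally from
 * SIZE_BUFFER.
 */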
422
423static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
424{
425	if (!*ptr)
426		*ptr = val;
427}
428
429static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
430			     unsigned int num_limits, enum drm_ipp_size_id id,
431			     struct drm_ipp_limit *res)
432{
433	const struct drm_exynos_ipp_limit *l = limits;
434	int i = 0;
435
436	memset(res, 0, sizeof(*res));
437	for (i = 0; limit_id_fallback[id][i]; i++)
438		for (l = limits; l - limits < num_limits; l++) {
439			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
440			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
441			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
442						     limit_id_fallback[id][i]))
443				continue;
444			__limit_set_val(&res->h.min, l->h.min);
445			__limit_set_val(&res->h.max, l->h.max);
446			__limit_set_val(&res->h.align, l->h.align);
447			__limit_set_val(&res->v.min, l->v.min);
448			__limit_set_val(&res->v.max, l->v.max);
449			__limit_set_val(&res->v.align, l->v.align);
450		}
451}
452
453static inline bool __align_check(unsigned int val, unsigned int align)
454{
455	if (align && (val & (align - 1))) {
456		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
457				 val, align);
458		return false;
459	}
460	return true;
461}
462
463static inline bool __size_limit_check(unsigned int val,
464				 struct drm_exynos_ipp_limit_val *l)
465{
466	if ((l->min && val < l->min) || (l->max && val > l->max)) {
467		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
468				 val, l->min, l->max);
469		return false;
470	}
471	return __align_check(val, l->align);
472}
473
474static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
475	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
476	bool rotate, bool swap)
477{
478	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
479	struct drm_ipp_limit l;
480	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
481	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
482
483	if (!limits)
484		return 0;
485
486	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
487	if (!__size_limit_check(real_width, &l.h) ||
488	    !__size_limit_check(buf->buf.height, &l.v))
489		return -EINVAL;
490
491	if (swap) {
492		lv = &l.h;
493		lh = &l.v;
494	}
495	__get_size_limit(limits, num_limits, id, &l);
496	if (!__size_limit_check(buf->rect.w, lh) ||
497	    !__align_check(buf->rect.x, lh->align) ||
498	    !__size_limit_check(buf->rect.h, lv) ||
499	    !__align_check(buf->rect.y, lv->align))
500		return -EINVAL;
501
502	return 0;
503}
504
505static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
506				       unsigned int min, unsigned int max)
507{
508	if ((max && (dst << 16) > src * max) ||
509	    (min && (dst << 16) < src * min)) {
510		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
511			 src, dst,
512			 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
513			 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
514		return false;
515	}
516	return true;
517}
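
/*
 * The min/max scale limits are 16.16 fixed-point dst/src ratios, so the
 * check above compares (dst << 16) against src * ratio and avoids a
 * division. For example, min = 0x04000 (0.25) allows at most a 4x
 * downscale and max = 0x20000 (2.0) at most a 2x upscale.
 */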
518
519static int exynos_drm_ipp_check_scale_limits(
520				struct drm_exynos_ipp_task_rect *src,
521				struct drm_exynos_ipp_task_rect *dst,
522				const struct drm_exynos_ipp_limit *limits,
523				unsigned int num_limits, bool swap)
524{
525	const struct drm_exynos_ipp_limit_val *lh, *lv;
526	int dw, dh;
527
528	for (; num_limits; limits++, num_limits--)
529		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
530		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
531			break;
532	if (!num_limits)
533		return 0;
534
535	lh = (!swap) ? &limits->h : &limits->v;
536	lv = (!swap) ? &limits->v : &limits->h;
537	dw = (!swap) ? dst->w : dst->h;
538	dh = (!swap) ? dst->h : dst->w;
539
540	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
541	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
542		return -EINVAL;
543
544	return 0;
545}
546
547static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
548				       struct exynos_drm_ipp_buffer *buf,
549				       struct exynos_drm_ipp_buffer *src,
550				       struct exynos_drm_ipp_buffer *dst,
551				       bool rotate, bool swap)
552{
553	const struct exynos_drm_ipp_formats *fmt;
554	int ret, i;
555
556	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
557			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
558					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
559	if (!fmt) {
560		DRM_DEV_DEBUG_DRIVER(task->dev,
561				     "Task %pK: %s format not supported\n",
562				     task, buf == src ? "src" : "dst");
563		return -EINVAL;
564	}
565
566	/* basic checks */
567	if (buf->buf.width == 0 || buf->buf.height == 0)
568		return -EINVAL;
569
570	buf->format = drm_format_info(buf->buf.fourcc);
571	for (i = 0; i < buf->format->num_planes; i++) {
572		unsigned int width = (i == 0) ? buf->buf.width :
573			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
574
575		if (buf->buf.pitch[i] == 0)
576			buf->buf.pitch[i] = width * buf->format->cpp[i];
577		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
578			return -EINVAL;
579		if (!buf->buf.gem_id[i])
580			return -ENOENT;
581	}
582
583	/* pitch for additional planes must match */
584	if (buf->format->num_planes > 2 &&
585	    buf->buf.pitch[1] != buf->buf.pitch[2])
586		return -EINVAL;
587
588	/* check driver limits */
589	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
590					       fmt->num_limits,
591					       rotate,
592					       buf == dst ? swap : false);
593	if (ret)
594		return ret;
595	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
596						fmt->limits,
597						fmt->num_limits, swap);
598	return ret;
599}
600
601static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
602{
603	struct exynos_drm_ipp *ipp = task->ipp;
604	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
605	unsigned int rotation = task->transform.rotation;
606	int ret = 0;
607	bool swap = drm_rotation_90_or_270(rotation);
608	bool rotate = (rotation != DRM_MODE_ROTATE_0);
609	bool scale = false;
610
611	DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
612
613	if (src->rect.w == UINT_MAX)
614		src->rect.w = src->buf.width;
615	if (src->rect.h == UINT_MAX)
616		src->rect.h = src->buf.height;
617	if (dst->rect.w == UINT_MAX)
618		dst->rect.w = dst->buf.width;
619	if (dst->rect.h == UINT_MAX)
620		dst->rect.h = dst->buf.height;
621
622	if (src->rect.x + src->rect.w > (src->buf.width) ||
623	    src->rect.y + src->rect.h > (src->buf.height) ||
624	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
625	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
626		DRM_DEV_DEBUG_DRIVER(task->dev,
627				     "Task %pK: defined area is outside provided buffers\n",
628				     task);
629		return -EINVAL;
630	}
631
632	if ((!swap && (src->rect.w != dst->rect.w ||
633		       src->rect.h != dst->rect.h)) ||
634	    (swap && (src->rect.w != dst->rect.h ||
635		      src->rect.h != dst->rect.w)))
636		scale = true;
637
638	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
639	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
640	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
641	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
642	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
643	     src->buf.fourcc != dst->buf.fourcc)) {
644		DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
645				     task);
646		return -EINVAL;
647	}
648
649	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
650	if (ret)
651		return ret;
652
653	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
654	if (ret)
655		return ret;
656
657	DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
658			     task);
659
660	return ret;
661}
662
663static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
664				     struct drm_file *filp)
665{
666	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
667	int ret = 0;
668
669	DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
670			     task);
671
672	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
673	if (ret) {
674		DRM_DEV_DEBUG_DRIVER(task->dev,
675				     "Task %pK: src buffer setup failed\n",
676				     task);
677		return ret;
678	}
679	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
680	if (ret) {
681		DRM_DEV_DEBUG_DRIVER(task->dev,
682				     "Task %pK: dst buffer setup failed\n",
683				     task);
684		return ret;
685	}
686
687	DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
688			     task);
689
690	return ret;
691}
692
693
694static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
695				 struct drm_file *file_priv, uint64_t user_data)
696{
697	struct drm_pending_exynos_ipp_event *e = NULL;
698	int ret;
699
700	e = kzalloc(sizeof(*e), GFP_KERNEL);
701	if (!e)
702		return -ENOMEM;
703
704	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
705	e->event.base.length = sizeof(e->event);
706	e->event.user_data = user_data;
707
708	ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
709				     &e->event.base);
710	if (ret)
711		goto free;
712
713	task->event = e;
714	return 0;
715free:
716	kfree(e);
717	return ret;
718}
719
720static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
721{
722	struct timespec64 now;
723
724	ktime_get_ts64(&now);
725	task->event->event.tv_sec = now.tv_sec;
726	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
727	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
728
729	drm_send_event(task->ipp->drm_dev, &task->event->base);
730}
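
/*
 * Userspace receives this as an ordinary DRM event on the device fd, e.g.
 * (sketch; error handling omitted and handle_done() is a hypothetical
 * callback):
 *
 *	char buf[128];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct drm_event *e;
 *
 *	for (e = (struct drm_event *)buf; (char *)e < buf + len;
 *	     e = (struct drm_event *)((char *)e + e->length))
 *		if (e->type == DRM_EXYNOS_IPP_EVENT)
 *			handle_done((struct drm_exynos_ipp_event *)e);
 */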
731
732static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
733{
734	int ret = task->ret;
735
736	if (ret == 0 && task->event) {
737		exynos_drm_ipp_event_send(task);
738		/* ensure event won't be canceled on task free */
739		task->event = NULL;
740	}
741
742	exynos_drm_ipp_task_free(task->ipp, task);
743	return ret;
744}
745
746static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
747{
748	struct exynos_drm_ipp_task *task = container_of(work,
749				      struct exynos_drm_ipp_task, cleanup_work);
750
751	exynos_drm_ipp_task_cleanup(task);
752}
753
754static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
755
756/**
757 * exynos_drm_ipp_task_done - finish given task and set return code
758 * @task: ipp task to finish
759 * @ret: error code or 0 if operation has been performed successfully
760 */
761void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
762{
763	struct exynos_drm_ipp *ipp = task->ipp;
764	unsigned long flags;
765
766	DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
767			     ipp->id, task, ret);
768
769	spin_lock_irqsave(&ipp->lock, flags);
770	if (ipp->task == task)
771		ipp->task = NULL;
772	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
773	task->ret = ret;
774	spin_unlock_irqrestore(&ipp->lock, flags);
775
776	exynos_drm_ipp_next_task(ipp);
777	wake_up(&ipp->done_wq);
778
779	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
780		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
781		schedule_work(&task->cleanup_work);
782	}
783}
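
/*
 * A sketch of how a hardware driver would report completion from its
 * interrupt handler (rot_context and the rotator_* names are hypothetical;
 * only exynos_drm_ipp_task_done() comes from this file):
 *
 *	static irqreturn_t rotator_irq_handler(int irq, void *arg)
 *	{
 *		struct rot_context *rot = arg;
 *		struct exynos_drm_ipp_task *task = rot->task;
 *
 *		rotator_clear_irq(rot);
 *		if (task) {
 *			rot->task = NULL;
 *			exynos_drm_ipp_task_done(task, 0);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */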
784
785static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
786{
787	struct exynos_drm_ipp_task *task;
788	unsigned long flags;
789	int ret;
790
791	DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
792			     ipp->id);
793
794	spin_lock_irqsave(&ipp->lock, flags);
795
796	if (ipp->task || list_empty(&ipp->todo_list)) {
797		spin_unlock_irqrestore(&ipp->lock, flags);
798		return;
799	}
800
801	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
802				head);
803	list_del_init(&task->head);
804	ipp->task = task;
805
806	spin_unlock_irqrestore(&ipp->lock, flags);
807
808	DRM_DEV_DEBUG_DRIVER(ipp->dev,
809			     "ipp: %d, selected task %pK to run\n", ipp->id,
810			     task);
811
812	ret = ipp->funcs->commit(ipp, task);
813	if (ret)
814		exynos_drm_ipp_task_done(task, ret);
815}
816
817static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
818					 struct exynos_drm_ipp_task *task)
819{
820	unsigned long flags;
821
822	spin_lock_irqsave(&ipp->lock, flags);
823	list_add(&task->head, &ipp->todo_list);
824	spin_unlock_irqrestore(&ipp->lock, flags);
825
826	exynos_drm_ipp_next_task(ipp);
827}
828
829static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
830				      struct exynos_drm_ipp_task *task)
831{
832	unsigned long flags;
833
834	spin_lock_irqsave(&ipp->lock, flags);
835	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
836		/* already completed task */
837		exynos_drm_ipp_task_cleanup(task);
838	} else if (ipp->task != task) {
839		/* task has not been scheduled for execution yet */
840		list_del_init(&task->head);
841		exynos_drm_ipp_task_cleanup(task);
842	} else {
 843		/*
 844		 * The task is currently being processed; call abort() and
 845		 * perform the cleanup with the async worker.
 846		 */
847		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
848		spin_unlock_irqrestore(&ipp->lock, flags);
849		if (ipp->funcs->abort)
850			ipp->funcs->abort(ipp, task);
851		return;
852	}
853	spin_unlock_irqrestore(&ipp->lock, flags);
854}
855
856/**
857 * exynos_drm_ipp_commit_ioctl - perform image processing operation
858 * @dev: DRM device
859 * @data: ioctl data
860 * @file_priv: DRM file info
861 *
 862 * Construct an ipp task from the set of properties provided by the user
 863 * and try to schedule it on the picture processor hardware.
864 *
865 * Called by the user via ioctl.
866 *
867 * Returns:
868 * Zero on success, negative errno on failure.
869 */
870int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
871				struct drm_file *file_priv)
872{
873	struct drm_exynos_ioctl_ipp_commit *arg = data;
874	struct exynos_drm_ipp *ipp;
875	struct exynos_drm_ipp_task *task;
876	int ret = 0;
877
878	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
879		return -EINVAL;
880
881	/* can't test and expect an event at the same time */
882	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
883			(arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
884		return -EINVAL;
885
886	ipp = __ipp_get(arg->ipp_id);
887	if (!ipp)
888		return -ENOENT;
889
890	task = exynos_drm_ipp_task_alloc(ipp);
891	if (!task)
892		return -ENOMEM;
893
894	ret = exynos_drm_ipp_task_set(task, arg);
895	if (ret)
896		goto free;
897
898	ret = exynos_drm_ipp_task_check(task);
899	if (ret)
900		goto free;
901
902	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
903	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
904		goto free;
905
906	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
907		ret = exynos_drm_ipp_event_create(task, file_priv,
908						 arg->user_data);
909		if (ret)
910			goto free;
911	}
912
 913	/*
 914	 * Queue the task for processing on the hardware. The task object
 915	 * will then be freed after exynos_drm_ipp_task_done().
 916	 */
917	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
918		DRM_DEV_DEBUG_DRIVER(ipp->dev,
919				     "ipp: %d, nonblocking processing task %pK\n",
920				     ipp->id, task);
921
922		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
923		exynos_drm_ipp_schedule_task(task->ipp, task);
924		ret = 0;
925	} else {
926		DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
927				     ipp->id, task);
928		exynos_drm_ipp_schedule_task(ipp, task);
929		ret = wait_event_interruptible(ipp->done_wq,
930					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
931		if (ret)
932			exynos_drm_ipp_task_abort(ipp, task);
933		else
934			ret = exynos_drm_ipp_task_cleanup(task);
935	}
936	return ret;
937free:
938	exynos_drm_ipp_task_free(ipp, task);
939
940	return ret;
941}
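
/*
 * A complete synchronous commit from userspace then looks roughly like
 * this (sketch; DRM_IOCTL_EXYNOS_IPP_COMMIT is assumed from the uapi
 * header, and "params" is a blob built as described above
 * exynos_drm_ipp_task_set()):
 *
 *	struct drm_exynos_ioctl_ipp_commit arg = {
 *		.ipp_id = id,
 *		.params_size = sizeof(params),
 *		.params_ptr = (uintptr_t)&params,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_EXYNOS_IPP_COMMIT, &arg);
 */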