Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2 * DMM IOMMU driver support functions for TI OMAP processors.
   3 *
   4 * Author: Rob Clark <rob@ti.com>
   5 *         Andy Gross <andy.gross@ti.com>
   6 *
   7 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License as
  11 * published by the Free Software Foundation version 2.
  12 *
  13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  14 * kind, whether express or implied; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 */
  18
  19#include <linux/completion.h>
  20#include <linux/delay.h>
  21#include <linux/dma-mapping.h>
  22#include <linux/errno.h>
  23#include <linux/init.h>
  24#include <linux/interrupt.h>
  25#include <linux/list.h>
  26#include <linux/mm.h>
  27#include <linux/module.h>
  28#include <linux/platform_device.h> /* platform_device() */
 
  29#include <linux/sched.h>
 
 
 
  30#include <linux/slab.h>
  31#include <linux/time.h>
  32#include <linux/vmalloc.h>
  33#include <linux/wait.h>
 
 
 
  34
  35#include "omap_dmm_tiler.h"
  36#include "omap_dmm_priv.h"
  37
  38#define DMM_DRIVER_NAME "dmm"
  39
  40/* mappings for associating views to luts */
  41static struct tcm *containers[TILFMT_NFORMATS];
  42static struct dmm *omap_dmm;
  43
  44#if defined(CONFIG_OF)
  45static const struct of_device_id dmm_of_match[];
  46#endif
  47
  48/* global spinlock for protecting lists */
  49static DEFINE_SPINLOCK(list_lock);
  50
  51/* Geometry table */
  52#define GEOM(xshift, yshift, bytes_per_pixel) { \
  53		.x_shft = (xshift), \
  54		.y_shft = (yshift), \
  55		.cpp    = (bytes_per_pixel), \
  56		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
  57		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
  58	}
  59
  60static const struct {
  61	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
  62	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
  63	uint32_t cpp;		/* bytes/chars per pixel */
  64	uint32_t slot_w;	/* width of each slot (in pixels) */
  65	uint32_t slot_h;	/* height of each slot (in pixels) */
  66} geom[TILFMT_NFORMATS] = {
  67	[TILFMT_8BIT]  = GEOM(0, 0, 1),
  68	[TILFMT_16BIT] = GEOM(0, 1, 2),
  69	[TILFMT_32BIT] = GEOM(1, 1, 4),
  70	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
  71};
  72
  73
  74/* lookup table for registers w/ per-engine instances */
  75static const uint32_t reg[][4] = {
  76	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
  77			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
  78	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
  79			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
  80};
  81
  82static u32 dmm_read(struct dmm *dmm, u32 reg)
  83{
  84	return readl(dmm->base + reg);
  85}
  86
  87static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
  88{
  89	writel(val, dmm->base + reg);
  90}
  91
  92/* simple allocator to grab next 16 byte aligned memory from txn */
  93static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
  94{
  95	void *ptr;
  96	struct refill_engine *engine = txn->engine_handle;
  97
  98	/* dmm programming requires 16 byte aligned addresses */
  99	txn->current_pa = round_up(txn->current_pa, 16);
 100	txn->current_va = (void *)round_up((long)txn->current_va, 16);
 101
 102	ptr = txn->current_va;
 103	*pa = txn->current_pa;
 104
 105	txn->current_pa += sz;
 106	txn->current_va += sz;
 107
 108	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
 109
 110	return ptr;
 111}
 112
 113/* check status and spin until wait_mask comes true */
 114static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
 115{
 116	struct dmm *dmm = engine->dmm;
 117	uint32_t r = 0, err, i;
 118
 119	i = DMM_FIXED_RETRY_COUNT;
 120	while (true) {
 121		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
 122		err = r & DMM_PATSTATUS_ERR;
 123		if (err)
 124			return -EFAULT;
 125
 126		if ((r & wait_mask) == wait_mask)
 127			break;
 128
 129		if (--i == 0)
 130			return -ETIMEDOUT;
 131
 132		udelay(1);
 133	}
 134
 135	return 0;
 136}
 137
 138static void release_engine(struct refill_engine *engine)
 139{
 140	unsigned long flags;
 141
 142	spin_lock_irqsave(&list_lock, flags);
 143	list_add(&engine->idle_node, &omap_dmm->idle_head);
 144	spin_unlock_irqrestore(&list_lock, flags);
 145
 146	atomic_inc(&omap_dmm->engine_counter);
 147	wake_up_interruptible(&omap_dmm->engine_queue);
 148}
 149
 150static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 151{
 152	struct dmm *dmm = arg;
 153	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
 154	int i;
 155
 156	/* ack IRQ */
 157	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
 158
 159	for (i = 0; i < dmm->num_engines; i++) {
 160		if (status & DMM_IRQSTAT_LST) {
 
 
 161			if (dmm->engines[i].async)
 162				release_engine(&dmm->engines[i]);
 163
 164			complete(&dmm->engines[i].compl);
 165		}
 166
 167		status >>= 8;
 168	}
 169
 170	return IRQ_HANDLED;
 171}
 172
 173/**
 174 * Get a handle for a DMM transaction
 175 */
 176static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
 177{
 178	struct dmm_txn *txn = NULL;
 179	struct refill_engine *engine = NULL;
 180	int ret;
 181	unsigned long flags;
 182
 183
 184	/* wait until an engine is available */
 185	ret = wait_event_interruptible(omap_dmm->engine_queue,
 186		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
 187	if (ret)
 188		return ERR_PTR(ret);
 189
 190	/* grab an idle engine */
 191	spin_lock_irqsave(&list_lock, flags);
 192	if (!list_empty(&dmm->idle_head)) {
 193		engine = list_entry(dmm->idle_head.next, struct refill_engine,
 194					idle_node);
 195		list_del(&engine->idle_node);
 196	}
 197	spin_unlock_irqrestore(&list_lock, flags);
 198
 199	BUG_ON(!engine);
 200
 201	txn = &engine->txn;
 202	engine->tcm = tcm;
 203	txn->engine_handle = engine;
 204	txn->last_pat = NULL;
 205	txn->current_va = engine->refill_va;
 206	txn->current_pa = engine->refill_pa;
 207
 208	return txn;
 209}
 210
 211/**
 212 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 213 * corresponding slot is cleared (ie. dummy_pa is programmed)
 214 */
 215static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 216		struct page **pages, uint32_t npages, uint32_t roll)
 217{
 218	dma_addr_t pat_pa = 0, data_pa = 0;
 219	uint32_t *data;
 220	struct pat *pat;
 221	struct refill_engine *engine = txn->engine_handle;
 222	int columns = (1 + area->x1 - area->x0);
 223	int rows = (1 + area->y1 - area->y0);
 224	int i = columns*rows;
 225
 226	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
 227
 228	if (txn->last_pat)
 229		txn->last_pat->next_pa = (uint32_t)pat_pa;
 230
 231	pat->area = *area;
 232
 233	/* adjust Y coordinates based off of container parameters */
 234	pat->area.y0 += engine->tcm->y_offset;
 235	pat->area.y1 += engine->tcm->y_offset;
 236
 237	pat->ctrl = (struct pat_ctrl){
 238			.start = 1,
 239			.lut_id = engine->tcm->lut_id,
 240		};
 241
 242	data = alloc_dma(txn, 4*i, &data_pa);
 243	/* FIXME: what if data_pa is more than 32-bit ? */
 244	pat->data_pa = data_pa;
 245
 246	while (i--) {
 247		int n = i + roll;
 248		if (n >= npages)
 249			n -= npages;
 250		data[i] = (pages && pages[n]) ?
 251			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
 252	}
 253
 254	txn->last_pat = pat;
 255
 256	return;
 257}
 258
 259/**
 260 * Commit the DMM transaction.
 261 */
 262static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 263{
 264	int ret = 0;
 265	struct refill_engine *engine = txn->engine_handle;
 266	struct dmm *dmm = engine->dmm;
 267
 268	if (!txn->last_pat) {
 269		dev_err(engine->dmm->dev, "need at least one txn\n");
 270		ret = -EINVAL;
 271		goto cleanup;
 272	}
 273
 274	txn->last_pat->next_pa = 0;
 275
 276	/* write to PAT_DESCR to clear out any pending transaction */
 277	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
 278
 279	/* wait for engine ready: */
 280	ret = wait_status(engine, DMM_PATSTATUS_READY);
 281	if (ret) {
 282		ret = -EFAULT;
 283		goto cleanup;
 284	}
 285
 286	/* mark whether it is async to denote list management in IRQ handler */
 287	engine->async = wait ? false : true;
 288	reinit_completion(&engine->compl);
 289	/* verify that the irq handler sees the 'async' and completion value */
 290	smp_mb();
 291
 292	/* kick reload */
 293	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
 
 294
 295	if (wait) {
 296		if (!wait_for_completion_timeout(&engine->compl,
 297				msecs_to_jiffies(100))) {
 
 298			dev_err(dmm->dev, "timed out waiting for done\n");
 299			ret = -ETIMEDOUT;
 300		}
 301	}
 302
 303cleanup:
 304	/* only place engine back on list if we are done with it */
 305	if (ret || wait)
 306		release_engine(engine);
 307
 308	return ret;
 309}
 310
 311/*
 312 * DMM programming
 313 */
 314static int fill(struct tcm_area *area, struct page **pages,
 315		uint32_t npages, uint32_t roll, bool wait)
 316{
 317	int ret = 0;
 318	struct tcm_area slice, area_s;
 319	struct dmm_txn *txn;
 320
 321	/*
 322	 * FIXME
 323	 *
 324	 * Asynchronous fill does not work reliably, as the driver does not
 325	 * handle errors in the async code paths. The fill operation may
 326	 * silently fail, leading to leaking DMM engines, which may eventually
 327	 * lead to deadlock if we run out of DMM engines.
 328	 *
 329	 * For now, always set 'wait' so that we only use sync fills. Async
 330	 * fills should be fixed, or alternatively we could decide to only
 331	 * support sync fills and so the whole async code path could be removed.
 332	 */
 333
 334	wait = true;
 335
 336	txn = dmm_txn_init(omap_dmm, area->tcm);
 337	if (IS_ERR_OR_NULL(txn))
 338		return -ENOMEM;
 339
 340	tcm_for_each_slice(slice, *area, area_s) {
 341		struct pat_area p_area = {
 342				.x0 = slice.p0.x,  .y0 = slice.p0.y,
 343				.x1 = slice.p1.x,  .y1 = slice.p1.y,
 344		};
 345
 346		dmm_txn_append(txn, &p_area, pages, npages, roll);
 347
 348		roll += tcm_sizeof(slice);
 349	}
 350
 351	ret = dmm_txn_commit(txn, wait);
 352
 353	return ret;
 354}
 355
 356/*
 357 * Pin/unpin
 358 */
 359
 360/* note: slots for which pages[i] == NULL are filled w/ dummy page
 361 */
 362int tiler_pin(struct tiler_block *block, struct page **pages,
 363		uint32_t npages, uint32_t roll, bool wait)
 364{
 365	int ret;
 366
 367	ret = fill(&block->area, pages, npages, roll, wait);
 368
 369	if (ret)
 370		tiler_unpin(block);
 371
 372	return ret;
 373}
 374
 375int tiler_unpin(struct tiler_block *block)
 376{
 377	return fill(&block->area, NULL, 0, 0, false);
 378}
 379
 380/*
 381 * Reserve/release
 382 */
 383struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 384		uint16_t h, uint16_t align)
 385{
 386	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 387	u32 min_align = 128;
 388	int ret;
 389	unsigned long flags;
 390	size_t slot_bytes;
 391
 392	BUG_ON(!validfmt(fmt));
 393
 394	/* convert width/height to slots */
 395	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
 396	h = DIV_ROUND_UP(h, geom[fmt].slot_h);
 397
 398	/* convert alignment to slots */
 399	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
 400	min_align = max(min_align, slot_bytes);
 401	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
 402	align /= slot_bytes;
 403
 404	block->fmt = fmt;
 405
 406	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
 407			&block->area);
 408	if (ret) {
 409		kfree(block);
 410		return ERR_PTR(-ENOMEM);
 411	}
 412
 413	/* add to allocation list */
 414	spin_lock_irqsave(&list_lock, flags);
 415	list_add(&block->alloc_node, &omap_dmm->alloc_head);
 416	spin_unlock_irqrestore(&list_lock, flags);
 417
 418	return block;
 419}
 420
 421struct tiler_block *tiler_reserve_1d(size_t size)
 422{
 423	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 424	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 425	unsigned long flags;
 426
 427	if (!block)
 428		return ERR_PTR(-ENOMEM);
 429
 430	block->fmt = TILFMT_PAGE;
 431
 432	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
 433				&block->area)) {
 434		kfree(block);
 435		return ERR_PTR(-ENOMEM);
 436	}
 437
 438	spin_lock_irqsave(&list_lock, flags);
 439	list_add(&block->alloc_node, &omap_dmm->alloc_head);
 440	spin_unlock_irqrestore(&list_lock, flags);
 441
 442	return block;
 443}
 444
 445/* note: if you have pin'd pages, you should have already unpin'd first! */
 446int tiler_release(struct tiler_block *block)
 447{
 448	int ret = tcm_free(&block->area);
 449	unsigned long flags;
 450
 451	if (block->area.tcm)
 452		dev_err(omap_dmm->dev, "failed to release block\n");
 453
 454	spin_lock_irqsave(&list_lock, flags);
 455	list_del(&block->alloc_node);
 456	spin_unlock_irqrestore(&list_lock, flags);
 457
 458	kfree(block);
 459	return ret;
 460}
 461
 462/*
 463 * Utils
 464 */
 465
 466/* calculate the tiler space address of a pixel in a view orientation...
 467 * below description copied from the display subsystem section of TRM:
 468 *
 469 * When the TILER is addressed, the bits:
 470 *   [28:27] = 0x0 for 8-bit tiled
 471 *             0x1 for 16-bit tiled
 472 *             0x2 for 32-bit tiled
 473 *             0x3 for page mode
 474 *   [31:29] = 0x0 for 0-degree view
 475 *             0x1 for 180-degree view + mirroring
 476 *             0x2 for 0-degree view + mirroring
 477 *             0x3 for 180-degree view
 478 *             0x4 for 270-degree view + mirroring
 479 *             0x5 for 270-degree view
 480 *             0x6 for 90-degree view
 481 *             0x7 for 90-degree view + mirroring
 482 * Otherwise the bits indicated the corresponding bit address to access
 483 * the SDRAM.
 484 */
 485static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
 486{
 487	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
 488
 489	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
 490	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
 491	alignment = geom[fmt].x_shft + geom[fmt].y_shft;
 492
 493	/* validate coordinate */
 494	x_mask = MASK(x_bits);
 495	y_mask = MASK(y_bits);
 496
 497	if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
 498		DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
 499				x, x, x_mask, y, y, y_mask);
 500		return 0;
 501	}
 502
 503	/* account for mirroring */
 504	if (orient & MASK_X_INVERT)
 505		x ^= x_mask;
 506	if (orient & MASK_Y_INVERT)
 507		y ^= y_mask;
 508
 509	/* get coordinate address */
 510	if (orient & MASK_XY_FLIP)
 511		tmp = ((x << y_bits) + y);
 512	else
 513		tmp = ((y << x_bits) + x);
 514
 515	return TIL_ADDR((tmp << alignment), orient, fmt);
 516}
 517
 518dma_addr_t tiler_ssptr(struct tiler_block *block)
 519{
 520	BUG_ON(!validfmt(block->fmt));
 521
 522	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
 523			block->area.p0.x * geom[block->fmt].slot_w,
 524			block->area.p0.y * geom[block->fmt].slot_h);
 525}
 526
 527dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
 528		uint32_t x, uint32_t y)
 529{
 530	struct tcm_pt *p = &block->area.p0;
 531	BUG_ON(!validfmt(block->fmt));
 532
 533	return tiler_get_address(block->fmt, orient,
 534			(p->x * geom[block->fmt].slot_w) + x,
 535			(p->y * geom[block->fmt].slot_h) + y);
 536}
 537
 538void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
 539{
 540	BUG_ON(!validfmt(fmt));
 541	*w = round_up(*w, geom[fmt].slot_w);
 542	*h = round_up(*h, geom[fmt].slot_h);
 543}
 544
 545uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
 546{
 547	BUG_ON(!validfmt(fmt));
 548
 549	if (orient & MASK_XY_FLIP)
 550		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
 551	else
 552		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
 553}
 554
 555size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
 556{
 557	tiler_align(fmt, &w, &h);
 558	return geom[fmt].cpp * w * h;
 559}
 560
 561size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
 562{
 563	BUG_ON(!validfmt(fmt));
 564	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
 565}
 566
 567uint32_t tiler_get_cpu_cache_flags(void)
 568{
 569	return omap_dmm->plat_data->cpu_cache_flags;
 570}
 571
 572bool dmm_is_available(void)
 573{
 574	return omap_dmm ? true : false;
 575}
 576
 577static int omap_dmm_remove(struct platform_device *dev)
 578{
 579	struct tiler_block *block, *_block;
 580	int i;
 581	unsigned long flags;
 582
 583	if (omap_dmm) {
 584		/* free all area regions */
 585		spin_lock_irqsave(&list_lock, flags);
 586		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
 587					alloc_node) {
 588			list_del(&block->alloc_node);
 589			kfree(block);
 590		}
 591		spin_unlock_irqrestore(&list_lock, flags);
 592
 593		for (i = 0; i < omap_dmm->num_lut; i++)
 594			if (omap_dmm->tcm && omap_dmm->tcm[i])
 595				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
 596		kfree(omap_dmm->tcm);
 597
 598		kfree(omap_dmm->engines);
 599		if (omap_dmm->refill_va)
 600			dma_free_wc(omap_dmm->dev,
 601				    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 602				    omap_dmm->refill_va, omap_dmm->refill_pa);
 
 603		if (omap_dmm->dummy_page)
 604			__free_page(omap_dmm->dummy_page);
 605
 606		if (omap_dmm->irq > 0)
 607			free_irq(omap_dmm->irq, omap_dmm);
 608
 609		iounmap(omap_dmm->base);
 610		kfree(omap_dmm);
 611		omap_dmm = NULL;
 612	}
 613
 614	return 0;
 615}
 616
 617static int omap_dmm_probe(struct platform_device *dev)
 618{
 619	int ret = -EFAULT, i;
 620	struct tcm_area area = {0};
 621	u32 hwinfo, pat_geom;
 622	struct resource *mem;
 623
 624	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
 625	if (!omap_dmm)
 626		goto fail;
 627
 628	/* initialize lists */
 629	INIT_LIST_HEAD(&omap_dmm->alloc_head);
 630	INIT_LIST_HEAD(&omap_dmm->idle_head);
 631
 632	init_waitqueue_head(&omap_dmm->engine_queue);
 633
 634	if (dev->dev.of_node) {
 635		const struct of_device_id *match;
 636
 637		match = of_match_node(dmm_of_match, dev->dev.of_node);
 638		if (!match) {
 639			dev_err(&dev->dev, "failed to find matching device node\n");
 640			return -ENODEV;
 641		}
 642
 643		omap_dmm->plat_data = match->data;
 644	}
 645
 646	/* lookup hwmod data - base address and irq */
 647	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 648	if (!mem) {
 649		dev_err(&dev->dev, "failed to get base address resource\n");
 650		goto fail;
 651	}
 652
 653	omap_dmm->base = ioremap(mem->start, SZ_2K);
 654
 655	if (!omap_dmm->base) {
 656		dev_err(&dev->dev, "failed to get dmm base address\n");
 657		goto fail;
 658	}
 659
 660	omap_dmm->irq = platform_get_irq(dev, 0);
 661	if (omap_dmm->irq < 0) {
 662		dev_err(&dev->dev, "failed to get IRQ resource\n");
 663		goto fail;
 664	}
 665
 666	omap_dmm->dev = &dev->dev;
 667
 668	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
 669	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
 670	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
 671	omap_dmm->container_width = 256;
 672	omap_dmm->container_height = 128;
 673
 674	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
 675
 676	/* read out actual LUT width and height */
 677	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
 678	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
 679	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
 680
 681	/* increment LUT by one if on OMAP5 */
 682	/* LUT has twice the height, and is split into a separate container */
 683	if (omap_dmm->lut_height != omap_dmm->container_height)
 684		omap_dmm->num_lut++;
 685
 686	/* initialize DMM registers */
 687	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
 688	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
 689	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
 690	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
 691	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
 692	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
 693
 694	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
 695				"omap_dmm_irq_handler", omap_dmm);
 696
 697	if (ret) {
 698		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
 699			omap_dmm->irq, ret);
 700		omap_dmm->irq = -1;
 701		goto fail;
 702	}
 703
 704	/* Enable all interrupts for each refill engine except
 705	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
 706	 * about because we want to be able to refill live scanout
 707	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
 708	 * we just generally don't care about.
 709	 */
 710	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
 711
 712	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
 713	if (!omap_dmm->dummy_page) {
 714		dev_err(&dev->dev, "could not allocate dummy page\n");
 715		ret = -ENOMEM;
 716		goto fail;
 717	}
 718
 719	/* set dma mask for device */
 720	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
 721	if (ret)
 722		goto fail;
 723
 724	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
 725
 726	/* alloc refill memory */
 727	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
 728					   REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 729					   &omap_dmm->refill_pa, GFP_KERNEL);
 730	if (!omap_dmm->refill_va) {
 731		dev_err(&dev->dev, "could not allocate refill memory\n");
 732		goto fail;
 733	}
 734
 735	/* alloc engines */
 736	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
 737				    sizeof(struct refill_engine), GFP_KERNEL);
 738	if (!omap_dmm->engines) {
 739		ret = -ENOMEM;
 740		goto fail;
 741	}
 742
 743	for (i = 0; i < omap_dmm->num_engines; i++) {
 744		omap_dmm->engines[i].id = i;
 745		omap_dmm->engines[i].dmm = omap_dmm;
 746		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
 747						(REFILL_BUFFER_SIZE * i);
 748		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
 749						(REFILL_BUFFER_SIZE * i);
 750		init_completion(&omap_dmm->engines[i].compl);
 751
 752		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
 753	}
 754
 755	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
 756				GFP_KERNEL);
 757	if (!omap_dmm->tcm) {
 758		ret = -ENOMEM;
 759		goto fail;
 760	}
 761
 762	/* init containers */
 763	/* Each LUT is associated with a TCM (container manager).  We use the
 764	   lut_id to denote the lut_id used to identify the correct LUT for
 765	   programming during reill operations */
 766	for (i = 0; i < omap_dmm->num_lut; i++) {
 767		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
 768						omap_dmm->container_height);
 
 769
 770		if (!omap_dmm->tcm[i]) {
 771			dev_err(&dev->dev, "failed to allocate container\n");
 772			ret = -ENOMEM;
 773			goto fail;
 774		}
 775
 776		omap_dmm->tcm[i]->lut_id = i;
 777	}
 778
 779	/* assign access mode containers to applicable tcm container */
 780	/* OMAP 4 has 1 container for all 4 views */
 781	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
 782	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
 783	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
 784	containers[TILFMT_32BIT] = omap_dmm->tcm[0];
 785
 786	if (omap_dmm->container_height != omap_dmm->lut_height) {
 787		/* second LUT is used for PAGE mode.  Programming must use
 788		   y offset that is added to all y coordinates.  LUT id is still
 789		   0, because it is the same LUT, just the upper 128 lines */
 790		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
 791		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
 792		omap_dmm->tcm[1]->lut_id = 0;
 793	} else {
 794		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
 795	}
 796
 797	area = (struct tcm_area) {
 798		.tcm = NULL,
 799		.p1.x = omap_dmm->container_width - 1,
 800		.p1.y = omap_dmm->container_height - 1,
 801	};
 802
 803	/* initialize all LUTs to dummy page entries */
 804	for (i = 0; i < omap_dmm->num_lut; i++) {
 805		area.tcm = omap_dmm->tcm[i];
 806		if (fill(&area, NULL, 0, 0, true))
 807			dev_err(omap_dmm->dev, "refill failed");
 808	}
 809
 810	dev_info(omap_dmm->dev, "initialized all PAT entries\n");
 811
 812	return 0;
 813
 814fail:
 815	if (omap_dmm_remove(dev))
 816		dev_err(&dev->dev, "cleanup failed\n");
 817	return ret;
 818}
 819
 820/*
 821 * debugfs support
 822 */
 823
 824#ifdef CONFIG_DEBUG_FS
 825
 826static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
 827				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
 828static const char *special = ".,:;'\"`~!^-+";
 829
 830static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
 831							char c, bool ovw)
 832{
 833	int x, y;
 834	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
 835		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
 836			if (map[y][x] == ' ' || ovw)
 837				map[y][x] = c;
 838}
 839
 840static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
 841									char c)
 842{
 843	map[p->y / ydiv][p->x / xdiv] = c;
 844}
 845
 846static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
 847{
 848	return map[p->y / ydiv][p->x / xdiv];
 849}
 850
 851static int map_width(int xdiv, int x0, int x1)
 852{
 853	return (x1 / xdiv) - (x0 / xdiv) + 1;
 854}
 855
 856static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
 857{
 858	char *p = map[yd] + (x0 / xdiv);
 859	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
 860	if (w >= 0) {
 861		p += w;
 862		while (*nice)
 863			*p++ = *nice++;
 864	}
 865}
 866
 867static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
 868							struct tcm_area *a)
 869{
 870	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
 871	if (a->p0.y + 1 < a->p1.y) {
 872		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
 873							256 - 1);
 874	} else if (a->p0.y < a->p1.y) {
 875		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
 876			text_map(map, xdiv, nice, a->p0.y / ydiv,
 877					a->p0.x + xdiv,	256 - 1);
 878		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
 879			text_map(map, xdiv, nice, a->p1.y / ydiv,
 880					0, a->p1.y - xdiv);
 881	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
 882		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
 883	}
 884}
 885
 886static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
 887							struct tcm_area *a)
 888{
 889	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
 890	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
 891		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
 892							a->p0.x, a->p1.x);
 893}
 894
 895int tiler_map_show(struct seq_file *s, void *arg)
 896{
 897	int xdiv = 2, ydiv = 1;
 898	char **map = NULL, *global_map;
 899	struct tiler_block *block;
 900	struct tcm_area a, p;
 901	int i;
 902	const char *m2d = alphabet;
 903	const char *a2d = special;
 904	const char *m2dp = m2d, *a2dp = a2d;
 905	char nice[128];
 906	int h_adj;
 907	int w_adj;
 908	unsigned long flags;
 909	int lut_idx;
 910
 911
 912	if (!omap_dmm) {
 913		/* early return if dmm/tiler device is not initialized */
 914		return 0;
 915	}
 916
 917	h_adj = omap_dmm->container_height / ydiv;
 918	w_adj = omap_dmm->container_width / xdiv;
 919
 920	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
 921	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
 922
 923	if (!map || !global_map)
 924		goto error;
 925
 926	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
 927		memset(map, 0, h_adj * sizeof(*map));
 928		memset(global_map, ' ', (w_adj + 1) * h_adj);
 929
 930		for (i = 0; i < omap_dmm->container_height; i++) {
 931			map[i] = global_map + i * (w_adj + 1);
 932			map[i][w_adj] = 0;
 933		}
 934
 935		spin_lock_irqsave(&list_lock, flags);
 936
 937		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
 938			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
 939				if (block->fmt != TILFMT_PAGE) {
 940					fill_map(map, xdiv, ydiv, &block->area,
 941						*m2dp, true);
 942					if (!*++a2dp)
 943						a2dp = a2d;
 944					if (!*++m2dp)
 945						m2dp = m2d;
 946					map_2d_info(map, xdiv, ydiv, nice,
 947							&block->area);
 948				} else {
 949					bool start = read_map_pt(map, xdiv,
 950						ydiv, &block->area.p0) == ' ';
 951					bool end = read_map_pt(map, xdiv, ydiv,
 952							&block->area.p1) == ' ';
 953
 954					tcm_for_each_slice(a, block->area, p)
 955						fill_map(map, xdiv, ydiv, &a,
 956							'=', true);
 957					fill_map_pt(map, xdiv, ydiv,
 958							&block->area.p0,
 959							start ? '<' : 'X');
 960					fill_map_pt(map, xdiv, ydiv,
 961							&block->area.p1,
 962							end ? '>' : 'X');
 963					map_1d_info(map, xdiv, ydiv, nice,
 964							&block->area);
 965				}
 966			}
 967		}
 968
 969		spin_unlock_irqrestore(&list_lock, flags);
 970
 971		if (s) {
 972			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
 973			for (i = 0; i < 128; i++)
 974				seq_printf(s, "%03d:%s\n", i, map[i]);
 975			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
 976		} else {
 977			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
 978				lut_idx);
 979			for (i = 0; i < 128; i++)
 980				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
 981			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
 982				lut_idx);
 983		}
 984	}
 985
 986error:
 987	kfree(map);
 988	kfree(global_map);
 989
 990	return 0;
 991}
 992#endif
 993
 994#ifdef CONFIG_PM_SLEEP
 995static int omap_dmm_resume(struct device *dev)
 996{
 997	struct tcm_area area;
 998	int i;
 999
1000	if (!omap_dmm)
1001		return -ENODEV;
1002
1003	area = (struct tcm_area) {
1004		.tcm = NULL,
1005		.p1.x = omap_dmm->container_width - 1,
1006		.p1.y = omap_dmm->container_height - 1,
1007	};
1008
1009	/* initialize all LUTs to dummy page entries */
1010	for (i = 0; i < omap_dmm->num_lut; i++) {
1011		area.tcm = omap_dmm->tcm[i];
1012		if (fill(&area, NULL, 0, 0, true))
1013			dev_err(dev, "refill failed");
1014	}
1015
1016	return 0;
1017}
1018#endif
1019
1020static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
1021
1022#if defined(CONFIG_OF)
1023static const struct dmm_platform_data dmm_omap4_platform_data = {
1024	.cpu_cache_flags = OMAP_BO_WC,
1025};
1026
1027static const struct dmm_platform_data dmm_omap5_platform_data = {
1028	.cpu_cache_flags = OMAP_BO_UNCACHED,
1029};
 
1030
 
1031static const struct of_device_id dmm_of_match[] = {
1032	{
1033		.compatible = "ti,omap4-dmm",
1034		.data = &dmm_omap4_platform_data,
1035	},
1036	{
1037		.compatible = "ti,omap5-dmm",
1038		.data = &dmm_omap5_platform_data,
1039	},
1040	{},
1041};
1042#endif
1043
1044struct platform_driver omap_dmm_driver = {
1045	.probe = omap_dmm_probe,
1046	.remove = omap_dmm_remove,
1047	.driver = {
1048		.owner = THIS_MODULE,
1049		.name = DMM_DRIVER_NAME,
1050		.of_match_table = of_match_ptr(dmm_of_match),
 
1051		.pm = &omap_dmm_pm_ops,
 
1052	},
1053};
1054
1055MODULE_LICENSE("GPL v2");
1056MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
1057MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
v3.15
  1/*
  2 * DMM IOMMU driver support functions for TI OMAP processors.
  3 *
  4 * Author: Rob Clark <rob@ti.com>
  5 *         Andy Gross <andy.gross@ti.com>
  6 *
  7 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
  8 *
  9 * This program is free software; you can redistribute it and/or
 10 * modify it under the terms of the GNU General Public License as
 11 * published by the Free Software Foundation version 2.
 12 *
 13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 14 * kind, whether express or implied; without even the implied warranty
 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16 * GNU General Public License for more details.
 17 */
 
 
 
 
 
 18#include <linux/init.h>
 
 
 
 19#include <linux/module.h>
 20#include <linux/platform_device.h> /* platform_device() */
 21#include <linux/errno.h>
 22#include <linux/sched.h>
 23#include <linux/wait.h>
 24#include <linux/interrupt.h>
 25#include <linux/dma-mapping.h>
 26#include <linux/slab.h>
 
 27#include <linux/vmalloc.h>
 28#include <linux/delay.h>
 29#include <linux/mm.h>
 30#include <linux/time.h>
 31#include <linux/list.h>
 32
 33#include "omap_dmm_tiler.h"
 34#include "omap_dmm_priv.h"
 35
 36#define DMM_DRIVER_NAME "dmm"
 37
 38/* mappings for associating views to luts */
 39static struct tcm *containers[TILFMT_NFORMATS];
 40static struct dmm *omap_dmm;
 41
 
 
 
 
 42/* global spinlock for protecting lists */
 43static DEFINE_SPINLOCK(list_lock);
 44
 45/* Geometry table */
 46#define GEOM(xshift, yshift, bytes_per_pixel) { \
 47		.x_shft = (xshift), \
 48		.y_shft = (yshift), \
 49		.cpp    = (bytes_per_pixel), \
 50		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
 51		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
 52	}
 53
 54static const struct {
 55	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
 56	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
 57	uint32_t cpp;		/* bytes/chars per pixel */
 58	uint32_t slot_w;	/* width of each slot (in pixels) */
 59	uint32_t slot_h;	/* height of each slot (in pixels) */
 60} geom[TILFMT_NFORMATS] = {
 61		[TILFMT_8BIT]  = GEOM(0, 0, 1),
 62		[TILFMT_16BIT] = GEOM(0, 1, 2),
 63		[TILFMT_32BIT] = GEOM(1, 1, 4),
 64		[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
 65};
 66
 67
 68/* lookup table for registers w/ per-engine instances */
 69static const uint32_t reg[][4] = {
 70		[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
 71				DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
 72		[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
 73				DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
 74};
 75
 
 
 
 
 
 
 
 
 
 
 76/* simple allocator to grab next 16 byte aligned memory from txn */
 77static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
 78{
 79	void *ptr;
 80	struct refill_engine *engine = txn->engine_handle;
 81
 82	/* dmm programming requires 16 byte aligned addresses */
 83	txn->current_pa = round_up(txn->current_pa, 16);
 84	txn->current_va = (void *)round_up((long)txn->current_va, 16);
 85
 86	ptr = txn->current_va;
 87	*pa = txn->current_pa;
 88
 89	txn->current_pa += sz;
 90	txn->current_va += sz;
 91
 92	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
 93
 94	return ptr;
 95}
 96
 97/* check status and spin until wait_mask comes true */
 98static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
 99{
100	struct dmm *dmm = engine->dmm;
101	uint32_t r = 0, err, i;
102
103	i = DMM_FIXED_RETRY_COUNT;
104	while (true) {
105		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
106		err = r & DMM_PATSTATUS_ERR;
107		if (err)
108			return -EFAULT;
109
110		if ((r & wait_mask) == wait_mask)
111			break;
112
113		if (--i == 0)
114			return -ETIMEDOUT;
115
116		udelay(1);
117	}
118
119	return 0;
120}
121
122static void release_engine(struct refill_engine *engine)
123{
124	unsigned long flags;
125
126	spin_lock_irqsave(&list_lock, flags);
127	list_add(&engine->idle_node, &omap_dmm->idle_head);
128	spin_unlock_irqrestore(&list_lock, flags);
129
130	atomic_inc(&omap_dmm->engine_counter);
131	wake_up_interruptible(&omap_dmm->engine_queue);
132}
133
134static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
135{
136	struct dmm *dmm = arg;
137	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
138	int i;
139
140	/* ack IRQ */
141	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
142
143	for (i = 0; i < dmm->num_engines; i++) {
144		if (status & DMM_IRQSTAT_LST) {
145			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
146
147			if (dmm->engines[i].async)
148				release_engine(&dmm->engines[i]);
 
 
149		}
150
151		status >>= 8;
152	}
153
154	return IRQ_HANDLED;
155}
156
157/**
158 * Get a handle for a DMM transaction
159 */
160static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
161{
162	struct dmm_txn *txn = NULL;
163	struct refill_engine *engine = NULL;
164	int ret;
165	unsigned long flags;
166
167
168	/* wait until an engine is available */
169	ret = wait_event_interruptible(omap_dmm->engine_queue,
170		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
171	if (ret)
172		return ERR_PTR(ret);
173
174	/* grab an idle engine */
175	spin_lock_irqsave(&list_lock, flags);
176	if (!list_empty(&dmm->idle_head)) {
177		engine = list_entry(dmm->idle_head.next, struct refill_engine,
178					idle_node);
179		list_del(&engine->idle_node);
180	}
181	spin_unlock_irqrestore(&list_lock, flags);
182
183	BUG_ON(!engine);
184
185	txn = &engine->txn;
186	engine->tcm = tcm;
187	txn->engine_handle = engine;
188	txn->last_pat = NULL;
189	txn->current_va = engine->refill_va;
190	txn->current_pa = engine->refill_pa;
191
192	return txn;
193}
194
195/**
196 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
197 * corresponding slot is cleared (ie. dummy_pa is programmed)
198 */
199static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
200		struct page **pages, uint32_t npages, uint32_t roll)
201{
202	dma_addr_t pat_pa = 0;
203	uint32_t *data;
204	struct pat *pat;
205	struct refill_engine *engine = txn->engine_handle;
206	int columns = (1 + area->x1 - area->x0);
207	int rows = (1 + area->y1 - area->y0);
208	int i = columns*rows;
209
210	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
211
212	if (txn->last_pat)
213		txn->last_pat->next_pa = (uint32_t)pat_pa;
214
215	pat->area = *area;
216
217	/* adjust Y coordinates based off of container parameters */
218	pat->area.y0 += engine->tcm->y_offset;
219	pat->area.y1 += engine->tcm->y_offset;
220
221	pat->ctrl = (struct pat_ctrl){
222			.start = 1,
223			.lut_id = engine->tcm->lut_id,
224		};
225
226	data = alloc_dma(txn, 4*i, &pat->data_pa);
 
 
227
228	while (i--) {
229		int n = i + roll;
230		if (n >= npages)
231			n -= npages;
232		data[i] = (pages && pages[n]) ?
233			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
234	}
235
236	txn->last_pat = pat;
237
238	return;
239}
240
241/**
242 * Commit the DMM transaction.
243 */
244static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
245{
246	int ret = 0;
247	struct refill_engine *engine = txn->engine_handle;
248	struct dmm *dmm = engine->dmm;
249
250	if (!txn->last_pat) {
251		dev_err(engine->dmm->dev, "need at least one txn\n");
252		ret = -EINVAL;
253		goto cleanup;
254	}
255
256	txn->last_pat->next_pa = 0;
257
258	/* write to PAT_DESCR to clear out any pending transaction */
259	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
260
261	/* wait for engine ready: */
262	ret = wait_status(engine, DMM_PATSTATUS_READY);
263	if (ret) {
264		ret = -EFAULT;
265		goto cleanup;
266	}
267
268	/* mark whether it is async to denote list management in IRQ handler */
269	engine->async = wait ? false : true;
 
 
 
270
271	/* kick reload */
272	writel(engine->refill_pa,
273		dmm->base + reg[PAT_DESCR][engine->id]);
274
275	if (wait) {
276		if (wait_event_interruptible_timeout(engine->wait_for_refill,
277				wait_status(engine, DMM_PATSTATUS_READY) == 0,
278				msecs_to_jiffies(1)) <= 0) {
279			dev_err(dmm->dev, "timed out waiting for done\n");
280			ret = -ETIMEDOUT;
281		}
282	}
283
284cleanup:
285	/* only place engine back on list if we are done with it */
286	if (ret || wait)
287		release_engine(engine);
288
289	return ret;
290}
291
292/*
293 * DMM programming
294 */
295static int fill(struct tcm_area *area, struct page **pages,
296		uint32_t npages, uint32_t roll, bool wait)
297{
298	int ret = 0;
299	struct tcm_area slice, area_s;
300	struct dmm_txn *txn;
301
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302	txn = dmm_txn_init(omap_dmm, area->tcm);
303	if (IS_ERR_OR_NULL(txn))
304		return -ENOMEM;
305
306	tcm_for_each_slice(slice, *area, area_s) {
307		struct pat_area p_area = {
308				.x0 = slice.p0.x,  .y0 = slice.p0.y,
309				.x1 = slice.p1.x,  .y1 = slice.p1.y,
310		};
311
312		dmm_txn_append(txn, &p_area, pages, npages, roll);
313
314		roll += tcm_sizeof(slice);
315	}
316
317	ret = dmm_txn_commit(txn, wait);
318
319	return ret;
320}
321
322/*
323 * Pin/unpin
324 */
325
326/* note: slots for which pages[i] == NULL are filled w/ dummy page
327 */
328int tiler_pin(struct tiler_block *block, struct page **pages,
329		uint32_t npages, uint32_t roll, bool wait)
330{
331	int ret;
332
333	ret = fill(&block->area, pages, npages, roll, wait);
334
335	if (ret)
336		tiler_unpin(block);
337
338	return ret;
339}
340
341int tiler_unpin(struct tiler_block *block)
342{
343	return fill(&block->area, NULL, 0, 0, false);
344}
345
346/*
347 * Reserve/release
348 */
349struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
350		uint16_t h, uint16_t align)
351{
352	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
353	u32 min_align = 128;
354	int ret;
355	unsigned long flags;
 
356
357	BUG_ON(!validfmt(fmt));
358
359	/* convert width/height to slots */
360	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
361	h = DIV_ROUND_UP(h, geom[fmt].slot_h);
362
363	/* convert alignment to slots */
364	min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
365	align = ALIGN(align, min_align);
366	align /= geom[fmt].slot_w * geom[fmt].cpp;
 
367
368	block->fmt = fmt;
369
370	ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
 
371	if (ret) {
372		kfree(block);
373		return ERR_PTR(-ENOMEM);
374	}
375
376	/* add to allocation list */
377	spin_lock_irqsave(&list_lock, flags);
378	list_add(&block->alloc_node, &omap_dmm->alloc_head);
379	spin_unlock_irqrestore(&list_lock, flags);
380
381	return block;
382}
383
384struct tiler_block *tiler_reserve_1d(size_t size)
385{
386	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
387	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
388	unsigned long flags;
389
390	if (!block)
391		return ERR_PTR(-ENOMEM);
392
393	block->fmt = TILFMT_PAGE;
394
395	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
396				&block->area)) {
397		kfree(block);
398		return ERR_PTR(-ENOMEM);
399	}
400
401	spin_lock_irqsave(&list_lock, flags);
402	list_add(&block->alloc_node, &omap_dmm->alloc_head);
403	spin_unlock_irqrestore(&list_lock, flags);
404
405	return block;
406}
407
408/* note: if you have pin'd pages, you should have already unpin'd first! */
409int tiler_release(struct tiler_block *block)
410{
411	int ret = tcm_free(&block->area);
412	unsigned long flags;
413
414	if (block->area.tcm)
415		dev_err(omap_dmm->dev, "failed to release block\n");
416
417	spin_lock_irqsave(&list_lock, flags);
418	list_del(&block->alloc_node);
419	spin_unlock_irqrestore(&list_lock, flags);
420
421	kfree(block);
422	return ret;
423}
424
425/*
426 * Utils
427 */
428
429/* calculate the tiler space address of a pixel in a view orientation...
430 * below description copied from the display subsystem section of TRM:
431 *
432 * When the TILER is addressed, the bits:
433 *   [28:27] = 0x0 for 8-bit tiled
434 *             0x1 for 16-bit tiled
435 *             0x2 for 32-bit tiled
436 *             0x3 for page mode
437 *   [31:29] = 0x0 for 0-degree view
438 *             0x1 for 180-degree view + mirroring
439 *             0x2 for 0-degree view + mirroring
440 *             0x3 for 180-degree view
441 *             0x4 for 270-degree view + mirroring
442 *             0x5 for 270-degree view
443 *             0x6 for 90-degree view
444 *             0x7 for 90-degree view + mirroring
445 * Otherwise the bits indicated the corresponding bit address to access
446 * the SDRAM.
447 */
448static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
449{
450	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
451
452	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
453	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
454	alignment = geom[fmt].x_shft + geom[fmt].y_shft;
455
456	/* validate coordinate */
457	x_mask = MASK(x_bits);
458	y_mask = MASK(y_bits);
459
460	if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
461		DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
462				x, x, x_mask, y, y, y_mask);
463		return 0;
464	}
465
466	/* account for mirroring */
467	if (orient & MASK_X_INVERT)
468		x ^= x_mask;
469	if (orient & MASK_Y_INVERT)
470		y ^= y_mask;
471
472	/* get coordinate address */
473	if (orient & MASK_XY_FLIP)
474		tmp = ((x << y_bits) + y);
475	else
476		tmp = ((y << x_bits) + x);
477
478	return TIL_ADDR((tmp << alignment), orient, fmt);
479}
480
481dma_addr_t tiler_ssptr(struct tiler_block *block)
482{
483	BUG_ON(!validfmt(block->fmt));
484
485	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
486			block->area.p0.x * geom[block->fmt].slot_w,
487			block->area.p0.y * geom[block->fmt].slot_h);
488}
489
490dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
491		uint32_t x, uint32_t y)
492{
493	struct tcm_pt *p = &block->area.p0;
494	BUG_ON(!validfmt(block->fmt));
495
496	return tiler_get_address(block->fmt, orient,
497			(p->x * geom[block->fmt].slot_w) + x,
498			(p->y * geom[block->fmt].slot_h) + y);
499}
500
501void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
502{
503	BUG_ON(!validfmt(fmt));
504	*w = round_up(*w, geom[fmt].slot_w);
505	*h = round_up(*h, geom[fmt].slot_h);
506}
507
508uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
509{
510	BUG_ON(!validfmt(fmt));
511
512	if (orient & MASK_XY_FLIP)
513		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
514	else
515		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
516}
517
518size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
519{
520	tiler_align(fmt, &w, &h);
521	return geom[fmt].cpp * w * h;
522}
523
524size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
525{
526	BUG_ON(!validfmt(fmt));
527	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
528}
529
 
 
 
 
 
530bool dmm_is_available(void)
531{
532	return omap_dmm ? true : false;
533}
534
535static int omap_dmm_remove(struct platform_device *dev)
536{
537	struct tiler_block *block, *_block;
538	int i;
539	unsigned long flags;
540
541	if (omap_dmm) {
542		/* free all area regions */
543		spin_lock_irqsave(&list_lock, flags);
544		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
545					alloc_node) {
546			list_del(&block->alloc_node);
547			kfree(block);
548		}
549		spin_unlock_irqrestore(&list_lock, flags);
550
551		for (i = 0; i < omap_dmm->num_lut; i++)
552			if (omap_dmm->tcm && omap_dmm->tcm[i])
553				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
554		kfree(omap_dmm->tcm);
555
556		kfree(omap_dmm->engines);
557		if (omap_dmm->refill_va)
558			dma_free_writecombine(omap_dmm->dev,
559				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
560				omap_dmm->refill_va,
561				omap_dmm->refill_pa);
562		if (omap_dmm->dummy_page)
563			__free_page(omap_dmm->dummy_page);
564
565		if (omap_dmm->irq > 0)
566			free_irq(omap_dmm->irq, omap_dmm);
567
568		iounmap(omap_dmm->base);
569		kfree(omap_dmm);
570		omap_dmm = NULL;
571	}
572
573	return 0;
574}
575
576static int omap_dmm_probe(struct platform_device *dev)
577{
578	int ret = -EFAULT, i;
579	struct tcm_area area = {0};
580	u32 hwinfo, pat_geom;
581	struct resource *mem;
582
583	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
584	if (!omap_dmm)
585		goto fail;
586
587	/* initialize lists */
588	INIT_LIST_HEAD(&omap_dmm->alloc_head);
589	INIT_LIST_HEAD(&omap_dmm->idle_head);
590
591	init_waitqueue_head(&omap_dmm->engine_queue);
592
 
 
 
 
 
 
 
 
 
 
 
 
593	/* lookup hwmod data - base address and irq */
594	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
595	if (!mem) {
596		dev_err(&dev->dev, "failed to get base address resource\n");
597		goto fail;
598	}
599
600	omap_dmm->base = ioremap(mem->start, SZ_2K);
601
602	if (!omap_dmm->base) {
603		dev_err(&dev->dev, "failed to get dmm base address\n");
604		goto fail;
605	}
606
607	omap_dmm->irq = platform_get_irq(dev, 0);
608	if (omap_dmm->irq < 0) {
609		dev_err(&dev->dev, "failed to get IRQ resource\n");
610		goto fail;
611	}
612
613	omap_dmm->dev = &dev->dev;
614
615	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
616	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
617	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
618	omap_dmm->container_width = 256;
619	omap_dmm->container_height = 128;
620
621	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
622
623	/* read out actual LUT width and height */
624	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
625	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
626	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
627
628	/* increment LUT by one if on OMAP5 */
629	/* LUT has twice the height, and is split into a separate container */
630	if (omap_dmm->lut_height != omap_dmm->container_height)
631		omap_dmm->num_lut++;
632
633	/* initialize DMM registers */
634	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
635	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
636	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
637	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
638	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
639	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
640
641	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
642				"omap_dmm_irq_handler", omap_dmm);
643
644	if (ret) {
645		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
646			omap_dmm->irq, ret);
647		omap_dmm->irq = -1;
648		goto fail;
649	}
650
651	/* Enable all interrupts for each refill engine except
652	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
653	 * about because we want to be able to refill live scanout
654	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
655	 * we just generally don't care about.
656	 */
657	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
658
659	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
660	if (!omap_dmm->dummy_page) {
661		dev_err(&dev->dev, "could not allocate dummy page\n");
662		ret = -ENOMEM;
663		goto fail;
664	}
665
666	/* set dma mask for device */
667	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
668	if (ret)
669		goto fail;
670
671	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
672
673	/* alloc refill memory */
674	omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
675				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
676				&omap_dmm->refill_pa, GFP_KERNEL);
677	if (!omap_dmm->refill_va) {
678		dev_err(&dev->dev, "could not allocate refill memory\n");
679		goto fail;
680	}
681
682	/* alloc engines */
683	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
684				    sizeof(struct refill_engine), GFP_KERNEL);
685	if (!omap_dmm->engines) {
686		ret = -ENOMEM;
687		goto fail;
688	}
689
690	for (i = 0; i < omap_dmm->num_engines; i++) {
691		omap_dmm->engines[i].id = i;
692		omap_dmm->engines[i].dmm = omap_dmm;
693		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
694						(REFILL_BUFFER_SIZE * i);
695		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
696						(REFILL_BUFFER_SIZE * i);
697		init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
698
699		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
700	}
701
702	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
703				GFP_KERNEL);
704	if (!omap_dmm->tcm) {
705		ret = -ENOMEM;
706		goto fail;
707	}
708
709	/* init containers */
710	/* Each LUT is associated with a TCM (container manager).  We use the
711	   lut_id to denote the lut_id used to identify the correct LUT for
712	   programming during reill operations */
713	for (i = 0; i < omap_dmm->num_lut; i++) {
714		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
715						omap_dmm->container_height,
716						NULL);
717
718		if (!omap_dmm->tcm[i]) {
719			dev_err(&dev->dev, "failed to allocate container\n");
720			ret = -ENOMEM;
721			goto fail;
722		}
723
724		omap_dmm->tcm[i]->lut_id = i;
725	}
726
727	/* assign access mode containers to applicable tcm container */
728	/* OMAP 4 has 1 container for all 4 views */
729	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
730	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
731	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
732	containers[TILFMT_32BIT] = omap_dmm->tcm[0];
733
734	if (omap_dmm->container_height != omap_dmm->lut_height) {
735		/* second LUT is used for PAGE mode.  Programming must use
736		   y offset that is added to all y coordinates.  LUT id is still
737		   0, because it is the same LUT, just the upper 128 lines */
738		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
739		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
740		omap_dmm->tcm[1]->lut_id = 0;
741	} else {
742		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
743	}
744
745	area = (struct tcm_area) {
746		.tcm = NULL,
747		.p1.x = omap_dmm->container_width - 1,
748		.p1.y = omap_dmm->container_height - 1,
749	};
750
751	/* initialize all LUTs to dummy page entries */
752	for (i = 0; i < omap_dmm->num_lut; i++) {
753		area.tcm = omap_dmm->tcm[i];
754		if (fill(&area, NULL, 0, 0, true))
755			dev_err(omap_dmm->dev, "refill failed");
756	}
757
758	dev_info(omap_dmm->dev, "initialized all PAT entries\n");
759
760	return 0;
761
762fail:
763	if (omap_dmm_remove(dev))
764		dev_err(&dev->dev, "cleanup failed\n");
765	return ret;
766}
767
768/*
769 * debugfs support
770 */
771
772#ifdef CONFIG_DEBUG_FS
773
774static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
775				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
776static const char *special = ".,:;'\"`~!^-+";
777
778static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
779							char c, bool ovw)
780{
781	int x, y;
782	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
783		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
784			if (map[y][x] == ' ' || ovw)
785				map[y][x] = c;
786}
787
788static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
789									char c)
790{
791	map[p->y / ydiv][p->x / xdiv] = c;
792}
793
794static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
795{
796	return map[p->y / ydiv][p->x / xdiv];
797}
798
799static int map_width(int xdiv, int x0, int x1)
800{
801	return (x1 / xdiv) - (x0 / xdiv) + 1;
802}
803
804static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
805{
806	char *p = map[yd] + (x0 / xdiv);
807	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
808	if (w >= 0) {
809		p += w;
810		while (*nice)
811			*p++ = *nice++;
812	}
813}
814
815static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
816							struct tcm_area *a)
817{
818	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
819	if (a->p0.y + 1 < a->p1.y) {
820		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
821							256 - 1);
822	} else if (a->p0.y < a->p1.y) {
823		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
824			text_map(map, xdiv, nice, a->p0.y / ydiv,
825					a->p0.x + xdiv,	256 - 1);
826		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
827			text_map(map, xdiv, nice, a->p1.y / ydiv,
828					0, a->p1.y - xdiv);
829	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
830		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
831	}
832}
833
834static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
835							struct tcm_area *a)
836{
837	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
838	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
839		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
840							a->p0.x, a->p1.x);
841}
842
843int tiler_map_show(struct seq_file *s, void *arg)
844{
845	int xdiv = 2, ydiv = 1;
846	char **map = NULL, *global_map;
847	struct tiler_block *block;
848	struct tcm_area a, p;
849	int i;
850	const char *m2d = alphabet;
851	const char *a2d = special;
852	const char *m2dp = m2d, *a2dp = a2d;
853	char nice[128];
854	int h_adj;
855	int w_adj;
856	unsigned long flags;
857	int lut_idx;
858
859
860	if (!omap_dmm) {
861		/* early return if dmm/tiler device is not initialized */
862		return 0;
863	}
864
865	h_adj = omap_dmm->container_height / ydiv;
866	w_adj = omap_dmm->container_width / xdiv;
867
868	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
869	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
870
871	if (!map || !global_map)
872		goto error;
873
874	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
875		memset(map, 0, h_adj * sizeof(*map));
876		memset(global_map, ' ', (w_adj + 1) * h_adj);
877
878		for (i = 0; i < omap_dmm->container_height; i++) {
879			map[i] = global_map + i * (w_adj + 1);
880			map[i][w_adj] = 0;
881		}
882
883		spin_lock_irqsave(&list_lock, flags);
884
885		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
886			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
887				if (block->fmt != TILFMT_PAGE) {
888					fill_map(map, xdiv, ydiv, &block->area,
889						*m2dp, true);
890					if (!*++a2dp)
891						a2dp = a2d;
892					if (!*++m2dp)
893						m2dp = m2d;
894					map_2d_info(map, xdiv, ydiv, nice,
895							&block->area);
896				} else {
897					bool start = read_map_pt(map, xdiv,
898						ydiv, &block->area.p0) == ' ';
899					bool end = read_map_pt(map, xdiv, ydiv,
900							&block->area.p1) == ' ';
901
902					tcm_for_each_slice(a, block->area, p)
903						fill_map(map, xdiv, ydiv, &a,
904							'=', true);
905					fill_map_pt(map, xdiv, ydiv,
906							&block->area.p0,
907							start ? '<' : 'X');
908					fill_map_pt(map, xdiv, ydiv,
909							&block->area.p1,
910							end ? '>' : 'X');
911					map_1d_info(map, xdiv, ydiv, nice,
912							&block->area);
913				}
914			}
915		}
916
917		spin_unlock_irqrestore(&list_lock, flags);
918
919		if (s) {
920			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
921			for (i = 0; i < 128; i++)
922				seq_printf(s, "%03d:%s\n", i, map[i]);
923			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
924		} else {
925			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
926				lut_idx);
927			for (i = 0; i < 128; i++)
928				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
929			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
930				lut_idx);
931		}
932	}
933
934error:
935	kfree(map);
936	kfree(global_map);
937
938	return 0;
939}
940#endif
941
942#ifdef CONFIG_PM
943static int omap_dmm_resume(struct device *dev)
944{
945	struct tcm_area area;
946	int i;
947
948	if (!omap_dmm)
949		return -ENODEV;
950
951	area = (struct tcm_area) {
952		.tcm = NULL,
953		.p1.x = omap_dmm->container_width - 1,
954		.p1.y = omap_dmm->container_height - 1,
955	};
956
957	/* initialize all LUTs to dummy page entries */
958	for (i = 0; i < omap_dmm->num_lut; i++) {
959		area.tcm = omap_dmm->tcm[i];
960		if (fill(&area, NULL, 0, 0, true))
961			dev_err(dev, "refill failed");
962	}
963
964	return 0;
965}
 
 
 
 
 
 
 
 
966
967static const struct dev_pm_ops omap_dmm_pm_ops = {
968	.resume = omap_dmm_resume,
969};
970#endif
971
972#if defined(CONFIG_OF)
973static const struct of_device_id dmm_of_match[] = {
974	{ .compatible = "ti,omap4-dmm", },
975	{ .compatible = "ti,omap5-dmm", },
 
 
 
 
 
 
976	{},
977};
978#endif
979
980struct platform_driver omap_dmm_driver = {
981	.probe = omap_dmm_probe,
982	.remove = omap_dmm_remove,
983	.driver = {
984		.owner = THIS_MODULE,
985		.name = DMM_DRIVER_NAME,
986		.of_match_table = of_match_ptr(dmm_of_match),
987#ifdef CONFIG_PM
988		.pm = &omap_dmm_pm_ops,
989#endif
990	},
991};
992
993MODULE_LICENSE("GPL v2");
994MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
995MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
996MODULE_ALIAS("platform:" DMM_DRIVER_NAME);