// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	u32 dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};
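
/*
 * Worked example for the fields above (a sketch; the concrete numbers assume
 * an 8-bit TILER container with 64-row slots and a 16KB container stride,
 * matching the stride figures mentioned in omap_gem_mmap_size() below):
 * height = 64, height_shift = 6, stride_pfn = 16384 >> PAGE_SHIFT = 4, and
 * each usergart entry then covers one 4KB-wide, 64-row strip, i.e. 64 pages
 * of backing that can be faulted in and evicted as a unit.
 */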

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}
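
/*
 * Note on the dmabuf case above: a single scatterlist entry describes one
 * physically contiguous range of memory, so a table with a single entry
 * means the whole imported buffer is contiguous and the dma_addr stored by
 * omap_gem_new_dmabuf() (from sg_dma_address(sgt->sgl)) covers it entirely.
 */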

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
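
/*
 * Note the asymmetry above: for WC/uncached buffers every page is mapped to
 * the device up front (the mapping doubles as a cache clean), while for
 * cached buffers the dma_addrs array is merely zero-allocated.  A zero entry
 * means "page currently owned by the CPU"; entries are filled in lazily by
 * omap_gem_dma_sync_buffer() and cleared again by omap_gem_cpu_sync_page(),
 * as described in the Memory Management & DMA Sync section below.
 */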

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate the mmap
		 * offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
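
/*
 * For example (a sketch, using the stride figures from the comment above):
 * an 8-bit tiled buffer 1000 bytes wide and 480 rows high occupies 16KB per
 * row in the TILER container, but its mmap view only needs the stride
 * rounded up to a page, so the virtual size is PAGE_ALIGN(1000) * 480 =
 * 4096 * 480 bytes rather than 16384 * 480.
 */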

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb-wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k buffers.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't corrupt whatever other random page used to be
	 * mapped in, or cause other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
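
/*
 * Worked example for the math above (a sketch, assuming an 8-bit format
 * with n = 64, n_shift = 6, a buffer narrower than a page so m = 1, and a
 * fault at virtual pgoff = 70): the fault lands in the second slot-row
 * (virtual pages 64..127), so base_pgoff rounds down to 64, the entry
 * records obj_pgoff = 64, and the physical copy starts at page
 * (64 >> 6) * slots = slots.  The 'slots' real pages of that slot-row,
 * padded with dummy pages up to 64, are pinned, and all 64 PTEs of the
 * strip are inserted, stepping pfn by stride_pfn per container row.
 */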

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do the corresponding put_pages()?  We are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/* We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
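
/*
 * The shmem-file shunt above is what makes the lazy cache management work:
 * omap_gem_dma_sync_buffer() below calls unmap_mapping_range() on
 * obj->filp->f_mapping, which can only zap the userspace PTEs of cached
 * buffers if those mappings were installed against obj->filp in the first
 * place.
 */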

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
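
/*
 * For example, a 1280x720 buffer at 32 bpp ends up with
 * pitch = DIV_ROUND_UP(1280 * 32, 8) = 5120 bytes and
 * size = PAGE_ALIGN(5120 * 720) = 3686400 bytes (already page aligned),
 * allocated write-combined and flagged for scanout.
 */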

/**
 * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
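
/*
 * Taken together, the two helpers above implement a simple per-page
 * ownership protocol for cached shmem buffers:
 *
 *	CPU owns page:    dma_addrs[i] == 0, page may be mapped to userspace
 *	device owns page: dma_addrs[i] != 0, userspace PTE zapped
 *
 * omap_gem_dma_sync_buffer() hands every CPU-owned page to the device
 * (cleaning the cache as a side effect of dma_map_page()), and the next
 * CPU fault brings individual pages back via omap_gem_cpu_sync_page().
 */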

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (omap_gem_is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
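
/*
 * Typical usage (a sketch; the surrounding scanout code is hypothetical):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program dma_addr into the display or DMA engine ...
 *	omap_gem_unpin(obj);	// drop the reference when scanout is done
 */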

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&omap_obj->lock);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&omap_obj->lock);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take omap_obj->lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always backed by shmem pages. When they
		 * are scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
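
/*
 * Typical allocation from driver code (a sketch; the 2D dimensions are made
 * up for illustration):
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	struct drm_gem_object *obj =
 *		omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 *
 * For non-tiled buffers, gsize.bytes carries the size instead, as in
 * omap_gem_dumb_create() above.
 */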

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
					   struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			if (i < npages)
				pages[i] = sg_page_iter_page(&iter);
			i++;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide and uses the
		 * minimum number of rows, the height ends up equal to the
		 * number of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
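
/*
 * Worked numbers for the loop above (a sketch; the slot heights are whatever
 * tiler_align() picks for the hardware, 64 rows being the value the comments
 * in this file assume for 8-bit): for i = 0 (TILFMT_8BIT), w starts at
 * PAGE_SIZE = 4096 bytes and h = 1 is rounded up to 64, giving height = 64,
 * height_shift = 6, slot_shift = ilog2(4096 / 64) = 6, and, if the container
 * stride is 16KB, stride_pfn = 16384 >> 12 = 4.
 */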

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}