v4.17
   1/*
   2 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
   3 * Author: Rob Clark <rob.clark@linaro.org>
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program.  If not, see <http://www.gnu.org/licenses/>.
  16 */
  17
  18#include <linux/seq_file.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/spinlock.h>
  21#include <linux/pfn_t.h>
  22
  23#include <drm/drm_vma_manager.h>
  24
  25#include "omap_drv.h"
  26#include "omap_dmm_tiler.h"
  27
  28/*
  29 * GEM buffer object implementation.
  30 */
  31
  32/* note: we use upper 8 bits of flags for driver-internal flags: */
  33#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
  34#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
  35#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
  36
  37struct omap_gem_object {
  38	struct drm_gem_object base;
  39
  40	struct list_head mm_list;
  41
  42	u32 flags;
  43
  44	/** width/height for tiled formats (rounded up to slot boundaries) */
  45	u16 width, height;
  46
  47	/** roll applied when mapping to DMM */
  48	u32 roll;
  49
  50	/**
  51	 * dma_addr contains the buffer DMA address. It is valid for
  52	 *
  53	 * - buffers allocated through the DMA mapping API (with the
  54	 *   OMAP_BO_MEM_DMA_API flag set)
  55	 *
  56	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  57	 *   if they are physically contiguous (when sgt->orig_nents == 1)
  58	 *
  59	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
  60	 *   which case the DMA address points to the TILER aperture
  61	 *
  62	 * Physically contiguous buffers have their DMA address equal to the
  63	 * physical address as we don't remap those buffers through the TILER.
  64	 *
  65	 * Buffers mapped to the TILER have their DMA address pointing to the
  66	 * TILER aperture. As TILER mappings are refcounted (through
  67	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
  68	 * to ensure that the mapping won't disappear unexpectedly. References
  69	 * must be released with omap_gem_unpin().
  70	 */
  71	dma_addr_t dma_addr;
  72
  73	/**
  74	 * # of users of dma_addr
  75	 */
  76	u32 dma_addr_cnt;
  77
  78	/**
   79	 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF flag
  80	 * is set and the sgt field is valid.
  81	 */
  82	struct sg_table *sgt;
  83
  84	/**
  85	 * tiler block used when buffer is remapped in DMM/TILER.
  86	 */
  87	struct tiler_block *block;
  88
  89	/**
  90	 * Array of backing pages, if allocated.  Note that pages are never
  91	 * allocated for buffers originally allocated from contiguous memory
  92	 */
  93	struct page **pages;
  94
  95	/** addresses corresponding to pages in above array */
  96	dma_addr_t *dma_addrs;
  97
  98	/**
  99	 * Virtual address, if mapped.
 100	 */
 101	void *vaddr;
 102};
 103
 104#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
 105
 106/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 107 * not necessarily pinned in TILER all the time, and (b) when they are
 108 * they are not necessarily page aligned, we reserve one or more small
 109 * regions in each of the 2d containers to use as a user-GART where we
 110 * can create a second page-aligned mapping of parts of the buffer
 111 * being accessed from userspace.
 112 *
 113 * Note that we could optimize slightly when we know that multiple
 114 * tiler containers are backed by the same PAT.. but I'll leave that
 115 * for later..
 116 */
 117#define NUM_USERGART_ENTRIES 2
 118struct omap_drm_usergart_entry {
 119	struct tiler_block *block;	/* the reserved tiler block */
 120	dma_addr_t dma_addr;
 121	struct drm_gem_object *obj;	/* the current pinned obj */
 122	pgoff_t obj_pgoff;		/* page offset of obj currently
 123					   mapped in */
 124};
 125
 126struct omap_drm_usergart {
 127	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 128	int height;				/* height in rows */
 129	int height_shift;		/* ilog2(height in rows) */
 130	int slot_shift;			/* ilog2(width per slot) */
 131	int stride_pfn;			/* stride in pages */
 132	int last;				/* index of last used entry */
 133};
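/*
 * Editorial sketch (mirrors the fault handling further down, not extra
 * driver code): usergart entries are handed out round-robin through the
 * 'last' index, so at most NUM_USERGART_ENTRIES recently faulted regions
 * stay mapped per tiler format at any time:
 *
 *	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 *	if (entry->obj)
 *		evict_entry(entry->obj, fmt, entry);
 *	...
 *	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 *				 % NUM_USERGART_ENTRIES;
 */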
 134
 135/* -----------------------------------------------------------------------------
 136 * Helpers
 137 */
 138
 139/** get mmap offset */
 140static u64 mmap_offset(struct drm_gem_object *obj)
 141{
 142	struct drm_device *dev = obj->dev;
 143	int ret;
 144	size_t size;
 145
 146	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 147
 148	/* Make it mmapable */
 149	size = omap_gem_mmap_size(obj);
 150	ret = drm_gem_create_mmap_offset_size(obj, size);
 151	if (ret) {
 152		dev_err(dev->dev, "could not allocate mmap offset\n");
 153		return 0;
 154	}
 155
 156	return drm_vma_node_offset_addr(&obj->vma_node);
 157}
 158
 159static bool is_contiguous(struct omap_gem_object *omap_obj)
 160{
 161	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 162		return true;
 163
 164	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
 165		return true;
 166
 167	return false;
 168}
 169
 170/* -----------------------------------------------------------------------------
 171 * Eviction
 172 */
 173
 174static void evict_entry(struct drm_gem_object *obj,
 175		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 176{
 177	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 178	struct omap_drm_private *priv = obj->dev->dev_private;
 179	int n = priv->usergart[fmt].height;
 180	size_t size = PAGE_SIZE * n;
 181	loff_t off = mmap_offset(obj) +
 182			(entry->obj_pgoff << PAGE_SHIFT);
 183	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 184
 185	if (m > 1) {
 186		int i;
  187		/* if stride is larger than PAGE_SIZE then sparse mapping: */
 188		for (i = n; i > 0; i--) {
 189			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 190					    off, PAGE_SIZE, 1);
 191			off += PAGE_SIZE * m;
 192		}
 193	} else {
 194		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 195				    off, size, 1);
 196	}
 197
 198	entry->obj = NULL;
 199}
 200
 201/* Evict a buffer from usergart, if it is mapped there */
 202static void evict(struct drm_gem_object *obj)
 203{
 204	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 205	struct omap_drm_private *priv = obj->dev->dev_private;
 206
 207	if (omap_obj->flags & OMAP_BO_TILED) {
 208		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 209		int i;
 210
 211		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 212			struct omap_drm_usergart_entry *entry =
 213				&priv->usergart[fmt].entry[i];
 214
 215			if (entry->obj == obj)
 216				evict_entry(obj, fmt, entry);
 217		}
 218	}
 219}
 220
 221/* -----------------------------------------------------------------------------
 222 * Page Management
 223 */
 224
 225/** ensure backing pages are allocated */
 226static int omap_gem_attach_pages(struct drm_gem_object *obj)
 227{
 228	struct drm_device *dev = obj->dev;
 229	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 230	struct page **pages;
 231	int npages = obj->size >> PAGE_SHIFT;
 232	int i, ret;
 233	dma_addr_t *addrs;
 234
 235	WARN_ON(omap_obj->pages);
 236
 237	pages = drm_gem_get_pages(obj);
 238	if (IS_ERR(pages)) {
 239		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 240		return PTR_ERR(pages);
 241	}
 242
 243	/* for non-cached buffers, ensure the new pages are clean because
 244	 * DSS, GPU, etc. are not cache coherent:
 245	 */
 246	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 247		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
 248		if (!addrs) {
 249			ret = -ENOMEM;
 250			goto free_pages;
 251		}
 252
 253		for (i = 0; i < npages; i++) {
 254			addrs[i] = dma_map_page(dev->dev, pages[i],
 255					0, PAGE_SIZE, DMA_TO_DEVICE);
 256
 257			if (dma_mapping_error(dev->dev, addrs[i])) {
 258				dev_warn(dev->dev,
 259					"%s: failed to map page\n", __func__);
 260
 261				for (i = i - 1; i >= 0; --i) {
 262					dma_unmap_page(dev->dev, addrs[i],
 263						PAGE_SIZE, DMA_TO_DEVICE);
 264				}
 265
 266				ret = -ENOMEM;
 267				goto free_addrs;
 268			}
 269		}
 270	} else {
 271		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
 272		if (!addrs) {
 273			ret = -ENOMEM;
 274			goto free_pages;
 275		}
 276	}
 277
 278	omap_obj->dma_addrs = addrs;
 279	omap_obj->pages = pages;
 280
 281	return 0;
 282
 283free_addrs:
 284	kfree(addrs);
 285free_pages:
 286	drm_gem_put_pages(obj, pages, true, false);
 287
 288	return ret;
 289}
 290
  291/* acquire pages when needed (for example, for DMA where a physically
  292 * contiguous buffer is not required)
 293 */
 294static int get_pages(struct drm_gem_object *obj, struct page ***pages)
 295{
 296	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 297	int ret = 0;
 298
 299	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
 300		ret = omap_gem_attach_pages(obj);
 301		if (ret) {
 302			dev_err(obj->dev->dev, "could not attach pages\n");
 303			return ret;
 304		}
 305	}
 306
 307	/* TODO: even phys-contig.. we should have a list of pages? */
 308	*pages = omap_obj->pages;
 309
 310	return 0;
 311}
 312
 313/** release backing pages */
 314static void omap_gem_detach_pages(struct drm_gem_object *obj)
 315{
 316	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 317	unsigned int npages = obj->size >> PAGE_SHIFT;
 318	unsigned int i;
 319
 320	for (i = 0; i < npages; i++) {
 321		if (omap_obj->dma_addrs[i])
 322			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
 323				       PAGE_SIZE, DMA_TO_DEVICE);
 324	}
 325
 326	kfree(omap_obj->dma_addrs);
 327	omap_obj->dma_addrs = NULL;
 328
 329	drm_gem_put_pages(obj, omap_obj->pages, true, false);
 330	omap_obj->pages = NULL;
 331}
 332
 333/* get buffer flags */
 334u32 omap_gem_flags(struct drm_gem_object *obj)
 335{
 336	return to_omap_bo(obj)->flags;
 337}
 338
 339u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 340{
 341	u64 offset;
 342
 343	mutex_lock(&obj->dev->struct_mutex);
 344	offset = mmap_offset(obj);
 345	mutex_unlock(&obj->dev->struct_mutex);
 346	return offset;
 347}
 348
 349/** get mmap size */
 350size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 351{
 352	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 353	size_t size = obj->size;
 354
 355	if (omap_obj->flags & OMAP_BO_TILED) {
 356		/* for tiled buffers, the virtual size has stride rounded up
 357		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 358		 * 32kb later!).  But we don't back the entire buffer with
 359		 * pages, only the valid picture part.. so need to adjust for
 360		 * this in the size used to mmap and generate mmap offset
 361		 */
 362		size = tiler_vsize(gem2fmt(omap_obj->flags),
 363				omap_obj->width, omap_obj->height);
 364	}
 365
 366	return size;
 367}
 368
 369/* -----------------------------------------------------------------------------
 370 * Fault Handling
 371 */
 372
 373/* Normal handling for the case of faulting in non-tiled buffers */
 374static int fault_1d(struct drm_gem_object *obj,
 375		struct vm_area_struct *vma, struct vm_fault *vmf)
 376{
 377	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 378	unsigned long pfn;
 379	pgoff_t pgoff;
 380
 381	/* We don't use vmf->pgoff since that has the fake offset: */
 382	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 383
 384	if (omap_obj->pages) {
 385		omap_gem_cpu_sync_page(obj, pgoff);
 386		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 387	} else {
 388		BUG_ON(!is_contiguous(omap_obj));
 389		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
 390	}
 391
 392	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 393			pfn, pfn << PAGE_SHIFT);
 394
 395	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 396}
 397
 398/* Special handling for the case of faulting in 2d tiled buffers */
 399static int fault_2d(struct drm_gem_object *obj,
 400		struct vm_area_struct *vma, struct vm_fault *vmf)
 401{
 402	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 403	struct omap_drm_private *priv = obj->dev->dev_private;
 404	struct omap_drm_usergart_entry *entry;
 405	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 406	struct page *pages[64];  /* XXX is this too much to have on stack? */
 407	unsigned long pfn;
 408	pgoff_t pgoff, base_pgoff;
 409	unsigned long vaddr;
 410	int i, ret, slots;
 411
 412	/*
 413	 * Note the height of the slot is also equal to the number of pages
  414	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
 415	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 416	 */
 417	const int n = priv->usergart[fmt].height;
 418	const int n_shift = priv->usergart[fmt].height_shift;
 419
 420	/*
 421	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
  422	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
 423	 * into account in some of the math, so figure out virtual stride
 424	 * in pages
 425	 */
 426	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 427
 428	/* We don't use vmf->pgoff since that has the fake offset: */
 429	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 430
 431	/*
 432	 * Actual address we start mapping at is rounded down to previous slot
 433	 * boundary in the y direction:
 434	 */
 435	base_pgoff = round_down(pgoff, m << n_shift);
 436
 437	/* figure out buffer width in slots */
 438	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 439
 440	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 441
 442	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 443
 444	/* evict previous buffer using this usergart entry, if any: */
 445	if (entry->obj)
 446		evict_entry(entry->obj, fmt, entry);
 447
 448	entry->obj = obj;
 449	entry->obj_pgoff = base_pgoff;
 450
 451	/* now convert base_pgoff to phys offset from virt offset: */
 452	base_pgoff = (base_pgoff >> n_shift) * slots;
 453
 454	/* for wider-than 4k.. figure out which part of the slot-row we want: */
 455	if (m > 1) {
 456		int off = pgoff % m;
 457		entry->obj_pgoff += off;
 458		base_pgoff /= m;
 459		slots = min(slots - (off << n_shift), n);
 460		base_pgoff += off << n_shift;
 461		vaddr += off << PAGE_SHIFT;
 462	}
 463
 464	/*
 465	 * Map in pages. Beyond the valid pixel part of the buffer, we set
 466	 * pages[i] to NULL to get a dummy page mapped in.. if someone
 467	 * reads/writes it they will get random/undefined content, but at
 468	 * least it won't be corrupting whatever other random page used to
 469	 * be mapped in, or other undefined behavior.
 470	 */
 471	memcpy(pages, &omap_obj->pages[base_pgoff],
 472			sizeof(struct page *) * slots);
 473	memset(pages + slots, 0,
 474			sizeof(struct page *) * (n - slots));
 475
 476	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 477	if (ret) {
 478		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
 479		return ret;
 480	}
 481
 482	pfn = entry->dma_addr >> PAGE_SHIFT;
 483
 484	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 485			pfn, pfn << PAGE_SHIFT);
 486
 487	for (i = n; i > 0; i--) {
 488		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 489		pfn += priv->usergart[fmt].stride_pfn;
 490		vaddr += PAGE_SIZE * m;
 491	}
 492
 493	/* simple round-robin: */
 494	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 495				 % NUM_USERGART_ENTRIES;
 496
 497	return 0;
 498}
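/*
 * Illustrative walk-through of the 2d fault math above (numbers picked for
 * the example, not taken from hardware): with slot height n = 64
 * (n_shift = 6) and a buffer row that fits in one page (m = 1), a fault at
 * pgoff = 70 gives
 *
 *	base_pgoff = round_down(70, 1 << 6)          = 64    (virtual pages)
 *	vaddr      = vmf->address - (70 - 64) * PAGE_SIZE
 *	base_pgoff = (64 >> 6) * slots               = slots (physical pages)
 *
 * i.e. the 'slots' backing pages behind rows 64..127 are pinned into the
 * reserved usergart block and mapped starting at vaddr.
 */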
 499
 500/**
 501 * omap_gem_fault		-	pagefault handler for GEM objects
 502 * @vmf: fault detail
 503 *
 504 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 505 * does most of the work for us including the actual map/unmap calls
 506 * but we need to do the actual page work.
 507 *
 508 * The VMA was set up by GEM. In doing so it also ensured that the
 509 * vma->vm_private_data points to the GEM object that is backing this
 510 * mapping.
 511 */
 512int omap_gem_fault(struct vm_fault *vmf)
 513{
 514	struct vm_area_struct *vma = vmf->vma;
 515	struct drm_gem_object *obj = vma->vm_private_data;
 516	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 517	struct drm_device *dev = obj->dev;
 518	struct page **pages;
 519	int ret;
 520
 521	/* Make sure we don't parallel update on a fault, nor move or remove
 522	 * something from beneath our feet
 523	 */
 524	mutex_lock(&dev->struct_mutex);
 525
 526	/* if a shmem backed object, make sure we have pages attached now */
 527	ret = get_pages(obj, &pages);
 528	if (ret)
 529		goto fail;
 530
 531	/* where should we do corresponding put_pages().. we are mapping
 532	 * the original page, rather than thru a GART, so we can't rely
 533	 * on eviction to trigger this.  But munmap() or all mappings should
 534	 * probably trigger put_pages()?
 535	 */
 536
 537	if (omap_obj->flags & OMAP_BO_TILED)
 538		ret = fault_2d(obj, vma, vmf);
 539	else
 540		ret = fault_1d(obj, vma, vmf);
 541
 542
 543fail:
 544	mutex_unlock(&dev->struct_mutex);
 545	switch (ret) {
 546	case 0:
 547	case -ERESTARTSYS:
 548	case -EINTR:
 549	case -EBUSY:
 550		/*
 551		 * EBUSY is ok: this just means that another thread
 552		 * already did the job.
 553		 */
 554		return VM_FAULT_NOPAGE;
 555	case -ENOMEM:
 556		return VM_FAULT_OOM;
 557	default:
 558		return VM_FAULT_SIGBUS;
 559	}
 560}
 561
 562/** We override mainly to fix up some of the vm mapping flags.. */
 563int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 564{
 565	int ret;
 566
 567	ret = drm_gem_mmap(filp, vma);
 568	if (ret) {
 569		DBG("mmap failed: %d", ret);
 570		return ret;
 571	}
 572
 573	return omap_gem_mmap_obj(vma->vm_private_data, vma);
 574}
 575
 576int omap_gem_mmap_obj(struct drm_gem_object *obj,
 577		struct vm_area_struct *vma)
 578{
 579	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 580
 581	vma->vm_flags &= ~VM_PFNMAP;
 582	vma->vm_flags |= VM_MIXEDMAP;
 583
 584	if (omap_obj->flags & OMAP_BO_WC) {
 585		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 586	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 587		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 588	} else {
 589		/*
 590		 * We do have some private objects, at least for scanout buffers
 591		 * on hardware without DMM/TILER.  But these are allocated write-
 592		 * combine
 593		 */
 594		if (WARN_ON(!obj->filp))
 595			return -EINVAL;
 596
 597		/*
 598		 * Shunt off cached objs to shmem file so they have their own
 599		 * address_space (so unmap_mapping_range does what we want,
 600		 * in particular in the case of mmap'd dmabufs)
 601		 */
 602		fput(vma->vm_file);
 603		vma->vm_pgoff = 0;
 604		vma->vm_file  = get_file(obj->filp);
 605
 606		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 607	}
 608
 609	return 0;
 610}
 611
 612/* -----------------------------------------------------------------------------
 613 * Dumb Buffers
 614 */
 615
 616/**
 617 * omap_gem_dumb_create	-	create a dumb buffer
  618 * @file: our client file
 619 * @dev: our device
 620 * @args: the requested arguments copied from userspace
 621 *
 622 * Allocate a buffer suitable for use for a frame buffer of the
 623 * form described by user space. Give userspace a handle by which
 624 * to reference it.
 625 */
 626int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 627		struct drm_mode_create_dumb *args)
 628{
 629	union omap_gem_size gsize;
 630
 631	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 632
 633	args->size = PAGE_ALIGN(args->pitch * args->height);
 634
 635	gsize = (union omap_gem_size){
 636		.bytes = args->size,
 637	};
 638
 639	return omap_gem_new_handle(dev, file, gsize,
 640			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 641}
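/*
 * Worked example of the dumb-buffer math above (illustrative numbers only):
 * a 1920x1080 request at 32 bpp gives
 *
 *	pitch = DIV_ROUND_UP(1920 * 32, 8)  = 7680 bytes
 *	size  = PAGE_ALIGN(7680 * 1080)     = 8294400 bytes (2025 pages)
 *
 * which is then allocated as an OMAP_BO_SCANOUT | OMAP_BO_WC object and
 * returned to userspace as a handle.
 */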
 642
 643/**
  644 * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
 645 * @file: our drm client file
 646 * @dev: drm device
 647 * @handle: GEM handle to the object (from dumb_create)
 648 *
 649 * Do the necessary setup to allow the mapping of the frame buffer
 650 * into user memory. We don't have to do much here at the moment.
 651 */
 652int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 653		u32 handle, u64 *offset)
 654{
 655	struct drm_gem_object *obj;
 656	int ret = 0;
 657
 658	/* GEM does all our handle to object mapping */
 659	obj = drm_gem_object_lookup(file, handle);
 660	if (obj == NULL) {
 661		ret = -ENOENT;
 662		goto fail;
 663	}
 664
 665	*offset = omap_gem_mmap_offset(obj);
 666
 667	drm_gem_object_unreference_unlocked(obj);
 668
 669fail:
 670	return ret;
 671}
 672
 673#ifdef CONFIG_DRM_FBDEV_EMULATION
 674/* Set scrolling position.  This allows us to implement fast scrolling
  675 * for the console.
 676 *
 677 * Call only from non-atomic contexts.
 678 */
 679int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 680{
 681	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 682	u32 npages = obj->size >> PAGE_SHIFT;
 683	int ret = 0;
 684
 685	if (roll > npages) {
 686		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
 687		return -EINVAL;
 688	}
 689
 690	omap_obj->roll = roll;
 691
 692	mutex_lock(&obj->dev->struct_mutex);
 693
 694	/* if we aren't mapped yet, we don't need to do anything */
 695	if (omap_obj->block) {
 696		struct page **pages;
 697		ret = get_pages(obj, &pages);
 698		if (ret)
 699			goto fail;
 700		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
 701		if (ret)
 702			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 703	}
 704
 705fail:
 706	mutex_unlock(&obj->dev->struct_mutex);
 707
 708	return ret;
 709}
 710#endif
 711
 712/* -----------------------------------------------------------------------------
 713 * Memory Management & DMA Sync
 714 */
 715
 716/*
 717 * shmem buffers that are mapped cached are not coherent.
 718 *
 719 * We keep track of dirty pages using page faulting to perform cache management.
 720 * When a page is mapped to the CPU in read/write mode the device can't access
 721 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 722 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 723 * unmapped from the CPU.
 724 */
 725static inline bool is_cached_coherent(struct drm_gem_object *obj)
 726{
 727	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 728
 729	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 730		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 731}
 732
 733/* Sync the buffer for CPU access.. note pages should already be
  734 * attached, i.e. via omap_gem_get_pages()
 735 */
 736void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 737{
 738	struct drm_device *dev = obj->dev;
 739	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 740
 741	if (is_cached_coherent(obj))
 742		return;
 743
 744	if (omap_obj->dma_addrs[pgoff]) {
 745		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 746				PAGE_SIZE, DMA_TO_DEVICE);
 747		omap_obj->dma_addrs[pgoff] = 0;
 748	}
 749}
 750
 751/* sync the buffer for DMA access */
 752void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 753		enum dma_data_direction dir)
 754{
 755	struct drm_device *dev = obj->dev;
 756	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 757	int i, npages = obj->size >> PAGE_SHIFT;
 758	struct page **pages = omap_obj->pages;
 759	bool dirty = false;
 760
 761	if (is_cached_coherent(obj))
 762		return;
 763
 764	for (i = 0; i < npages; i++) {
 765		if (!omap_obj->dma_addrs[i]) {
 766			dma_addr_t addr;
 767
 768			addr = dma_map_page(dev->dev, pages[i], 0,
 769					    PAGE_SIZE, dir);
 770			if (dma_mapping_error(dev->dev, addr)) {
 771				dev_warn(dev->dev, "%s: failed to map page\n",
 772					__func__);
 773				break;
 774			}
 775
 776			dirty = true;
 777			omap_obj->dma_addrs[i] = addr;
 778		}
 779	}
 780
 781	if (dirty) {
 782		unmap_mapping_range(obj->filp->f_mapping, 0,
 783				    omap_gem_mmap_size(obj), 1);
 784	}
 785}
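/*
 * Summary of the fault-based cache management above (descriptive comment
 * only): for a CPU-cached shmem buffer, a CPU access faults in
 * omap_gem_fault(), and omap_gem_cpu_sync_page() then dma_unmap_page()s
 * that page and clears dma_addrs[pgoff], handing it to the CPU.  Before
 * device access, omap_gem_dma_sync_buffer() dma_map_page()s every
 * CPU-owned page again and unmap_mapping_range()s the object, so the next
 * CPU access faults and the cycle repeats.
 */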
 786
 787/**
 788 * omap_gem_pin() - Pin a GEM object in memory
 789 * @obj: the GEM object
 790 * @dma_addr: the DMA address
 791 *
 792 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 793 * object's DMA address. If the buffer is not physically contiguous it will be
 794 * remapped through the TILER to provide a contiguous view.
 795 *
 796 * Pins are reference-counted, calling this function multiple times is allowed
  797 * as long as the corresponding omap_gem_unpin() calls are balanced.
 798 *
 799 * Return 0 on success or a negative error code otherwise.
 800 */
 801int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 802{
 803	struct omap_drm_private *priv = obj->dev->dev_private;
 804	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 805	int ret = 0;
 806
 807	mutex_lock(&obj->dev->struct_mutex);
 808
 809	if (!is_contiguous(omap_obj) && priv->has_dmm) {
 810		if (omap_obj->dma_addr_cnt == 0) {
 811			struct page **pages;
 812			u32 npages = obj->size >> PAGE_SHIFT;
 813			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 814			struct tiler_block *block;
 815
 816			BUG_ON(omap_obj->block);
 817
 818			ret = get_pages(obj, &pages);
 819			if (ret)
 820				goto fail;
 821
 822			if (omap_obj->flags & OMAP_BO_TILED) {
 823				block = tiler_reserve_2d(fmt,
 824						omap_obj->width,
 825						omap_obj->height, 0);
 826			} else {
 827				block = tiler_reserve_1d(obj->size);
 828			}
 829
 830			if (IS_ERR(block)) {
 831				ret = PTR_ERR(block);
 832				dev_err(obj->dev->dev,
 833					"could not remap: %d (%d)\n", ret, fmt);
 834				goto fail;
 835			}
 836
 837			/* TODO: enable async refill.. */
 838			ret = tiler_pin(block, pages, npages,
 839					omap_obj->roll, true);
 840			if (ret) {
 841				tiler_release(block);
 842				dev_err(obj->dev->dev,
 843						"could not pin: %d\n", ret);
 844				goto fail;
 845			}
 846
 847			omap_obj->dma_addr = tiler_ssptr(block);
 848			omap_obj->block = block;
 849
 850			DBG("got dma address: %pad", &omap_obj->dma_addr);
 851		}
 852
 853		omap_obj->dma_addr_cnt++;
 854
 855		*dma_addr = omap_obj->dma_addr;
 856	} else if (is_contiguous(omap_obj)) {
 857		*dma_addr = omap_obj->dma_addr;
 858	} else {
 859		ret = -EINVAL;
 860		goto fail;
 861	}
 862
 863fail:
 864	mutex_unlock(&obj->dev->struct_mutex);
 865
 866	return ret;
 867}
 868
 869/**
 870 * omap_gem_unpin() - Unpin a GEM object from memory
 871 * @obj: the GEM object
 872 *
 873 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
  874 * reference-counted, the actual unpin will only be performed when the number
 875 * of calls to this function matches the number of calls to omap_gem_pin().
 876 */
 877void omap_gem_unpin(struct drm_gem_object *obj)
 878{
 879	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 880	int ret;
 881
 882	mutex_lock(&obj->dev->struct_mutex);
 883	if (omap_obj->dma_addr_cnt > 0) {
 884		omap_obj->dma_addr_cnt--;
 885		if (omap_obj->dma_addr_cnt == 0) {
 886			ret = tiler_unpin(omap_obj->block);
 887			if (ret) {
 888				dev_err(obj->dev->dev,
 889					"could not unpin pages: %d\n", ret);
 890			}
 891			ret = tiler_release(omap_obj->block);
 892			if (ret) {
 893				dev_err(obj->dev->dev,
 894					"could not release unmap: %d\n", ret);
 895			}
 896			omap_obj->dma_addr = 0;
 897			omap_obj->block = NULL;
 898		}
 899	}
 900
 901	mutex_unlock(&obj->dev->struct_mutex);
 902}
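/*
 * Minimal usage sketch for the pin/unpin API above (hypothetical caller,
 * not part of this file):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	write_scanout_address(dma_addr);	(hypothetical helper)
 *	...
 *	omap_gem_unpin(obj);			(when scanout is done)
 *
 * Pins nest, so every successful omap_gem_pin() must be balanced by a
 * matching omap_gem_unpin().
 */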
 903
 904/* Get rotated scanout address (only valid if already pinned), at the
 905 * specified orientation and x,y offset from top-left corner of buffer
 906 * (only valid for tiled 2d buffers)
 907 */
 908int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 909		int x, int y, dma_addr_t *dma_addr)
 910{
 911	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 912	int ret = -EINVAL;
 913
 914	mutex_lock(&obj->dev->struct_mutex);
 915	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
 916			(omap_obj->flags & OMAP_BO_TILED)) {
 917		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 918		ret = 0;
 919	}
 920	mutex_unlock(&obj->dev->struct_mutex);
 921	return ret;
 922}
 923
 924/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 925int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 926{
 927	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 928	int ret = -EINVAL;
 929	if (omap_obj->flags & OMAP_BO_TILED)
 930		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 931	return ret;
 932}
 933
 934/* if !remap, and we don't have pages backing, then fail, rather than
 935 * increasing the pin count (which we don't really do yet anyways,
 936 * because we don't support swapping pages back out).  And 'remap'
 937 * might not be quite the right name, but I wanted to keep it working
 938 * similarly to omap_gem_pin().  Note though that mutex is not
 939 * aquired if !remap (because this can be called in atomic ctxt),
  940 * acquired if !remap (because this can be called in atomic ctxt),
 941 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 942 * required (and should not be made).
 943 */
 944int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 945		bool remap)
 946{
 947	int ret;
 948	if (!remap) {
 949		struct omap_gem_object *omap_obj = to_omap_bo(obj);
 950		if (!omap_obj->pages)
 951			return -ENOMEM;
 952		*pages = omap_obj->pages;
 953		return 0;
 954	}
 955	mutex_lock(&obj->dev->struct_mutex);
 956	ret = get_pages(obj, pages);
 957	mutex_unlock(&obj->dev->struct_mutex);
 958	return ret;
 959}
 960
  961/* release pages when DMA is no longer being performed */
 962int omap_gem_put_pages(struct drm_gem_object *obj)
 963{
 964	/* do something here if we dynamically attach/detach pages.. at
 965	 * least they would no longer need to be pinned if everyone has
 966	 * released the pages..
 967	 */
 968	return 0;
 969}
 970
 971#ifdef CONFIG_DRM_FBDEV_EMULATION
 972/* Get kernel virtual address for CPU access.. this more or less only
 973 * exists for omap_fbdev.  This should be called with struct_mutex
 974 * held.
 975 */
 976void *omap_gem_vaddr(struct drm_gem_object *obj)
 977{
 978	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 979	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 980	if (!omap_obj->vaddr) {
 981		struct page **pages;
 982		int ret = get_pages(obj, &pages);
 983		if (ret)
 984			return ERR_PTR(ret);
 985		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 986				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 987	}
 988	return omap_obj->vaddr;
 989}
 990#endif
 991
 992/* -----------------------------------------------------------------------------
 993 * Power Management
 994 */
 995
 996#ifdef CONFIG_PM
 997/* re-pin objects in DMM in resume path: */
 998int omap_gem_resume(struct drm_device *dev)
 999{
1000	struct omap_drm_private *priv = dev->dev_private;
1001	struct omap_gem_object *omap_obj;
1002	int ret = 0;
1003
1004	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1005		if (omap_obj->block) {
1006			struct drm_gem_object *obj = &omap_obj->base;
1007			u32 npages = obj->size >> PAGE_SHIFT;
1008
1009			WARN_ON(!omap_obj->pages);  /* this can't happen */
1010			ret = tiler_pin(omap_obj->block,
1011					omap_obj->pages, npages,
1012					omap_obj->roll, true);
1013			if (ret) {
1014				dev_err(dev->dev, "could not repin: %d\n", ret);
1015				return ret;
1016			}
1017		}
1018	}
1019
1020	return 0;
1021}
1022#endif
1023
1024/* -----------------------------------------------------------------------------
1025 * DebugFS
1026 */
1027
1028#ifdef CONFIG_DEBUG_FS
1029void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1030{
1031	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1032	u64 off;
1033
1034	off = drm_vma_node_start(&obj->vma_node);
1035
1036	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1037			omap_obj->flags, obj->name, kref_read(&obj->refcount),
1038			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
1039			omap_obj->vaddr, omap_obj->roll);
1040
1041	if (omap_obj->flags & OMAP_BO_TILED) {
1042		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1043		if (omap_obj->block) {
1044			struct tcm_area *area = &omap_obj->block->area;
1045			seq_printf(m, " (%dx%d, %dx%d)",
1046					area->p0.x, area->p0.y,
1047					area->p1.x, area->p1.y);
1048		}
1049	} else {
1050		seq_printf(m, " %zu", obj->size);
1051	}
1052
1053	seq_printf(m, "\n");
1054}
1055
1056void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1057{
1058	struct omap_gem_object *omap_obj;
1059	int count = 0;
1060	size_t size = 0;
1061
1062	list_for_each_entry(omap_obj, list, mm_list) {
1063		struct drm_gem_object *obj = &omap_obj->base;
1064		seq_printf(m, "   ");
1065		omap_gem_describe(obj, m);
1066		count++;
1067		size += obj->size;
1068	}
1069
1070	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1071}
1072#endif
1073
1074/* -----------------------------------------------------------------------------
1075 * Constructor & Destructor
1076 */
1077
1078void omap_gem_free_object(struct drm_gem_object *obj)
1079{
1080	struct drm_device *dev = obj->dev;
1081	struct omap_drm_private *priv = dev->dev_private;
1082	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1083
1084	evict(obj);
1085
1086	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1087
1088	spin_lock(&priv->list_lock);
1089	list_del(&omap_obj->mm_list);
1090	spin_unlock(&priv->list_lock);
1091
1092	/* this means the object is still pinned.. which really should
1093	 * not happen.  I think..
1094	 */
1095	WARN_ON(omap_obj->dma_addr_cnt > 0);
1096
1097	if (omap_obj->pages) {
1098		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1099			kfree(omap_obj->pages);
1100		else
1101			omap_gem_detach_pages(obj);
1102	}
1103
1104	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1105		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1106			    omap_obj->dma_addr);
1107	} else if (omap_obj->vaddr) {
1108		vunmap(omap_obj->vaddr);
1109	} else if (obj->import_attach) {
1110		drm_prime_gem_destroy(obj, omap_obj->sgt);
1111	}
1112
1113	drm_gem_object_release(obj);
1114
1115	kfree(omap_obj);
1116}
1117
1118/* GEM buffer object constructor */
1119struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1120		union omap_gem_size gsize, u32 flags)
1121{
1122	struct omap_drm_private *priv = dev->dev_private;
1123	struct omap_gem_object *omap_obj;
1124	struct drm_gem_object *obj;
1125	struct address_space *mapping;
1126	size_t size;
1127	int ret;
1128
1129	/* Validate the flags and compute the memory and cache flags. */
1130	if (flags & OMAP_BO_TILED) {
1131		if (!priv->usergart) {
1132			dev_err(dev->dev, "Tiled buffers require DMM\n");
1133			return NULL;
1134		}
1135
1136		/*
 1137		 * Tiled buffers are always shmem page backed. When they are
1138		 * scanned out, they are remapped into DMM/TILER.
1139		 */
1140		flags &= ~OMAP_BO_SCANOUT;
1141		flags |= OMAP_BO_MEM_SHMEM;
1142
1143		/*
1144		 * Currently don't allow cached buffers. There is some caching
1145		 * stuff that needs to be handled better.
1146		 */
1147		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1148		flags |= tiler_get_cpu_cache_flags();
1149	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1150		/*
1151		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1152		 * tiled. However, to lower the pressure on memory allocation,
1153		 * use contiguous memory only if no TILER is available.
1154		 */
1155		flags |= OMAP_BO_MEM_DMA_API;
1156	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1157		/*
1158		 * All other buffers not backed by dma_buf are shmem-backed.
1159		 */
1160		flags |= OMAP_BO_MEM_SHMEM;
1161	}
1162
 1163	/* Allocate and initialize the OMAP GEM object. */
1164	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1165	if (!omap_obj)
1166		return NULL;
1167
1168	obj = &omap_obj->base;
1169	omap_obj->flags = flags;
1170
1171	if (flags & OMAP_BO_TILED) {
1172		/*
1173		 * For tiled buffers align dimensions to slot boundaries and
1174		 * calculate size based on aligned dimensions.
1175		 */
1176		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1177			    &gsize.tiled.height);
1178
1179		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1180				  gsize.tiled.height);
1181
1182		omap_obj->width = gsize.tiled.width;
1183		omap_obj->height = gsize.tiled.height;
1184	} else {
1185		size = PAGE_ALIGN(gsize.bytes);
1186	}
1187
1188	/* Initialize the GEM object. */
1189	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1190		drm_gem_private_object_init(dev, obj, size);
1191	} else {
1192		ret = drm_gem_object_init(dev, obj, size);
1193		if (ret)
1194			goto err_free;
1195
1196		mapping = obj->filp->f_mapping;
1197		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1198	}
1199
1200	/* Allocate memory if needed. */
1201	if (flags & OMAP_BO_MEM_DMA_API) {
1202		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1203					       &omap_obj->dma_addr,
1204					       GFP_KERNEL);
1205		if (!omap_obj->vaddr)
1206			goto err_release;
1207	}
1208
1209	spin_lock(&priv->list_lock);
1210	list_add(&omap_obj->mm_list, &priv->obj_list);
1211	spin_unlock(&priv->list_lock);
1212
1213	return obj;
1214
1215err_release:
1216	drm_gem_object_release(obj);
1217err_free:
1218	kfree(omap_obj);
1219	return NULL;
1220}
1221
1222struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1223					   struct sg_table *sgt)
1224{
1225	struct omap_drm_private *priv = dev->dev_private;
1226	struct omap_gem_object *omap_obj;
1227	struct drm_gem_object *obj;
1228	union omap_gem_size gsize;
1229
 1230	/* Without a DMM, only physically contiguous buffers can be supported. */
1231	if (sgt->orig_nents != 1 && !priv->has_dmm)
1232		return ERR_PTR(-EINVAL);
1233
1234	mutex_lock(&dev->struct_mutex);
1235
1236	gsize.bytes = PAGE_ALIGN(size);
1237	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1238	if (!obj) {
1239		obj = ERR_PTR(-ENOMEM);
1240		goto done;
1241	}
1242
1243	omap_obj = to_omap_bo(obj);
1244	omap_obj->sgt = sgt;
1245
1246	if (sgt->orig_nents == 1) {
1247		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1248	} else {
1249		/* Create pages list from sgt */
1250		struct sg_page_iter iter;
1251		struct page **pages;
1252		unsigned int npages;
1253		unsigned int i = 0;
1254
1255		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1256		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1257		if (!pages) {
1258			omap_gem_free_object(obj);
1259			obj = ERR_PTR(-ENOMEM);
1260			goto done;
1261		}
1262
1263		omap_obj->pages = pages;
1264
1265		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
1266			pages[i++] = sg_page_iter_page(&iter);
1267			if (i > npages)
1268				break;
1269		}
1270
1271		if (WARN_ON(i != npages)) {
1272			omap_gem_free_object(obj);
1273			obj = ERR_PTR(-ENOMEM);
1274			goto done;
1275		}
1276	}
1277
1278done:
1279	mutex_unlock(&dev->struct_mutex);
1280	return obj;
1281}
1282
 1283/* convenience method to construct a GEM buffer object and userspace handle */
1284int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1285		union omap_gem_size gsize, u32 flags, u32 *handle)
1286{
1287	struct drm_gem_object *obj;
1288	int ret;
1289
1290	obj = omap_gem_new(dev, gsize, flags);
1291	if (!obj)
1292		return -ENOMEM;
1293
1294	ret = drm_gem_handle_create(file, obj, handle);
1295	if (ret) {
1296		omap_gem_free_object(obj);
1297		return ret;
1298	}
1299
1300	/* drop reference from allocate - handle holds it now */
1301	drm_gem_object_unreference_unlocked(obj);
1302
1303	return 0;
1304}
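/*
 * Usage sketch for the constructors above (illustrative only; the tiled
 * flag comes from the omap_drm UAPI header): callers fill a
 * union omap_gem_size and pass allocation flags, e.g.
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	ret = omap_gem_new_handle(dev, file, gsize, OMAP_BO_WC, &handle);
 *
 * or, for a 2d tiled buffer (requires DMM/TILER):
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = width, .height = height },
 *	};
 *	ret = omap_gem_new_handle(dev, file, gsize,
 *				  OMAP_BO_TILED_16 | OMAP_BO_WC, &handle);
 */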
1305
1306/* -----------------------------------------------------------------------------
1307 * Init & Cleanup
1308 */
1309
1310/* If DMM is used, we need to set some stuff up.. */
1311void omap_gem_init(struct drm_device *dev)
1312{
1313	struct omap_drm_private *priv = dev->dev_private;
1314	struct omap_drm_usergart *usergart;
1315	const enum tiler_fmt fmts[] = {
1316			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1317	};
1318	int i, j;
1319
1320	if (!dmm_is_available()) {
 1321		/* DMM is only supported on OMAP4 and later, so this isn't fatal */
 1322		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1323		return;
1324	}
1325
1326	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1327	if (!usergart)
1328		return;
1329
1330	/* reserve 4k aligned/wide regions for userspace mappings: */
1331	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1332		u16 h = 1, w = PAGE_SIZE >> i;
1333
1334		tiler_align(fmts[i], &w, &h);
 1335	/* note: since each region is one 4kb page wide and uses the minimum
 1336	 * number of rows, the height ends up being the same as the
1337		 * # of pages in the region
1338		 */
1339		usergart[i].height = h;
1340		usergart[i].height_shift = ilog2(h);
1341		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1342		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1343		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1344			struct omap_drm_usergart_entry *entry;
1345			struct tiler_block *block;
1346
1347			entry = &usergart[i].entry[j];
1348			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1349			if (IS_ERR(block)) {
1350				dev_err(dev->dev,
1351						"reserve failed: %d, %d, %ld\n",
1352						i, j, PTR_ERR(block));
1353				return;
1354			}
1355			entry->dma_addr = tiler_ssptr(block);
1356			entry->block = block;
1357
1358			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1359					&entry->dma_addr,
1360					usergart[i].stride_pfn << PAGE_SHIFT);
1361		}
1362	}
1363
1364	priv->usergart = usergart;
1365	priv->has_dmm = true;
1366}
1367
1368void omap_gem_deinit(struct drm_device *dev)
1369{
1370	struct omap_drm_private *priv = dev->dev_private;
1371
1372	/* I believe we can rely on there being no more outstanding GEM
1373	 * objects which could depend on usergart/dmm at this point.
1374	 */
1375	kfree(priv->usergart);
1376}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
   4 * Author: Rob Clark <rob.clark@linaro.org>
   5 */
   6
   7#include <linux/dma-mapping.h>
   8#include <linux/seq_file.h>
   9#include <linux/shmem_fs.h>
  10#include <linux/spinlock.h>
  11#include <linux/pfn_t.h>
  12
  13#include <drm/drm_prime.h>
  14#include <drm/drm_vma_manager.h>
  15
  16#include "omap_drv.h"
  17#include "omap_dmm_tiler.h"
  18
  19/*
  20 * GEM buffer object implementation.
  21 */
  22
  23/* note: we use upper 8 bits of flags for driver-internal flags: */
  24#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
  25#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
  26#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
  27
  28struct omap_gem_object {
  29	struct drm_gem_object base;
  30
  31	struct list_head mm_list;
  32
  33	u32 flags;
  34
  35	/** width/height for tiled formats (rounded up to slot boundaries) */
  36	u16 width, height;
  37
  38	/** roll applied when mapping to DMM */
  39	u32 roll;
  40
  41	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
  42	struct mutex lock;
  43
  44	/**
  45	 * dma_addr contains the buffer DMA address. It is valid for
  46	 *
  47	 * - buffers allocated through the DMA mapping API (with the
  48	 *   OMAP_BO_MEM_DMA_API flag set)
  49	 *
  50	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  51	 *   if they are physically contiguous (when sgt->orig_nents == 1)
  52	 *
  53	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
  54	 *   which case the DMA address points to the TILER aperture
  55	 *
  56	 * Physically contiguous buffers have their DMA address equal to the
  57	 * physical address as we don't remap those buffers through the TILER.
  58	 *
  59	 * Buffers mapped to the TILER have their DMA address pointing to the
  60	 * TILER aperture. As TILER mappings are refcounted (through
  61	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
  62	 * to ensure that the mapping won't disappear unexpectedly. References
  63	 * must be released with omap_gem_unpin().
  64	 */
  65	dma_addr_t dma_addr;
  66
  67	/**
  68	 * # of users of dma_addr
  69	 */
  70	refcount_t dma_addr_cnt;
  71
  72	/**
   73	 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF flag
  74	 * is set and the sgt field is valid.
  75	 */
  76	struct sg_table *sgt;
  77
  78	/**
  79	 * tiler block used when buffer is remapped in DMM/TILER.
  80	 */
  81	struct tiler_block *block;
  82
  83	/**
  84	 * Array of backing pages, if allocated.  Note that pages are never
  85	 * allocated for buffers originally allocated from contiguous memory
  86	 */
  87	struct page **pages;
  88
  89	/** addresses corresponding to pages in above array */
  90	dma_addr_t *dma_addrs;
  91
  92	/**
  93	 * Virtual address, if mapped.
  94	 */
  95	void *vaddr;
  96};
  97
  98#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
  99
 100/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 101 * not necessarily pinned in TILER all the time, and (b) when they are
 102 * they are not necessarily page aligned, we reserve one or more small
 103 * regions in each of the 2d containers to use as a user-GART where we
 104 * can create a second page-aligned mapping of parts of the buffer
 105 * being accessed from userspace.
 106 *
 107 * Note that we could optimize slightly when we know that multiple
 108 * tiler containers are backed by the same PAT.. but I'll leave that
 109 * for later..
 110 */
 111#define NUM_USERGART_ENTRIES 2
 112struct omap_drm_usergart_entry {
 113	struct tiler_block *block;	/* the reserved tiler block */
 114	dma_addr_t dma_addr;
 115	struct drm_gem_object *obj;	/* the current pinned obj */
 116	pgoff_t obj_pgoff;		/* page offset of obj currently
 117					   mapped in */
 118};
 119
 120struct omap_drm_usergart {
 121	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 122	int height;				/* height in rows */
 123	int height_shift;		/* ilog2(height in rows) */
 124	int slot_shift;			/* ilog2(width per slot) */
 125	int stride_pfn;			/* stride in pages */
 126	int last;				/* index of last used entry */
 127};
 128
 129/* -----------------------------------------------------------------------------
 130 * Helpers
 131 */
 132
 133/** get mmap offset */
 134u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 135{
 136	struct drm_device *dev = obj->dev;
 137	int ret;
 138	size_t size;
 139
 140	/* Make it mmapable */
 141	size = omap_gem_mmap_size(obj);
 142	ret = drm_gem_create_mmap_offset_size(obj, size);
 143	if (ret) {
 144		dev_err(dev->dev, "could not allocate mmap offset\n");
 145		return 0;
 146	}
 147
 148	return drm_vma_node_offset_addr(&obj->vma_node);
 149}
 150
 151static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
 152{
 153	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 154		return true;
 155
 156	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
 157		return true;
 158
 159	return false;
 160}
 161
 162/* -----------------------------------------------------------------------------
 163 * Eviction
 164 */
 165
 166static void omap_gem_evict_entry(struct drm_gem_object *obj,
 167		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 168{
 169	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 170	struct omap_drm_private *priv = obj->dev->dev_private;
 171	int n = priv->usergart[fmt].height;
 172	size_t size = PAGE_SIZE * n;
 173	loff_t off = omap_gem_mmap_offset(obj) +
 174			(entry->obj_pgoff << PAGE_SHIFT);
 175	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 176
 177	if (m > 1) {
 178		int i;
 179		/* if stride > than PAGE_SIZE then sparse mapping: */
 180		for (i = n; i > 0; i--) {
 181			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 182					    off, PAGE_SIZE, 1);
 183			off += PAGE_SIZE * m;
 184		}
 185	} else {
 186		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 187				    off, size, 1);
 188	}
 189
 190	entry->obj = NULL;
 191}
 192
 193/* Evict a buffer from usergart, if it is mapped there */
 194static void omap_gem_evict(struct drm_gem_object *obj)
 195{
 196	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 197	struct omap_drm_private *priv = obj->dev->dev_private;
 198
 199	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 200		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 201		int i;
 202
 203		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 204			struct omap_drm_usergart_entry *entry =
 205				&priv->usergart[fmt].entry[i];
 206
 207			if (entry->obj == obj)
 208				omap_gem_evict_entry(obj, fmt, entry);
 209		}
 210	}
 211}
 212
 213/* -----------------------------------------------------------------------------
 214 * Page Management
 215 */
 216
 217/*
 218 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 219 * held.
 220 */
 221static int omap_gem_attach_pages(struct drm_gem_object *obj)
 222{
 223	struct drm_device *dev = obj->dev;
 224	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 225	struct page **pages;
 226	int npages = obj->size >> PAGE_SHIFT;
 227	int i, ret;
 228	dma_addr_t *addrs;
 229
 230	lockdep_assert_held(&omap_obj->lock);
 231
 232	/*
 233	 * If not using shmem (in which case backing pages don't need to be
  234	 * allocated) or if pages are already allocated, we're done.
 235	 */
 236	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
 237		return 0;
 238
 239	pages = drm_gem_get_pages(obj);
 240	if (IS_ERR(pages)) {
 241		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 242		return PTR_ERR(pages);
 243	}
 244
 245	/* for non-cached buffers, ensure the new pages are clean because
 246	 * DSS, GPU, etc. are not cache coherent:
 247	 */
 248	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 249		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 250		if (!addrs) {
 251			ret = -ENOMEM;
 252			goto free_pages;
 253		}
 254
 255		for (i = 0; i < npages; i++) {
 256			addrs[i] = dma_map_page(dev->dev, pages[i],
 257					0, PAGE_SIZE, DMA_TO_DEVICE);
 258
 259			if (dma_mapping_error(dev->dev, addrs[i])) {
 260				dev_warn(dev->dev,
 261					"%s: failed to map page\n", __func__);
 262
 263				for (i = i - 1; i >= 0; --i) {
 264					dma_unmap_page(dev->dev, addrs[i],
 265						PAGE_SIZE, DMA_TO_DEVICE);
 266				}
 267
 268				ret = -ENOMEM;
 269				goto free_addrs;
 270			}
 271		}
 272	} else {
 273		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
 274		if (!addrs) {
 275			ret = -ENOMEM;
 276			goto free_pages;
 277		}
 278	}
 279
 280	omap_obj->dma_addrs = addrs;
 281	omap_obj->pages = pages;
 282
 283	return 0;
 284
 285free_addrs:
 286	kfree(addrs);
 287free_pages:
 288	drm_gem_put_pages(obj, pages, true, false);
 289
 290	return ret;
 291}
 292
 293/* Release backing pages. Must be called with the omap_obj.lock held. */
 294static void omap_gem_detach_pages(struct drm_gem_object *obj)
 295{
 296	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 297	unsigned int npages = obj->size >> PAGE_SHIFT;
 298	unsigned int i;
 299
 300	lockdep_assert_held(&omap_obj->lock);
 301
 302	for (i = 0; i < npages; i++) {
 303		if (omap_obj->dma_addrs[i])
 304			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
 305				       PAGE_SIZE, DMA_TO_DEVICE);
 306	}
 307
 308	kfree(omap_obj->dma_addrs);
 309	omap_obj->dma_addrs = NULL;
 310
 311	drm_gem_put_pages(obj, omap_obj->pages, true, false);
 312	omap_obj->pages = NULL;
 313}
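/*
 * Editorial note on locking (not extra driver code): both helpers above
 * assert omap_obj->lock via lockdep_assert_held(), so callers in this file
 * take the per-object mutex first, as the fault handler below does:
 *
 *	mutex_lock(&omap_obj->lock);
 *	err = omap_gem_attach_pages(obj);
 *	...
 *	mutex_unlock(&omap_obj->lock);
 */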
 314
 315/* get buffer flags */
 316u32 omap_gem_flags(struct drm_gem_object *obj)
 317{
 318	return to_omap_bo(obj)->flags;
 319}
 320
 321/** get mmap size */
 322size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 323{
 324	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 325	size_t size = obj->size;
 326
 327	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 328		/* for tiled buffers, the virtual size has stride rounded up
 329		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 330		 * 32kb later!).  But we don't back the entire buffer with
 331		 * pages, only the valid picture part.. so need to adjust for
 332		 * this in the size used to mmap and generate mmap offset
 333		 */
 334		size = tiler_vsize(gem2fmt(omap_obj->flags),
 335				omap_obj->width, omap_obj->height);
 336	}
 337
 338	return size;
 339}
 340
 341/* -----------------------------------------------------------------------------
 342 * Fault Handling
 343 */
 344
 345/* Normal handling for the case of faulting in non-tiled buffers */
 346static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
 347		struct vm_area_struct *vma, struct vm_fault *vmf)
 348{
 349	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 350	unsigned long pfn;
 351	pgoff_t pgoff;
 352
 353	/* We don't use vmf->pgoff since that has the fake offset: */
 354	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 355
 356	if (omap_obj->pages) {
 357		omap_gem_cpu_sync_page(obj, pgoff);
 358		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 359	} else {
 360		BUG_ON(!omap_gem_is_contiguous(omap_obj));
 361		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
 362	}
 363
 364	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 365			pfn, pfn << PAGE_SHIFT);
 366
 367	return vmf_insert_mixed(vma, vmf->address,
 368			__pfn_to_pfn_t(pfn, PFN_DEV));
 369}
 370
 371/* Special handling for the case of faulting in 2d tiled buffers */
 372static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 373		struct vm_area_struct *vma, struct vm_fault *vmf)
 374{
 375	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 376	struct omap_drm_private *priv = obj->dev->dev_private;
 377	struct omap_drm_usergart_entry *entry;
 378	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 379	struct page *pages[64];  /* XXX is this too much to have on stack? */
 380	unsigned long pfn;
 381	pgoff_t pgoff, base_pgoff;
 382	unsigned long vaddr;
 383	int i, err, slots;
 384	vm_fault_t ret = VM_FAULT_NOPAGE;
 385
 386	/*
 387	 * Note the height of the slot is also equal to the number of pages
  388	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
 389	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 390	 */
 391	const int n = priv->usergart[fmt].height;
 392	const int n_shift = priv->usergart[fmt].height_shift;
 393
 394	/*
 395	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
  396	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
 397	 * into account in some of the math, so figure out virtual stride
 398	 * in pages
 399	 */
 400	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 401
 402	/* We don't use vmf->pgoff since that has the fake offset: */
 403	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 404
 405	/*
 406	 * Actual address we start mapping at is rounded down to previous slot
 407	 * boundary in the y direction:
 408	 */
 409	base_pgoff = round_down(pgoff, m << n_shift);
 410
 411	/* figure out buffer width in slots */
 412	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 413
 414	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 415
 416	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 417
 418	/* evict previous buffer using this usergart entry, if any: */
 419	if (entry->obj)
 420		omap_gem_evict_entry(entry->obj, fmt, entry);
 421
 422	entry->obj = obj;
 423	entry->obj_pgoff = base_pgoff;
 424
 425	/* now convert base_pgoff to phys offset from virt offset: */
 426	base_pgoff = (base_pgoff >> n_shift) * slots;
 427
 428	/* for wider-than 4k.. figure out which part of the slot-row we want: */
 429	if (m > 1) {
 430		int off = pgoff % m;
 431		entry->obj_pgoff += off;
 432		base_pgoff /= m;
 433		slots = min(slots - (off << n_shift), n);
 434		base_pgoff += off << n_shift;
 435		vaddr += off << PAGE_SHIFT;
 436	}
 437
 438	/*
 439	 * Map in pages. Beyond the valid pixel part of the buffer, we set
 440	 * pages[i] to NULL to get a dummy page mapped in.. if someone
 441	 * reads/writes it they will get random/undefined content, but at
 442	 * least it won't corrupt whatever other random page used to be
 443	 * mapped in, or cause other undefined behavior.
 444	 */
 445	memcpy(pages, &omap_obj->pages[base_pgoff],
 446			sizeof(struct page *) * slots);
 447	memset(pages + slots, 0,
 448			sizeof(struct page *) * (n - slots));
 449
 450	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 451	if (err) {
 452		ret = vmf_error(err);
 453		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
 454		return ret;
 455	}
 456
 457	pfn = entry->dma_addr >> PAGE_SHIFT;
 458
 459	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 460			pfn, pfn << PAGE_SHIFT);
 461
 462	for (i = n; i > 0; i--) {
 463		ret = vmf_insert_mixed(vma,
 464			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 465		if (ret & VM_FAULT_ERROR)
 466			break;
 467		pfn += priv->usergart[fmt].stride_pfn;
 468		vaddr += PAGE_SIZE * m;
 469	}
 470
 471	/* simple round-robin: */
 472	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 473				 % NUM_USERGART_ENTRIES;
 474
 475	return ret;
 476}
 477
 478/**
 479 * omap_gem_fault		-	pagefault handler for GEM objects
 480 * @vmf: fault detail
 481 *
 482 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 483 * does most of the work for us including the actual map/unmap calls
 484 * but we need to do the actual page work.
 485 *
 486 * The VMA was set up by GEM. In doing so it also ensured that the
 487 * vma->vm_private_data points to the GEM object that is backing this
 488 * mapping.
 489 */
 490static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 491{
 492	struct vm_area_struct *vma = vmf->vma;
 493	struct drm_gem_object *obj = vma->vm_private_data;
 494	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 495	int err;
 496	vm_fault_t ret;
 497
 498	/* Make sure we don't parallel update on a fault, nor move or remove
 499	 * something from beneath our feet
 500	 */
 501	mutex_lock(&omap_obj->lock);
 502
 503	/* if a shmem backed object, make sure we have pages attached now */
 504	err = omap_gem_attach_pages(obj);
 505	if (err) {
 506		ret = vmf_error(err);
 507		goto fail;
 508	}
 509
 510	/* where should we do the corresponding put_pages().. we are mapping
 511	 * the original page, rather than through a GART, so we can't rely
 512	 * on eviction to trigger this.  But munmap() of all mappings should
 513	 * probably trigger put_pages()?
 514	 */
 515
 516	if (omap_obj->flags & OMAP_BO_TILED_MASK)
 517		ret = omap_gem_fault_2d(obj, vma, vmf);
 518	else
 519		ret = omap_gem_fault_1d(obj, vma, vmf);
 520
 521
 522fail:
 523	mutex_unlock(&omap_obj->lock);
 524	return ret;
 525}
 526
 527/** We override mainly to fix up some of the vm mapping flags.. */
 528int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 529{
 530	int ret;
 531
 532	ret = drm_gem_mmap(filp, vma);
 533	if (ret) {
 534		DBG("mmap failed: %d", ret);
 535		return ret;
 536	}
 537
 538	return omap_gem_mmap_obj(vma->vm_private_data, vma);
 539}
 540
 541int omap_gem_mmap_obj(struct drm_gem_object *obj,
 542		struct vm_area_struct *vma)
 543{
 544	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 545
 546	vma->vm_flags &= ~VM_PFNMAP;
 547	vma->vm_flags |= VM_MIXEDMAP;
 548
 549	if (omap_obj->flags & OMAP_BO_WC) {
 550		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 551	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 552		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 553	} else {
 554		/*
 555		 * We do have some private objects, at least for scanout buffers
 556		 * on hardware without DMM/TILER.  But these are allocated write-
 557		 * combine
 558		 */
 559		if (WARN_ON(!obj->filp))
 560			return -EINVAL;
 561
 562		/*
 563		 * Shunt off cached objs to shmem file so they have their own
 564		 * address_space (so unmap_mapping_range does what we want,
 565		 * in particular in the case of mmap'd dmabufs)
 566		 */
 567		vma->vm_pgoff = 0;
 568		vma_set_file(vma, obj->filp);
 569
 570		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 571	}
 572
 573	return 0;
 574}
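/*
 * Illustrative wiring (a trimmed sketch only; hypothetical_fops is not a
 * real symbol and a real driver sets additional entries): the mmap entry
 * point above is typically hooked up as
 *
 *	static const struct file_operations hypothetical_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.mmap = omap_gem_mmap,
 *	};
 *
 * drm_gem_mmap() resolves the fake mmap offset to the GEM object, and
 * omap_gem_mmap_obj() then fixes up VM_MIXEDMAP and the caching attributes.
 */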
 575
 576/* -----------------------------------------------------------------------------
 577 * Dumb Buffers
 578 */
 579
 580/**
 581 * omap_gem_dumb_create	-	create a dumb buffer
 582 * @file: our client file
 583 * @dev: our device
 584 * @args: the requested arguments copied from userspace
 585 *
 586 * Allocate a buffer suitable for use as a frame buffer of the
 587 * form described by user space. Give userspace a handle by which
 588 * to reference it.
 589 */
 590int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 591		struct drm_mode_create_dumb *args)
 592{
 593	union omap_gem_size gsize;
 594
 595	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 596
 597	args->size = PAGE_ALIGN(args->pitch * args->height);
 598
 599	gsize = (union omap_gem_size){
 600		.bytes = args->size,
 601	};
 602
 603	return omap_gem_new_handle(dev, file, gsize,
 604			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 605}
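/*
 * Worked example (illustrative values only): for a 1920x1080, 32 bpp dumb
 * buffer, pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (already a multiple of the
 * 4k page size), which is then allocated as a write-combined scanout BO.
 */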
 606
 607/**
 608 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 609 * @file: our drm client file
 610 * @dev: drm device
 611 * @handle: GEM handle to the object (from dumb_create)
 612 * @offset: memory map offset placeholder
 613 *
 614 * Do the necessary setup to allow the mapping of the frame buffer
 615 * into user memory. We don't have to do much here at the moment.
 616 */
 617int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 618		u32 handle, u64 *offset)
 619{
 620	struct drm_gem_object *obj;
 621	int ret = 0;
 622
 623	/* GEM does all our handle to object mapping */
 624	obj = drm_gem_object_lookup(file, handle);
 625	if (obj == NULL) {
 626		ret = -ENOENT;
 627		goto fail;
 628	}
 629
 630	*offset = omap_gem_mmap_offset(obj);
 631
 632	drm_gem_object_put(obj);
 633
 634fail:
 635	return ret;
 636}
 637
 638#ifdef CONFIG_DRM_FBDEV_EMULATION
 639/* Set scrolling position.  This allows us to implement fast scrolling
 640 * for console.
 641 *
 642 * Call only from non-atomic contexts.
 643 */
 644int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 645{
 646	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 647	u32 npages = obj->size >> PAGE_SHIFT;
 648	int ret = 0;
 649
 650	if (roll > npages) {
 651		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
 652		return -EINVAL;
 653	}
 654
 655	omap_obj->roll = roll;
 656
 657	mutex_lock(&omap_obj->lock);
 658
 659	/* if we aren't mapped yet, we don't need to do anything */
 660	if (omap_obj->block) {
 661		ret = omap_gem_attach_pages(obj);
 662		if (ret)
 663			goto fail;
 664
 665		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
 666				roll, true);
 667		if (ret)
 668			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 669	}
 670
 671fail:
 672	mutex_unlock(&omap_obj->lock);
 673
 674	return ret;
 675}
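/*
 * Usage sketch (hypothetical caller; the exact conversion depends on the
 * fbdev configuration): a console pan/scroll implementation could turn a
 * y offset in lines into a page roll, e.g.
 *
 *	u32 roll = (yoffset * pitch) >> PAGE_SHIFT;
 *
 *	omap_gem_roll(bo, roll);
 *
 * where pitch is the line stride in bytes of the buffer backing the fbdev.
 */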
 676#endif
 677
 678/* -----------------------------------------------------------------------------
 679 * Memory Management & DMA Sync
 680 */
 681
 682/*
 683 * shmem buffers that are mapped cached are not coherent.
 684 *
 685 * We keep track of dirty pages using page faulting to perform cache management.
 686 * When a page is mapped to the CPU in read/write mode the device can't access
 687 * it and omap_obj->dma_addrs[i] is 0. When a page is mapped to the device,
 688 * omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 689 * unmapped from the CPU.
 690 */
 691static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
 692{
 693	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 694
 695	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 696		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 697}
 698
 699/* Sync the buffer for CPU access.. note pages should already be
 700 * attached, i.e. omap_gem_get_pages()
 701 */
 702void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 703{
 704	struct drm_device *dev = obj->dev;
 705	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 706
 707	if (omap_gem_is_cached_coherent(obj))
 708		return;
 709
 710	if (omap_obj->dma_addrs[pgoff]) {
 711		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 712				PAGE_SIZE, DMA_TO_DEVICE);
 713		omap_obj->dma_addrs[pgoff] = 0;
 714	}
 715}
 716
 717/* sync the buffer for DMA access */
 718void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 719		enum dma_data_direction dir)
 720{
 721	struct drm_device *dev = obj->dev;
 722	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 723	int i, npages = obj->size >> PAGE_SHIFT;
 724	struct page **pages = omap_obj->pages;
 725	bool dirty = false;
 726
 727	if (omap_gem_is_cached_coherent(obj))
 728		return;
 729
 730	for (i = 0; i < npages; i++) {
 731		if (!omap_obj->dma_addrs[i]) {
 732			dma_addr_t addr;
 733
 734			addr = dma_map_page(dev->dev, pages[i], 0,
 735					    PAGE_SIZE, dir);
 736			if (dma_mapping_error(dev->dev, addr)) {
 737				dev_warn(dev->dev, "%s: failed to map page\n",
 738					__func__);
 739				break;
 740			}
 741
 742			dirty = true;
 743			omap_obj->dma_addrs[i] = addr;
 744		}
 745	}
 746
 747	if (dirty) {
 748		unmap_mapping_range(obj->filp->f_mapping, 0,
 749				    omap_gem_mmap_size(obj), 1);
 750	}
 751}
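/*
 * Illustrative round trip for a cached shmem buffer (a restatement of the
 * protocol described above, not a new API): before the device touches the
 * buffer the driver calls omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE),
 * which maps any still-unmapped pages for DMA and, if any were mapped,
 * tears down the userspace mappings so the next CPU access faults; the
 * fault path then calls omap_gem_cpu_sync_page() to unmap that page from
 * the device again before handing it back to the CPU.
 */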
 752
 753/**
 754 * omap_gem_pin() - Pin a GEM object in memory
 755 * @obj: the GEM object
 756 * @dma_addr: the DMA address
 757 *
 758 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 759 * object's DMA address. If the buffer is not physically contiguous it will be
 760 * remapped through the TILER to provide a contiguous view.
 761 *
 762 * Pins are reference-counted, calling this function multiple times is allowed
 763 * as long as the corresponding omap_gem_unpin() calls are balanced.
 764 *
 765 * Return 0 on success or a negative error code otherwise.
 766 */
 767int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 768{
 769	struct omap_drm_private *priv = obj->dev->dev_private;
 770	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 771	int ret = 0;
 772
 773	mutex_lock(&omap_obj->lock);
 774
 775	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
 776		if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
 777			u32 npages = obj->size >> PAGE_SHIFT;
 778			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 779			struct tiler_block *block;
 780
 781			BUG_ON(omap_obj->block);
 782
 783			refcount_set(&omap_obj->dma_addr_cnt, 1);
 784
 785			ret = omap_gem_attach_pages(obj);
 786			if (ret)
 787				goto fail;
 788
 789			if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 790				block = tiler_reserve_2d(fmt,
 791						omap_obj->width,
 792						omap_obj->height, 0);
 793			} else {
 794				block = tiler_reserve_1d(obj->size);
 795			}
 796
 797			if (IS_ERR(block)) {
 798				ret = PTR_ERR(block);
 799				dev_err(obj->dev->dev,
 800					"could not remap: %d (%d)\n", ret, fmt);
 801				goto fail;
 802			}
 803
 804			/* TODO: enable async refill.. */
 805			ret = tiler_pin(block, omap_obj->pages, npages,
 806					omap_obj->roll, true);
 807			if (ret) {
 808				tiler_release(block);
 809				dev_err(obj->dev->dev,
 810						"could not pin: %d\n", ret);
 811				goto fail;
 812			}
 813
 814			omap_obj->dma_addr = tiler_ssptr(block);
 815			omap_obj->block = block;
 816
 817			DBG("got dma address: %pad", &omap_obj->dma_addr);
 818		} else {
 819			refcount_inc(&omap_obj->dma_addr_cnt);
 820		}
 821
 822		if (dma_addr)
 823			*dma_addr = omap_obj->dma_addr;
 824	} else if (omap_gem_is_contiguous(omap_obj)) {
 825		if (dma_addr)
 826			*dma_addr = omap_obj->dma_addr;
 827	} else {
 828		ret = -EINVAL;
 829		goto fail;
 830	}
 831
 832fail:
 833	mutex_unlock(&omap_obj->lock);
 834
 835	return ret;
 836}
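/*
 * Minimal usage sketch (hypothetical caller, error handling trimmed): pins
 * are reference counted and must be balanced with omap_gem_unpin().
 *
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = omap_gem_pin(obj, &paddr);
 *	if (ret)
 *		return ret;
 *	... program paddr into the display controller or DMA engine ...
 *	omap_gem_unpin(obj);
 */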
 837
 838/**
 839 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 840 * @obj: the GEM object
 841 *
 842 * omap_gem_unpin() without locking.
 843 */
 844static void omap_gem_unpin_locked(struct drm_gem_object *obj)
 845{
 846	struct omap_drm_private *priv = obj->dev->dev_private;
 847	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 848	int ret;
 849
 850	if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
 851		return;
 852
 853	if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
 854		ret = tiler_unpin(omap_obj->block);
 855		if (ret) {
 856			dev_err(obj->dev->dev,
 857				"could not unpin pages: %d\n", ret);
 858		}
 859		ret = tiler_release(omap_obj->block);
 860		if (ret) {
 861			dev_err(obj->dev->dev,
 862				"could not release unmap: %d\n", ret);
 863		}
 864		omap_obj->dma_addr = 0;
 865		omap_obj->block = NULL;
 866	}
 867}
 868
 869/**
 870 * omap_gem_unpin() - Unpin a GEM object from memory
 871 * @obj: the GEM object
 872 *
 873 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 874 * reference-counted, the actual unpin will only be performed when the number
 875 * of calls to this function matches the number of calls to omap_gem_pin().
 876 */
 877void omap_gem_unpin(struct drm_gem_object *obj)
 878{
 879	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 880
 881	mutex_lock(&omap_obj->lock);
 882	omap_gem_unpin_locked(obj);
 883	mutex_unlock(&omap_obj->lock);
 884}
 885
 886/* Get rotated scanout address (only valid if already pinned), at the
 887 * specified orientation and x,y offset from top-left corner of buffer
 888 * (only valid for tiled 2d buffers)
 889 */
 890int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 891		int x, int y, dma_addr_t *dma_addr)
 892{
 893	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 894	int ret = -EINVAL;
 895
 896	mutex_lock(&omap_obj->lock);
 897
 898	if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
 899			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
 900		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 901		ret = 0;
 902	}
 903
 904	mutex_unlock(&omap_obj->lock);
 905
 906	return ret;
 907}
 908
 909/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 910int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 911{
 912	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 913	int ret = -EINVAL;
 914	if (omap_obj->flags & OMAP_BO_TILED_MASK)
 915		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 916	return ret;
 917}
 918
 919/* if !remap, and we don't have pages backing, then fail, rather than
 920 * increasing the pin count (which we don't really do yet anyway,
 921 * because we don't support swapping pages back out).  And 'remap'
 922 * might not be quite the right name, but I wanted to keep it working
 923 * similarly to omap_gem_pin().  Note that the omap_obj lock is
 924 * acquired in both cases, so this must not be called from atomic
 925 * context, and omap_gem_unpin() behaves the same way in that
 926 * respect.  If !remap, a matching omap_gem_put_pages() call is not
 927 * required (and should not be made).
 928 */
 929int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 930		bool remap)
 931{
 932	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 933	int ret = 0;
 934
 935	mutex_lock(&omap_obj->lock);
 936
 937	if (remap) {
 938		ret = omap_gem_attach_pages(obj);
 939		if (ret)
 940			goto unlock;
 941	}
 942
 943	if (!omap_obj->pages) {
 944		ret = -ENOMEM;
 945		goto unlock;
 946	}
 947
 948	*pages = omap_obj->pages;
 949
 950unlock:
 951	mutex_unlock(&omap_obj->lock);
 952
 953	return ret;
 954}
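/*
 * Usage sketch (hypothetical caller): with remap = true the pages are
 * attached if necessary and a matching omap_gem_put_pages() is expected
 * once the caller is done with them.
 *
 *	struct page **pages;
 *	int ret;
 *
 *	ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... operate on the backing pages ...
 *	omap_gem_put_pages(obj);
 */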
 955
 956/* release pages when DMA is no longer being performed */
 957int omap_gem_put_pages(struct drm_gem_object *obj)
 958{
 959	/* do something here if we dynamically attach/detach pages.. at
 960	 * least they would no longer need to be pinned if everyone has
 961	 * released the pages..
 962	 */
 963	return 0;
 964}
 965
 966#ifdef CONFIG_DRM_FBDEV_EMULATION
 967/*
 968 * Get kernel virtual address for CPU access.. this more or less only
 969 * exists for omap_fbdev.
 970 */
 971void *omap_gem_vaddr(struct drm_gem_object *obj)
 972{
 973	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 974	void *vaddr;
 975	int ret;
 976
 977	mutex_lock(&omap_obj->lock);
 978
 979	if (!omap_obj->vaddr) {
 980		ret = omap_gem_attach_pages(obj);
 981		if (ret) {
 982			vaddr = ERR_PTR(ret);
 983			goto unlock;
 984		}
 985
 986		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
 987				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 988	}
 989
 990	vaddr = omap_obj->vaddr;
 991
 992unlock:
 993	mutex_unlock(&omap_obj->lock);
 994	return vaddr;
 995}
 996#endif
 997
 998/* -----------------------------------------------------------------------------
 999 * Power Management
1000 */
1001
1002#ifdef CONFIG_PM
1003/* re-pin objects in DMM in resume path: */
1004int omap_gem_resume(struct drm_device *dev)
1005{
1006	struct omap_drm_private *priv = dev->dev_private;
1007	struct omap_gem_object *omap_obj;
1008	int ret = 0;
1009
1010	mutex_lock(&priv->list_lock);
1011	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1012		if (omap_obj->block) {
1013			struct drm_gem_object *obj = &omap_obj->base;
1014			u32 npages = obj->size >> PAGE_SHIFT;
1015
1016			WARN_ON(!omap_obj->pages);  /* this can't happen */
1017			ret = tiler_pin(omap_obj->block,
1018					omap_obj->pages, npages,
1019					omap_obj->roll, true);
1020			if (ret) {
1021				dev_err(dev->dev, "could not repin: %d\n", ret);
1022				goto done;
1023			}
1024		}
1025	}
1026
1027done:
1028	mutex_unlock(&priv->list_lock);
1029	return ret;
1030}
1031#endif
1032
1033/* -----------------------------------------------------------------------------
1034 * DebugFS
1035 */
1036
1037#ifdef CONFIG_DEBUG_FS
1038void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1039{
1040	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1041	u64 off;
1042
1043	off = drm_vma_node_start(&obj->vma_node);
1044
1045	mutex_lock(&omap_obj->lock);
1046
1047	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1048			omap_obj->flags, obj->name, kref_read(&obj->refcount),
1049			off, &omap_obj->dma_addr,
1050			refcount_read(&omap_obj->dma_addr_cnt),
1051			omap_obj->vaddr, omap_obj->roll);
1052
1053	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1054		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1055		if (omap_obj->block) {
1056			struct tcm_area *area = &omap_obj->block->area;
1057			seq_printf(m, " (%dx%d, %dx%d)",
1058					area->p0.x, area->p0.y,
1059					area->p1.x, area->p1.y);
1060		}
1061	} else {
1062		seq_printf(m, " %zu", obj->size);
1063	}
1064
1065	mutex_unlock(&omap_obj->lock);
1066
1067	seq_printf(m, "\n");
1068}
1069
1070void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1071{
1072	struct omap_gem_object *omap_obj;
1073	int count = 0;
1074	size_t size = 0;
1075
1076	list_for_each_entry(omap_obj, list, mm_list) {
1077		struct drm_gem_object *obj = &omap_obj->base;
1078		seq_printf(m, "   ");
1079		omap_gem_describe(obj, m);
1080		count++;
1081		size += obj->size;
1082	}
1083
1084	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1085}
1086#endif
1087
1088/* -----------------------------------------------------------------------------
1089 * Constructor & Destructor
1090 */
1091
1092static void omap_gem_free_object(struct drm_gem_object *obj)
1093{
1094	struct drm_device *dev = obj->dev;
1095	struct omap_drm_private *priv = dev->dev_private;
1096	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1097
1098	omap_gem_evict(obj);
1099
1100	mutex_lock(&priv->list_lock);
1101	list_del(&omap_obj->mm_list);
1102	mutex_unlock(&priv->list_lock);
1103
1104	/*
1105	 * We own the sole reference to the object at this point, but to keep
1106	 * lockdep happy, we must still take the omap_obj_lock to call
1107	 * omap_gem_detach_pages(). This should hardly make any difference as
1108	 * there can't be any lock contention.
1109	 */
1110	mutex_lock(&omap_obj->lock);
1111
1112	/* The object should not be pinned. */
1113	WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
1114
1115	if (omap_obj->pages) {
1116		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1117			kfree(omap_obj->pages);
1118		else
1119			omap_gem_detach_pages(obj);
1120	}
1121
1122	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1123		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1124			    omap_obj->dma_addr);
1125	} else if (omap_obj->vaddr) {
1126		vunmap(omap_obj->vaddr);
1127	} else if (obj->import_attach) {
1128		drm_prime_gem_destroy(obj, omap_obj->sgt);
1129	}
1130
1131	mutex_unlock(&omap_obj->lock);
1132
1133	drm_gem_object_release(obj);
1134
1135	mutex_destroy(&omap_obj->lock);
1136
1137	kfree(omap_obj);
1138}
1139
1140static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1141{
1142	struct omap_drm_private *priv = dev->dev_private;
1143
1144	switch (flags & OMAP_BO_CACHE_MASK) {
1145	case OMAP_BO_CACHED:
1146	case OMAP_BO_WC:
1147	case OMAP_BO_CACHE_MASK:
1148		break;
1149
1150	default:
1151		return false;
1152	}
1153
1154	if (flags & OMAP_BO_TILED_MASK) {
1155		if (!priv->usergart)
1156			return false;
1157
1158		switch (flags & OMAP_BO_TILED_MASK) {
1159		case OMAP_BO_TILED_8:
1160		case OMAP_BO_TILED_16:
1161		case OMAP_BO_TILED_32:
1162			break;
1163
1164		default:
1165			return false;
1166		}
1167	}
1168
1169	return true;
1170}
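/*
 * For example (flag names from the UAPI, behaviour as implemented above):
 * OMAP_BO_WC and OMAP_BO_CACHED are accepted cache modes, while
 * OMAP_BO_TILED_16 | OMAP_BO_WC is only accepted when the usergart has
 * been set up, i.e. when DMM/TILER hardware is available.
 */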
1171
1172static const struct vm_operations_struct omap_gem_vm_ops = {
1173	.fault = omap_gem_fault,
1174	.open = drm_gem_vm_open,
1175	.close = drm_gem_vm_close,
1176};
1177
1178static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1179	.free = omap_gem_free_object,
1180	.export = omap_gem_prime_export,
1181	.vm_ops = &omap_gem_vm_ops,
1182};
1183
1184/* GEM buffer object constructor */
1185struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1186		union omap_gem_size gsize, u32 flags)
1187{
1188	struct omap_drm_private *priv = dev->dev_private;
1189	struct omap_gem_object *omap_obj;
1190	struct drm_gem_object *obj;
1191	struct address_space *mapping;
1192	size_t size;
1193	int ret;
1194
1195	if (!omap_gem_validate_flags(dev, flags))
1196		return NULL;
1197
1198	/* Validate the flags and compute the memory and cache flags. */
1199	if (flags & OMAP_BO_TILED_MASK) {
1200		/*
1201		 * Tiled buffers are always backed by shmem pages. When they are
1202		 * scanned out, they are remapped into DMM/TILER.
1203		 */
1204		flags |= OMAP_BO_MEM_SHMEM;
1205
1206		/*
1207		 * Currently don't allow cached buffers. There is some caching
1208		 * stuff that needs to be handled better.
1209		 */
1210		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1211		flags |= tiler_get_cpu_cache_flags();
1212	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1213		/*
1214		 * If we don't have DMM, we must allocate scanout buffers
1215		 * from contiguous DMA memory.
1216		 */
1217		flags |= OMAP_BO_MEM_DMA_API;
1218	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1219		/*
1220		 * All other buffers not backed by dma_buf are shmem-backed.
1221		 */
1222		flags |= OMAP_BO_MEM_SHMEM;
1223	}
1224
1225	/* Allocate and initialize the OMAP GEM object. */
1226	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1227	if (!omap_obj)
1228		return NULL;
1229
1230	obj = &omap_obj->base;
1231	omap_obj->flags = flags;
1232	mutex_init(&omap_obj->lock);
1233
1234	if (flags & OMAP_BO_TILED_MASK) {
1235		/*
1236		 * For tiled buffers align dimensions to slot boundaries and
1237		 * calculate size based on aligned dimensions.
1238		 */
1239		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1240			    &gsize.tiled.height);
1241
1242		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1243				  gsize.tiled.height);
1244
1245		omap_obj->width = gsize.tiled.width;
1246		omap_obj->height = gsize.tiled.height;
1247	} else {
1248		size = PAGE_ALIGN(gsize.bytes);
1249	}
1250
1251	obj->funcs = &omap_gem_object_funcs;
1252
1253	/* Initialize the GEM object. */
1254	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1255		drm_gem_private_object_init(dev, obj, size);
1256	} else {
1257		ret = drm_gem_object_init(dev, obj, size);
1258		if (ret)
1259			goto err_free;
1260
1261		mapping = obj->filp->f_mapping;
1262		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1263	}
1264
1265	/* Allocate memory if needed. */
1266	if (flags & OMAP_BO_MEM_DMA_API) {
1267		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1268					       &omap_obj->dma_addr,
1269					       GFP_KERNEL);
1270		if (!omap_obj->vaddr)
1271			goto err_release;
1272	}
1273
1274	mutex_lock(&priv->list_lock);
1275	list_add(&omap_obj->mm_list, &priv->obj_list);
1276	mutex_unlock(&priv->list_lock);
1277
1278	return obj;
1279
1280err_release:
1281	drm_gem_object_release(obj);
1282err_free:
1283	kfree(omap_obj);
1284	return NULL;
1285}
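/*
 * Allocation sketch (hypothetical caller, error handling trimmed): a plain
 * write-combined buffer is described by its size in bytes, a tiled one by
 * its width/height in pixels.
 *
 *	union omap_gem_size gsize = { .bytes = SZ_1M };
 *	struct drm_gem_object *bo;
 *
 *	bo = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 * or, for a 16-bit-per-pixel tiled buffer:
 *
 *	union omap_gem_size tsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *
 *	bo = omap_gem_new(dev, tsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 */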
1286
1287struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1288					   struct sg_table *sgt)
1289{
1290	struct omap_drm_private *priv = dev->dev_private;
1291	struct omap_gem_object *omap_obj;
1292	struct drm_gem_object *obj;
1293	union omap_gem_size gsize;
1294
1295	/* Without a DMM, only physically contiguous buffers can be supported. */
1296	if (sgt->orig_nents != 1 && !priv->has_dmm)
1297		return ERR_PTR(-EINVAL);
1298
1299	gsize.bytes = PAGE_ALIGN(size);
1300	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1301	if (!obj)
1302		return ERR_PTR(-ENOMEM);
1303
1304	omap_obj = to_omap_bo(obj);
1305
1306	mutex_lock(&omap_obj->lock);
1307
1308	omap_obj->sgt = sgt;
1309
1310	if (sgt->orig_nents == 1) {
1311		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1312	} else {
1313		/* Create pages list from sgt */
1314		struct page **pages;
1315		unsigned int npages;
1316		unsigned int ret;
1317
1318		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1319		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1320		if (!pages) {
1321			omap_gem_free_object(obj);
1322			obj = ERR_PTR(-ENOMEM);
1323			goto done;
1324		}
1325
1326		omap_obj->pages = pages;
1327		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1328		if (ret) {
1329			omap_gem_free_object(obj);
1330			obj = ERR_PTR(-ENOMEM);
1331			goto done;
1332		}
1333	}
1334
1335done:
1336	mutex_unlock(&omap_obj->lock);
1337	return obj;
1338}
1339
1340/* convenience method to construct a GEM buffer object, and userspace handle */
1341int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1342		union omap_gem_size gsize, u32 flags, u32 *handle)
1343{
1344	struct drm_gem_object *obj;
1345	int ret;
1346
1347	obj = omap_gem_new(dev, gsize, flags);
1348	if (!obj)
1349		return -ENOMEM;
1350
1351	ret = drm_gem_handle_create(file, obj, handle);
1352	if (ret) {
1353		omap_gem_free_object(obj);
1354		return ret;
1355	}
1356
1357	/* drop reference from allocate - handle holds it now */
1358	drm_gem_object_put(obj);
1359
1360	return 0;
1361}
1362
1363/* -----------------------------------------------------------------------------
1364 * Init & Cleanup
1365 */
1366
1367/* If DMM is used, we need to set some stuff up.. */
1368void omap_gem_init(struct drm_device *dev)
1369{
1370	struct omap_drm_private *priv = dev->dev_private;
1371	struct omap_drm_usergart *usergart;
1372	const enum tiler_fmt fmts[] = {
1373			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1374	};
1375	int i, j;
1376
1377	if (!dmm_is_available()) {
1378		/* DMM only supported on OMAP4 and later, so this isn't fatal */
1379		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1380		return;
1381	}
1382
1383	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1384	if (!usergart)
1385		return;
1386
1387	/* reserve 4k aligned/wide regions for userspace mappings: */
1388	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1389		u16 h = 1, w = PAGE_SIZE >> i;
1390
1391		tiler_align(fmts[i], &w, &h);
1392		/* note: since each region is one 4kb page wide and uses the
1393		 * minimum number of rows, the height ends up being the same
1394		 * as the # of pages in the region
1395		 */
1396		usergart[i].height = h;
1397		usergart[i].height_shift = ilog2(h);
1398		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1399		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1400		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1401			struct omap_drm_usergart_entry *entry;
1402			struct tiler_block *block;
1403
1404			entry = &usergart[i].entry[j];
1405			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1406			if (IS_ERR(block)) {
1407				dev_err(dev->dev,
1408						"reserve failed: %d, %d, %ld\n",
1409						i, j, PTR_ERR(block));
1410				return;
1411			}
1412			entry->dma_addr = tiler_ssptr(block);
1413			entry->block = block;
1414
1415			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1416					&entry->dma_addr,
1417					usergart[i].stride_pfn << PAGE_SHIFT);
1418		}
1419	}
1420
1421	priv->usergart = usergart;
1422	priv->has_dmm = true;
1423}
1424
1425void omap_gem_deinit(struct drm_device *dev)
1426{
1427	struct omap_drm_private *priv = dev->dev_private;
1428
1429	/* I believe we can rely on there being no more outstanding GEM
1430	 * objects which could depend on usergart/dmm at this point.
1431	 */
1432	kfree(priv->usergart);
1433}