v4.6
   1/*
   2 * drivers/gpu/drm/omapdrm/omap_gem.c
   3 *
   4 * Copyright (C) 2011 Texas Instruments
   5 * Author: Rob Clark <rob.clark@linaro.org>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published by
   9 * the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include <linux/shmem_fs.h>
  21#include <linux/spinlock.h>
  22#include <linux/pfn_t.h>
  23
  24#include <drm/drm_vma_manager.h>
  25
  26#include "omap_drv.h"
  27#include "omap_dmm_tiler.h"
  28
  29/*
  30 * GEM buffer object implementation.
  31 */
  32
  33/* note: we use upper 8 bits of flags for driver-internal flags: */
  34#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
  35#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
  36#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
  37
  38struct omap_gem_object {
  39	struct drm_gem_object base;
  40
  41	struct list_head mm_list;
  42
  43	uint32_t flags;
  44
  45	/** width/height for tiled formats (rounded up to slot boundaries) */
  46	uint16_t width, height;
  47
  48	/** roll applied when mapping to DMM */
  49	uint32_t roll;
  50
  51	/**
  52	 * paddr contains the buffer DMA address. It is valid for
  53	 *
  54	 * - buffers allocated through the DMA mapping API (with the
  55	 *   OMAP_BO_MEM_DMA_API flag set)
  56	 *
  57	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  58	 *   if they are physically contiguous (when sgt->orig_nents == 1)
  59	 *
  60	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
  61	 *   which case the DMA address points to the TILER aperture
  62	 *
  63	 * Physically contiguous buffers have their DMA address equal to the
  64	 * physical address as we don't remap those buffers through the TILER.
  65	 *
  66	 * Buffers mapped to the TILER have their DMA address pointing to the
  67	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
   68	 * the DMA address must be accessed through omap_gem_get_paddr() to
  69	 * ensure that the mapping won't disappear unexpectedly. References must
  70	 * be released with omap_gem_put_paddr().
  71	 */
  72	dma_addr_t paddr;
  73
  74	/**
  75	 * # of users of paddr
  76	 */
  77	uint32_t paddr_cnt;
  78
  79	/**
   80	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
  81	 * is set and the sgt field is valid.
  82	 */
  83	struct sg_table *sgt;
  84
  85	/**
  86	 * tiler block used when buffer is remapped in DMM/TILER.
  87	 */
  88	struct tiler_block *block;
  89
  90	/**
  91	 * Array of backing pages, if allocated.  Note that pages are never
  92	 * allocated for buffers originally allocated from contiguous memory
  93	 */
  94	struct page **pages;
  95
  96	/** addresses corresponding to pages in above array */
  97	dma_addr_t *addrs;
  98
  99	/**
 100	 * Virtual address, if mapped.
 101	 */
 102	void *vaddr;
 103
 104	/**
 105	 * sync-object allocated on demand (if needed)
 106	 *
 107	 * Per-buffer sync-object for tracking pending and completed hw/dma
 108	 * read and write operations.
 109	 */
 110	struct {
 111		uint32_t write_pending;
 112		uint32_t write_complete;
 113		uint32_t read_pending;
 114		uint32_t read_complete;
 115	} *sync;
 116};
 117
 118#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
 119
 120/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 121 * not necessarily pinned in TILER all the time, and (b) when they are
 122 * they are not necessarily page aligned, we reserve one or more small
 123 * regions in each of the 2d containers to use as a user-GART where we
 124 * can create a second page-aligned mapping of parts of the buffer
 125 * being accessed from userspace.
 126 *
 127 * Note that we could optimize slightly when we know that multiple
 128 * tiler containers are backed by the same PAT.. but I'll leave that
 129 * for later..
 130 */
 131#define NUM_USERGART_ENTRIES 2
 132struct omap_drm_usergart_entry {
 133	struct tiler_block *block;	/* the reserved tiler block */
 134	dma_addr_t paddr;
 135	struct drm_gem_object *obj;	/* the current pinned obj */
 136	pgoff_t obj_pgoff;		/* page offset of obj currently
 137					   mapped in */
 138};
 139
 140struct omap_drm_usergart {
 141	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 142	int height;				/* height in rows */
 143	int height_shift;		/* ilog2(height in rows) */
 144	int slot_shift;			/* ilog2(width per slot) */
 145	int stride_pfn;			/* stride in pages */
 146	int last;				/* index of last used entry */
 147};
 148
 149/* -----------------------------------------------------------------------------
 150 * Helpers
 151 */
 152
 153/** get mmap offset */
 154static uint64_t mmap_offset(struct drm_gem_object *obj)
 155{
 156	struct drm_device *dev = obj->dev;
 157	int ret;
 158	size_t size;
 159
 160	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 161
 162	/* Make it mmapable */
 163	size = omap_gem_mmap_size(obj);
 164	ret = drm_gem_create_mmap_offset_size(obj, size);
 165	if (ret) {
 166		dev_err(dev->dev, "could not allocate mmap offset\n");
 167		return 0;
 168	}
 169
 170	return drm_vma_node_offset_addr(&obj->vma_node);
 171}
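/*
 * Illustrative sketch (not part of the original file): userspace would
 * typically obtain this fake offset through DRM_IOCTL_MODE_MAP_DUMB (see
 * omap_gem_dumb_map_offset() below) and hand it to mmap() on the DRM
 * device fd:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */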
 172
 173static bool is_contiguous(struct omap_gem_object *omap_obj)
 174{
 175	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 176		return true;
 177
 178	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
 179		return true;
 180
 181	return false;
 182}
 183
 184/* -----------------------------------------------------------------------------
 185 * Eviction
 186 */
 187
 188static void evict_entry(struct drm_gem_object *obj,
 189		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 190{
 191	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 192	struct omap_drm_private *priv = obj->dev->dev_private;
 193	int n = priv->usergart[fmt].height;
 194	size_t size = PAGE_SIZE * n;
 195	loff_t off = mmap_offset(obj) +
 196			(entry->obj_pgoff << PAGE_SHIFT);
 197	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
 198
 199	if (m > 1) {
 200		int i;
 201		/* if stride > than PAGE_SIZE then sparse mapping: */
 202		for (i = n; i > 0; i--) {
 203			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 204					    off, PAGE_SIZE, 1);
 205			off += PAGE_SIZE * m;
 206		}
 207	} else {
 208		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 209				    off, size, 1);
 210	}
 211
 212	entry->obj = NULL;
 213}
 214
 215/* Evict a buffer from usergart, if it is mapped there */
 216static void evict(struct drm_gem_object *obj)
 217{
 218	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 219	struct omap_drm_private *priv = obj->dev->dev_private;
 220
 221	if (omap_obj->flags & OMAP_BO_TILED) {
 222		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 223		int i;
 224
 225		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 226			struct omap_drm_usergart_entry *entry =
 227				&priv->usergart[fmt].entry[i];
 228
 229			if (entry->obj == obj)
 230				evict_entry(obj, fmt, entry);
 231		}
 232	}
 233}
 234
 235/* -----------------------------------------------------------------------------
 236 * Page Management
 237 */
 238
 239/** ensure backing pages are allocated */
 240static int omap_gem_attach_pages(struct drm_gem_object *obj)
 241{
 242	struct drm_device *dev = obj->dev;
 243	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 244	struct page **pages;
 245	int npages = obj->size >> PAGE_SHIFT;
 246	int i, ret;
 247	dma_addr_t *addrs;
 248
 249	WARN_ON(omap_obj->pages);
 250
 251	pages = drm_gem_get_pages(obj);
 252	if (IS_ERR(pages)) {
 253		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 254		return PTR_ERR(pages);
 255	}
 256
 257	/* for non-cached buffers, ensure the new pages are clean because
 258	 * DSS, GPU, etc. are not cache coherent:
 259	 */
 260	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 261		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
 262		if (!addrs) {
 263			ret = -ENOMEM;
 264			goto free_pages;
 265		}
 266
 267		for (i = 0; i < npages; i++) {
 268			addrs[i] = dma_map_page(dev->dev, pages[i],
 269					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 270
 271			if (dma_mapping_error(dev->dev, addrs[i])) {
 272				dev_warn(dev->dev,
 273					"%s: failed to map page\n", __func__);
 274
 275				for (i = i - 1; i >= 0; --i) {
 276					dma_unmap_page(dev->dev, addrs[i],
 277						PAGE_SIZE, DMA_BIDIRECTIONAL);
 278				}
 279
 280				ret = -ENOMEM;
 281				goto free_addrs;
 282			}
 283		}
 284	} else {
 285		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
 286		if (!addrs) {
 287			ret = -ENOMEM;
 288			goto free_pages;
 289		}
 290	}
 291
 292	omap_obj->addrs = addrs;
 293	omap_obj->pages = pages;
 294
 295	return 0;
 296
 297free_addrs:
 298	kfree(addrs);
 299free_pages:
 300	drm_gem_put_pages(obj, pages, true, false);
 301
 302	return ret;
 303}
 304
  305/* acquire pages when needed (for example, for DMA where a physically
  306 * contiguous buffer is not required)
 307 */
 308static int get_pages(struct drm_gem_object *obj, struct page ***pages)
 309{
 310	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 311	int ret = 0;
 312
 313	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
 314		ret = omap_gem_attach_pages(obj);
 315		if (ret) {
 316			dev_err(obj->dev->dev, "could not attach pages\n");
 317			return ret;
 318		}
 319	}
 320
 321	/* TODO: even phys-contig.. we should have a list of pages? */
 322	*pages = omap_obj->pages;
 323
 324	return 0;
 325}
 326
 327/** release backing pages */
 328static void omap_gem_detach_pages(struct drm_gem_object *obj)
 329{
 330	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 331
 332	/* for non-cached buffers, ensure the new pages are clean because
 333	 * DSS, GPU, etc. are not cache coherent:
 334	 */
 335	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 336		int i, npages = obj->size >> PAGE_SHIFT;
 337		for (i = 0; i < npages; i++) {
 338			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
 339					PAGE_SIZE, DMA_BIDIRECTIONAL);
 340		}
 341	}
 342
 343	kfree(omap_obj->addrs);
 344	omap_obj->addrs = NULL;
 345
 346	drm_gem_put_pages(obj, omap_obj->pages, true, false);
 347	omap_obj->pages = NULL;
 348}
 349
 350/* get buffer flags */
 351uint32_t omap_gem_flags(struct drm_gem_object *obj)
 352{
 353	return to_omap_bo(obj)->flags;
 354}
 355
 356uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
 357{
 358	uint64_t offset;
 359	mutex_lock(&obj->dev->struct_mutex);
 360	offset = mmap_offset(obj);
 361	mutex_unlock(&obj->dev->struct_mutex);
 362	return offset;
 363}
 364
 365/** get mmap size */
 366size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 367{
 368	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 369	size_t size = obj->size;
 370
 371	if (omap_obj->flags & OMAP_BO_TILED) {
 372		/* for tiled buffers, the virtual size has stride rounded up
 373		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 374		 * 32kb later!).  But we don't back the entire buffer with
 375		 * pages, only the valid picture part.. so need to adjust for
 376		 * this in the size used to mmap and generate mmap offset
 377		 */
 378		size = tiler_vsize(gem2fmt(omap_obj->flags),
 379				omap_obj->width, omap_obj->height);
 380	}
 381
 382	return size;
 383}
 384
 385/* get tiled size, returns -EINVAL if not tiled buffer */
 386int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
 387{
 388	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 389	if (omap_obj->flags & OMAP_BO_TILED) {
 390		*w = omap_obj->width;
 391		*h = omap_obj->height;
 392		return 0;
 393	}
 394	return -EINVAL;
 395}
 396
 397/* -----------------------------------------------------------------------------
 398 * Fault Handling
 399 */
 400
 401/* Normal handling for the case of faulting in non-tiled buffers */
 402static int fault_1d(struct drm_gem_object *obj,
 403		struct vm_area_struct *vma, struct vm_fault *vmf)
 404{
 405	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 406	unsigned long pfn;
 407	pgoff_t pgoff;
 408
 409	/* We don't use vmf->pgoff since that has the fake offset: */
 410	pgoff = ((unsigned long)vmf->virtual_address -
 411			vma->vm_start) >> PAGE_SHIFT;
 412
 413	if (omap_obj->pages) {
 414		omap_gem_cpu_sync(obj, pgoff);
 415		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 416	} else {
 417		BUG_ON(!is_contiguous(omap_obj));
 418		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
 419	}
 420
 421	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 422			pfn, pfn << PAGE_SHIFT);
 423
 424	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
 425			__pfn_to_pfn_t(pfn, PFN_DEV));
 426}
 427
 428/* Special handling for the case of faulting in 2d tiled buffers */
 429static int fault_2d(struct drm_gem_object *obj,
 430		struct vm_area_struct *vma, struct vm_fault *vmf)
 431{
 432	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 433	struct omap_drm_private *priv = obj->dev->dev_private;
 434	struct omap_drm_usergart_entry *entry;
 435	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 436	struct page *pages[64];  /* XXX is this too much to have on stack? */
 437	unsigned long pfn;
 438	pgoff_t pgoff, base_pgoff;
 439	void __user *vaddr;
 440	int i, ret, slots;
 441
 442	/*
 443	 * Note the height of the slot is also equal to the number of pages
  444 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
 445	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 446	 */
 447	const int n = priv->usergart[fmt].height;
 448	const int n_shift = priv->usergart[fmt].height_shift;
 449
 450	/*
 451	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
  452 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
 453	 * into account in some of the math, so figure out virtual stride
 454	 * in pages
 455	 */
 456	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
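	/*
	 * Note: the v6.9.4 listing below computes this as
	 * DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE), which avoids
	 * counting one extra page when the row size is an exact multiple
	 * of PAGE_SIZE.
	 */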
 457
 458	/* We don't use vmf->pgoff since that has the fake offset: */
 459	pgoff = ((unsigned long)vmf->virtual_address -
 460			vma->vm_start) >> PAGE_SHIFT;
 461
 462	/*
 463	 * Actual address we start mapping at is rounded down to previous slot
 464	 * boundary in the y direction:
 465	 */
 466	base_pgoff = round_down(pgoff, m << n_shift);
 467
 468	/* figure out buffer width in slots */
 469	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 470
 471	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 472
 473	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 474
 475	/* evict previous buffer using this usergart entry, if any: */
 476	if (entry->obj)
 477		evict_entry(entry->obj, fmt, entry);
 478
 479	entry->obj = obj;
 480	entry->obj_pgoff = base_pgoff;
 481
 482	/* now convert base_pgoff to phys offset from virt offset: */
 483	base_pgoff = (base_pgoff >> n_shift) * slots;
 484
 485	/* for wider-than 4k.. figure out which part of the slot-row we want: */
 486	if (m > 1) {
 487		int off = pgoff % m;
 488		entry->obj_pgoff += off;
 489		base_pgoff /= m;
 490		slots = min(slots - (off << n_shift), n);
 491		base_pgoff += off << n_shift;
 492		vaddr += off << PAGE_SHIFT;
 493	}
 494
 495	/*
 496	 * Map in pages. Beyond the valid pixel part of the buffer, we set
 497	 * pages[i] to NULL to get a dummy page mapped in.. if someone
 498	 * reads/writes it they will get random/undefined content, but at
 499	 * least it won't be corrupting whatever other random page used to
 500	 * be mapped in, or other undefined behavior.
 501	 */
 502	memcpy(pages, &omap_obj->pages[base_pgoff],
 503			sizeof(struct page *) * slots);
 504	memset(pages + slots, 0,
 505			sizeof(struct page *) * (n - slots));
 506
 507	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 508	if (ret) {
 509		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
 510		return ret;
 511	}
 512
 513	pfn = entry->paddr >> PAGE_SHIFT;
 514
 515	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 516			pfn, pfn << PAGE_SHIFT);
 517
 518	for (i = n; i > 0; i--) {
 519		vm_insert_mixed(vma, (unsigned long)vaddr,
 520				__pfn_to_pfn_t(pfn, PFN_DEV));
 521		pfn += priv->usergart[fmt].stride_pfn;
 522		vaddr += PAGE_SIZE * m;
 523	}
 524
 525	/* simple round-robin: */
 526	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 527				 % NUM_USERGART_ENTRIES;
 528
 529	return 0;
 530}
 531
 532/**
 533 * omap_gem_fault		-	pagefault handler for GEM objects
 534 * @vma: the VMA of the GEM object
 535 * @vmf: fault detail
 536 *
 537 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 538 * does most of the work for us including the actual map/unmap calls
 539 * but we need to do the actual page work.
 540 *
 541 * The VMA was set up by GEM. In doing so it also ensured that the
 542 * vma->vm_private_data points to the GEM object that is backing this
 543 * mapping.
 544 */
 545int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 546{
 547	struct drm_gem_object *obj = vma->vm_private_data;
 548	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 549	struct drm_device *dev = obj->dev;
 550	struct page **pages;
 551	int ret;
 552
 553	/* Make sure we don't parallel update on a fault, nor move or remove
 554	 * something from beneath our feet
 555	 */
 556	mutex_lock(&dev->struct_mutex);
 557
 558	/* if a shmem backed object, make sure we have pages attached now */
 559	ret = get_pages(obj, &pages);
 560	if (ret)
 561		goto fail;
 562
 563	/* where should we do corresponding put_pages().. we are mapping
 564	 * the original page, rather than thru a GART, so we can't rely
  565 * on eviction to trigger this.  But munmap() of all mappings should
 566	 * probably trigger put_pages()?
 567	 */
 568
 569	if (omap_obj->flags & OMAP_BO_TILED)
 570		ret = fault_2d(obj, vma, vmf);
 571	else
 572		ret = fault_1d(obj, vma, vmf);
 573
 574
 575fail:
 576	mutex_unlock(&dev->struct_mutex);
 577	switch (ret) {
 578	case 0:
 579	case -ERESTARTSYS:
 580	case -EINTR:
 581	case -EBUSY:
 582		/*
 583		 * EBUSY is ok: this just means that another thread
 584		 * already did the job.
 585		 */
 586		return VM_FAULT_NOPAGE;
 587	case -ENOMEM:
 588		return VM_FAULT_OOM;
 589	default:
 590		return VM_FAULT_SIGBUS;
 591	}
 592}
 593
 594/** We override mainly to fix up some of the vm mapping flags.. */
 595int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 596{
 597	int ret;
 598
 599	ret = drm_gem_mmap(filp, vma);
 600	if (ret) {
 601		DBG("mmap failed: %d", ret);
 602		return ret;
 603	}
 604
 605	return omap_gem_mmap_obj(vma->vm_private_data, vma);
 606}
 607
 608int omap_gem_mmap_obj(struct drm_gem_object *obj,
 609		struct vm_area_struct *vma)
 610{
 611	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 612
 613	vma->vm_flags &= ~VM_PFNMAP;
 614	vma->vm_flags |= VM_MIXEDMAP;
 615
 616	if (omap_obj->flags & OMAP_BO_WC) {
 617		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 618	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 619		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 620	} else {
 621		/*
 622		 * We do have some private objects, at least for scanout buffers
 623		 * on hardware without DMM/TILER.  But these are allocated write-
 624		 * combine
 625		 */
 626		if (WARN_ON(!obj->filp))
 627			return -EINVAL;
 628
 629		/*
 630		 * Shunt off cached objs to shmem file so they have their own
 631		 * address_space (so unmap_mapping_range does what we want,
 632		 * in particular in the case of mmap'd dmabufs)
 633		 */
 634		fput(vma->vm_file);
 635		vma->vm_pgoff = 0;
 636		vma->vm_file  = get_file(obj->filp);
 637
 638		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 639	}
 640
 641	return 0;
 642}
 643
 644/* -----------------------------------------------------------------------------
 645 * Dumb Buffers
 646 */
 647
 648/**
 649 * omap_gem_dumb_create	-	create a dumb buffer
  650 * @file: our client file
 651 * @dev: our device
 652 * @args: the requested arguments copied from userspace
 653 *
 654 * Allocate a buffer suitable for use for a frame buffer of the
 655 * form described by user space. Give userspace a handle by which
 656 * to reference it.
 657 */
 658int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 659		struct drm_mode_create_dumb *args)
 660{
 661	union omap_gem_size gsize;
 662
 663	args->pitch = align_pitch(0, args->width, args->bpp);
 664	args->size = PAGE_ALIGN(args->pitch * args->height);
 665
 666	gsize = (union omap_gem_size){
 667		.bytes = args->size,
 668	};
 669
 670	return omap_gem_new_handle(dev, file, gsize,
 671			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 672}
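/*
 * Illustrative sketch (not part of the original file): the corresponding
 * userspace sequence to allocate and map a dumb buffer, with error
 * handling omitted:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, drm_fd, map.offset);
 */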
 673
 674/**
 675 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 676 * @file: our drm client file
 677 * @dev: drm device
 678 * @handle: GEM handle to the object (from dumb_create)
 679 *
 680 * Do the necessary setup to allow the mapping of the frame buffer
 681 * into user memory. We don't have to do much here at the moment.
 682 */
 683int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 684		uint32_t handle, uint64_t *offset)
 685{
 686	struct drm_gem_object *obj;
 687	int ret = 0;
 688
 689	/* GEM does all our handle to object mapping */
 690	obj = drm_gem_object_lookup(dev, file, handle);
 691	if (obj == NULL) {
 692		ret = -ENOENT;
 693		goto fail;
 694	}
 695
 696	*offset = omap_gem_mmap_offset(obj);
 697
 698	drm_gem_object_unreference_unlocked(obj);
 699
 700fail:
 701	return ret;
 702}
 703
 704#ifdef CONFIG_DRM_FBDEV_EMULATION
 705/* Set scrolling position.  This allows us to implement fast scrolling
 706 * for console.
 707 *
 708 * Call only from non-atomic contexts.
 709 */
 710int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
 711{
 712	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 713	uint32_t npages = obj->size >> PAGE_SHIFT;
 714	int ret = 0;
 715
 716	if (roll > npages) {
 717		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
 718		return -EINVAL;
 719	}
 720
 721	omap_obj->roll = roll;
 722
 723	mutex_lock(&obj->dev->struct_mutex);
 724
 725	/* if we aren't mapped yet, we don't need to do anything */
 726	if (omap_obj->block) {
 727		struct page **pages;
 728		ret = get_pages(obj, &pages);
 729		if (ret)
 730			goto fail;
 731		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
 732		if (ret)
 733			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 734	}
 735
 736fail:
 737	mutex_unlock(&obj->dev->struct_mutex);
 738
 739	return ret;
 740}
 741#endif
 742
 743/* -----------------------------------------------------------------------------
 744 * Memory Management & DMA Sync
 745 */
 746
 747/**
  748 * shmem buffers that are mapped cached can simulate coherency by using
 749 * page faulting to keep track of dirty pages
 750 */
 751static inline bool is_cached_coherent(struct drm_gem_object *obj)
 752{
 753	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 754
 755	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 756		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
 757}
 758
 759/* Sync the buffer for CPU access.. note pages should already be
 760 * attached, ie. omap_gem_get_pages()
 761 */
 762void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
 763{
 764	struct drm_device *dev = obj->dev;
 765	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 766
 767	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
 768		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
 769				PAGE_SIZE, DMA_BIDIRECTIONAL);
 770		omap_obj->addrs[pgoff] = 0;
 771	}
 772}
 773
 774/* sync the buffer for DMA access */
 775void omap_gem_dma_sync(struct drm_gem_object *obj,
 776		enum dma_data_direction dir)
 777{
 778	struct drm_device *dev = obj->dev;
 779	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 780
 781	if (is_cached_coherent(obj)) {
 782		int i, npages = obj->size >> PAGE_SHIFT;
 783		struct page **pages = omap_obj->pages;
 784		bool dirty = false;
 785
 786		for (i = 0; i < npages; i++) {
 787			if (!omap_obj->addrs[i]) {
 788				dma_addr_t addr;
 789
 790				addr = dma_map_page(dev->dev, pages[i], 0,
 791						PAGE_SIZE, DMA_BIDIRECTIONAL);
 792
 793				if (dma_mapping_error(dev->dev, addr)) {
 794					dev_warn(dev->dev,
 795						"%s: failed to map page\n",
 796						__func__);
 797					break;
 798				}
 799
 800				dirty = true;
 801				omap_obj->addrs[i] = addr;
 802			}
 803		}
 804
 805		if (dirty) {
 806			unmap_mapping_range(obj->filp->f_mapping, 0,
 807					omap_gem_mmap_size(obj), 1);
 808		}
 809	}
 810}
 811
 812/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 813 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 814 * map in TILER)
 815 */
 816int omap_gem_get_paddr(struct drm_gem_object *obj,
 817		dma_addr_t *paddr, bool remap)
 818{
 819	struct omap_drm_private *priv = obj->dev->dev_private;
 820	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 821	int ret = 0;
 822
 823	mutex_lock(&obj->dev->struct_mutex);
 824
 825	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
 826		if (omap_obj->paddr_cnt == 0) {
 827			struct page **pages;
 828			uint32_t npages = obj->size >> PAGE_SHIFT;
 829			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 830			struct tiler_block *block;
 831
 832			BUG_ON(omap_obj->block);
 833
 834			ret = get_pages(obj, &pages);
 835			if (ret)
 836				goto fail;
 837
 838			if (omap_obj->flags & OMAP_BO_TILED) {
 839				block = tiler_reserve_2d(fmt,
 840						omap_obj->width,
 841						omap_obj->height, 0);
 842			} else {
 843				block = tiler_reserve_1d(obj->size);
 844			}
 845
 846			if (IS_ERR(block)) {
 847				ret = PTR_ERR(block);
 848				dev_err(obj->dev->dev,
 849					"could not remap: %d (%d)\n", ret, fmt);
 850				goto fail;
 851			}
 852
 853			/* TODO: enable async refill.. */
 854			ret = tiler_pin(block, pages, npages,
 855					omap_obj->roll, true);
 856			if (ret) {
 857				tiler_release(block);
 858				dev_err(obj->dev->dev,
 859						"could not pin: %d\n", ret);
 860				goto fail;
 861			}
 862
 863			omap_obj->paddr = tiler_ssptr(block);
 864			omap_obj->block = block;
 865
 866			DBG("got paddr: %pad", &omap_obj->paddr);
 867		}
 868
 869		omap_obj->paddr_cnt++;
 870
 871		*paddr = omap_obj->paddr;
 872	} else if (is_contiguous(omap_obj)) {
 873		*paddr = omap_obj->paddr;
 874	} else {
 875		ret = -EINVAL;
 876		goto fail;
 877	}
 878
 879fail:
 880	mutex_unlock(&obj->dev->struct_mutex);
 881
 882	return ret;
 883}
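/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * e.g. a scanout path, pins the buffer around the DMA operation and
 * drops the reference when the hardware is done with it:
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program the display controller / DMA engine with paddr ...
 *	omap_gem_put_paddr(obj);
 */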
 884
 885/* Release physical address, when DMA is no longer being performed.. this
 886 * could potentially unpin and unmap buffers from TILER
 887 */
 888void omap_gem_put_paddr(struct drm_gem_object *obj)
 889{
 890	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 891	int ret;
 892
 893	mutex_lock(&obj->dev->struct_mutex);
 894	if (omap_obj->paddr_cnt > 0) {
 895		omap_obj->paddr_cnt--;
 896		if (omap_obj->paddr_cnt == 0) {
 897			ret = tiler_unpin(omap_obj->block);
 898			if (ret) {
 899				dev_err(obj->dev->dev,
 900					"could not unpin pages: %d\n", ret);
 901			}
 902			ret = tiler_release(omap_obj->block);
 903			if (ret) {
 904				dev_err(obj->dev->dev,
 905					"could not release unmap: %d\n", ret);
 906			}
 907			omap_obj->paddr = 0;
 908			omap_obj->block = NULL;
 909		}
 910	}
 911
 912	mutex_unlock(&obj->dev->struct_mutex);
 913}
 914
 915/* Get rotated scanout address (only valid if already pinned), at the
 916 * specified orientation and x,y offset from top-left corner of buffer
 917 * (only valid for tiled 2d buffers)
 918 */
 919int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
 920		int x, int y, dma_addr_t *paddr)
 921{
 922	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 923	int ret = -EINVAL;
 924
 925	mutex_lock(&obj->dev->struct_mutex);
 926	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
 927			(omap_obj->flags & OMAP_BO_TILED)) {
 928		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
 929		ret = 0;
 930	}
 931	mutex_unlock(&obj->dev->struct_mutex);
 932	return ret;
 933}
 934
 935/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 936int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
 937{
 938	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 939	int ret = -EINVAL;
 940	if (omap_obj->flags & OMAP_BO_TILED)
 941		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 942	return ret;
 943}
 944
 945/* if !remap, and we don't have pages backing, then fail, rather than
 946 * increasing the pin count (which we don't really do yet anyways,
 947 * because we don't support swapping pages back out).  And 'remap'
 948 * might not be quite the right name, but I wanted to keep it working
 949 * similarly to omap_gem_get_paddr().  Note though that mutex is not
  950 * acquired if !remap (because this can be called in atomic ctxt),
 951 * but probably omap_gem_get_paddr() should be changed to work in the
 952 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 953 * required (and should not be made).
 954 */
 955int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 956		bool remap)
 957{
 958	int ret;
 959	if (!remap) {
 960		struct omap_gem_object *omap_obj = to_omap_bo(obj);
 961		if (!omap_obj->pages)
 962			return -ENOMEM;
 963		*pages = omap_obj->pages;
 964		return 0;
 965	}
 966	mutex_lock(&obj->dev->struct_mutex);
 967	ret = get_pages(obj, pages);
 968	mutex_unlock(&obj->dev->struct_mutex);
 969	return ret;
 970}
 971
 972/* release pages when DMA no longer being performed */
 973int omap_gem_put_pages(struct drm_gem_object *obj)
 974{
 975	/* do something here if we dynamically attach/detach pages.. at
 976	 * least they would no longer need to be pinned if everyone has
 977	 * released the pages..
 978	 */
 979	return 0;
 980}
 981
 982#ifdef CONFIG_DRM_FBDEV_EMULATION
 983/* Get kernel virtual address for CPU access.. this more or less only
 984 * exists for omap_fbdev.  This should be called with struct_mutex
 985 * held.
 986 */
 987void *omap_gem_vaddr(struct drm_gem_object *obj)
 988{
 989	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 990	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 991	if (!omap_obj->vaddr) {
 992		struct page **pages;
 993		int ret = get_pages(obj, &pages);
 994		if (ret)
 995			return ERR_PTR(ret);
 996		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 997				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 998	}
 999	return omap_obj->vaddr;
1000}
1001#endif
1002
1003/* -----------------------------------------------------------------------------
1004 * Power Management
1005 */
1006
1007#ifdef CONFIG_PM
1008/* re-pin objects in DMM in resume path: */
1009int omap_gem_resume(struct device *dev)
1010{
1011	struct drm_device *drm_dev = dev_get_drvdata(dev);
1012	struct omap_drm_private *priv = drm_dev->dev_private;
1013	struct omap_gem_object *omap_obj;
1014	int ret = 0;
1015
1016	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1017		if (omap_obj->block) {
1018			struct drm_gem_object *obj = &omap_obj->base;
1019			uint32_t npages = obj->size >> PAGE_SHIFT;
1020			WARN_ON(!omap_obj->pages);  /* this can't happen */
1021			ret = tiler_pin(omap_obj->block,
1022					omap_obj->pages, npages,
1023					omap_obj->roll, true);
1024			if (ret) {
1025				dev_err(dev, "could not repin: %d\n", ret);
1026				return ret;
1027			}
1028		}
1029	}
1030
1031	return 0;
1032}
1033#endif
1034
1035/* -----------------------------------------------------------------------------
1036 * DebugFS
1037 */
1038
1039#ifdef CONFIG_DEBUG_FS
1040void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1041{
1042	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1043	uint64_t off;
1044
1045	off = drm_vma_node_start(&obj->vma_node);
1046
1047	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1048			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
1049			off, &omap_obj->paddr, omap_obj->paddr_cnt,
1050			omap_obj->vaddr, omap_obj->roll);
1051
1052	if (omap_obj->flags & OMAP_BO_TILED) {
1053		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1054		if (omap_obj->block) {
1055			struct tcm_area *area = &omap_obj->block->area;
1056			seq_printf(m, " (%dx%d, %dx%d)",
1057					area->p0.x, area->p0.y,
1058					area->p1.x, area->p1.y);
1059		}
1060	} else {
1061		seq_printf(m, " %d", obj->size);
1062	}
1063
1064	seq_printf(m, "\n");
1065}
1066
1067void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1068{
1069	struct omap_gem_object *omap_obj;
1070	int count = 0;
1071	size_t size = 0;
1072
1073	list_for_each_entry(omap_obj, list, mm_list) {
1074		struct drm_gem_object *obj = &omap_obj->base;
1075		seq_printf(m, "   ");
1076		omap_gem_describe(obj, m);
1077		count++;
1078		size += obj->size;
1079	}
1080
1081	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1082}
1083#endif
1084
1085/* -----------------------------------------------------------------------------
1086 * Buffer Synchronization
1087 */
1088
1089static DEFINE_SPINLOCK(sync_lock);
1090
1091struct omap_gem_sync_waiter {
1092	struct list_head list;
1093	struct omap_gem_object *omap_obj;
1094	enum omap_gem_op op;
1095	uint32_t read_target, write_target;
1096	/* notify called w/ sync_lock held */
1097	void (*notify)(void *arg);
1098	void *arg;
1099};
1100
1101/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
1102 * the read and/or write target count is achieved which can call a user
1103 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
1104 * cpu access), etc.
1105 */
1106static LIST_HEAD(waiters);
1107
1108static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
1109{
1110	struct omap_gem_object *omap_obj = waiter->omap_obj;
1111	if ((waiter->op & OMAP_GEM_READ) &&
1112			(omap_obj->sync->write_complete < waiter->write_target))
1113		return true;
1114	if ((waiter->op & OMAP_GEM_WRITE) &&
1115			(omap_obj->sync->read_complete < waiter->read_target))
1116		return true;
1117	return false;
1118}
1119
1120/* macro for sync debug.. */
1121#define SYNCDBG 0
1122#define SYNC(fmt, ...) do { if (SYNCDBG) \
1123		printk(KERN_ERR "%s:%d: "fmt"\n", \
1124				__func__, __LINE__, ##__VA_ARGS__); \
1125	} while (0)
1126
1127
1128static void sync_op_update(void)
1129{
1130	struct omap_gem_sync_waiter *waiter, *n;
1131	list_for_each_entry_safe(waiter, n, &waiters, list) {
1132		if (!is_waiting(waiter)) {
1133			list_del(&waiter->list);
1134			SYNC("notify: %p", waiter);
1135			waiter->notify(waiter->arg);
1136			kfree(waiter);
1137		}
1138	}
1139}
1140
1141static inline int sync_op(struct drm_gem_object *obj,
1142		enum omap_gem_op op, bool start)
1143{
1144	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1145	int ret = 0;
1146
1147	spin_lock(&sync_lock);
1148
1149	if (!omap_obj->sync) {
1150		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
1151		if (!omap_obj->sync) {
1152			ret = -ENOMEM;
1153			goto unlock;
1154		}
1155	}
1156
1157	if (start) {
1158		if (op & OMAP_GEM_READ)
1159			omap_obj->sync->read_pending++;
1160		if (op & OMAP_GEM_WRITE)
1161			omap_obj->sync->write_pending++;
1162	} else {
1163		if (op & OMAP_GEM_READ)
1164			omap_obj->sync->read_complete++;
1165		if (op & OMAP_GEM_WRITE)
1166			omap_obj->sync->write_complete++;
1167		sync_op_update();
1168	}
1169
1170unlock:
1171	spin_unlock(&sync_lock);
1172
1173	return ret;
1174}
1175
1176/* mark the start of read and/or write operation */
1177int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
1178{
1179	return sync_op(obj, op, true);
1180}
1181
1182int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
1183{
1184	return sync_op(obj, op, false);
1185}
1186
1187static DECLARE_WAIT_QUEUE_HEAD(sync_event);
1188
1189static void sync_notify(void *arg)
1190{
1191	struct task_struct **waiter_task = arg;
1192	*waiter_task = NULL;
1193	wake_up_all(&sync_event);
1194}
1195
1196int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
1197{
1198	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1199	int ret = 0;
1200	if (omap_obj->sync) {
1201		struct task_struct *waiter_task = current;
1202		struct omap_gem_sync_waiter *waiter =
1203				kzalloc(sizeof(*waiter), GFP_KERNEL);
1204
1205		if (!waiter)
1206			return -ENOMEM;
1207
1208		waiter->omap_obj = omap_obj;
1209		waiter->op = op;
1210		waiter->read_target = omap_obj->sync->read_pending;
1211		waiter->write_target = omap_obj->sync->write_pending;
1212		waiter->notify = sync_notify;
1213		waiter->arg = &waiter_task;
1214
1215		spin_lock(&sync_lock);
1216		if (is_waiting(waiter)) {
1217			SYNC("waited: %p", waiter);
1218			list_add_tail(&waiter->list, &waiters);
1219			spin_unlock(&sync_lock);
1220			ret = wait_event_interruptible(sync_event,
1221					(waiter_task == NULL));
1222			spin_lock(&sync_lock);
1223			if (waiter_task) {
1224				SYNC("interrupted: %p", waiter);
1225				/* we were interrupted */
1226				list_del(&waiter->list);
1227				waiter_task = NULL;
1228			} else {
1229				/* freed in sync_op_update() */
1230				waiter = NULL;
1231			}
1232		}
1233		spin_unlock(&sync_lock);
1234		kfree(waiter);
1235	}
1236	return ret;
1237}
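/*
 * Illustrative sketch (not part of the original file): bracketing a
 * hardware write with the sync API.  A CPU reader then blocks until the
 * write has completed:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);   (hw starts writing)
 *	... kick the DMA/GPU job ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);  (from the completion irq)
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);     (waits for pending writers)
 */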
1238
1239/* call fxn(arg), either synchronously or asynchronously if the op
1240 * is currently blocked..  fxn() can be called from any context
1241 *
1242 * (TODO for now fxn is called back from whichever context calls
1243 * omap_gem_op_finish().. but this could be better defined later
1244 * if needed)
1245 *
1246 * TODO more code in common w/ _sync()..
1247 */
1248int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
1249		void (*fxn)(void *arg), void *arg)
1250{
1251	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1252	if (omap_obj->sync) {
1253		struct omap_gem_sync_waiter *waiter =
1254				kzalloc(sizeof(*waiter), GFP_ATOMIC);
1255
1256		if (!waiter)
1257			return -ENOMEM;
1258
1259		waiter->omap_obj = omap_obj;
1260		waiter->op = op;
1261		waiter->read_target = omap_obj->sync->read_pending;
1262		waiter->write_target = omap_obj->sync->write_pending;
1263		waiter->notify = fxn;
1264		waiter->arg = arg;
1265
1266		spin_lock(&sync_lock);
1267		if (is_waiting(waiter)) {
1268			SYNC("waited: %p", waiter);
1269			list_add_tail(&waiter->list, &waiters);
1270			spin_unlock(&sync_lock);
1271			return 0;
1272		}
1273
1274		spin_unlock(&sync_lock);
1275
1276		kfree(waiter);
1277	}
1278
1279	/* no waiting.. */
1280	fxn(arg);
1281
1282	return 0;
1283}
1284
1285/* -----------------------------------------------------------------------------
1286 * Constructor & Destructor
1287 */
1288
1289void omap_gem_free_object(struct drm_gem_object *obj)
1290{
1291	struct drm_device *dev = obj->dev;
1292	struct omap_drm_private *priv = dev->dev_private;
1293	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1294
1295	evict(obj);
1296
1297	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1298
1299	spin_lock(&priv->list_lock);
1300	list_del(&omap_obj->mm_list);
1301	spin_unlock(&priv->list_lock);
1302
1303	/* this means the object is still pinned.. which really should
1304	 * not happen.  I think..
1305	 */
1306	WARN_ON(omap_obj->paddr_cnt > 0);
1307
1308	if (omap_obj->pages) {
1309		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1310			kfree(omap_obj->pages);
1311		else
1312			omap_gem_detach_pages(obj);
1313	}
1314
1315	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1316		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1317			    omap_obj->paddr);
1318	} else if (omap_obj->vaddr) {
1319		vunmap(omap_obj->vaddr);
1320	} else if (obj->import_attach) {
1321		drm_prime_gem_destroy(obj, omap_obj->sgt);
1322	}
1323
1324	kfree(omap_obj->sync);
1325
1326	drm_gem_object_release(obj);
1327
1328	kfree(omap_obj);
1329}
1330
1331/* GEM buffer object constructor */
1332struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1333		union omap_gem_size gsize, uint32_t flags)
1334{
1335	struct omap_drm_private *priv = dev->dev_private;
1336	struct omap_gem_object *omap_obj;
1337	struct drm_gem_object *obj;
1338	struct address_space *mapping;
1339	size_t size;
1340	int ret;
1341
1342	/* Validate the flags and compute the memory and cache flags. */
1343	if (flags & OMAP_BO_TILED) {
1344		if (!priv->usergart) {
1345			dev_err(dev->dev, "Tiled buffers require DMM\n");
1346			return NULL;
1347		}
1348
1349		/*
1350		 * Tiled buffers are always shmem paged backed. When they are
1351		 * scanned out, they are remapped into DMM/TILER.
1352		 */
1353		flags &= ~OMAP_BO_SCANOUT;
1354		flags |= OMAP_BO_MEM_SHMEM;
1355
1356		/*
1357		 * Currently don't allow cached buffers. There is some caching
1358		 * stuff that needs to be handled better.
1359		 */
1360		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1361		flags |= tiler_get_cpu_cache_flags();
1362	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1363		/*
1364		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1365		 * tiled. However, to lower the pressure on memory allocation,
1366		 * use contiguous memory only if no TILER is available.
1367		 */
1368		flags |= OMAP_BO_MEM_DMA_API;
1369	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1370		/*
1371		 * All other buffers not backed by dma_buf are shmem-backed.
1372		 */
1373		flags |= OMAP_BO_MEM_SHMEM;
1374	}
1375
 1376	/* Allocate and initialize the OMAP GEM object. */
1377	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1378	if (!omap_obj)
1379		return NULL;
1380
1381	obj = &omap_obj->base;
1382	omap_obj->flags = flags;
1383
1384	if (flags & OMAP_BO_TILED) {
1385		/*
1386		 * For tiled buffers align dimensions to slot boundaries and
1387		 * calculate size based on aligned dimensions.
1388		 */
1389		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1390			    &gsize.tiled.height);
1391
1392		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1393				  gsize.tiled.height);
1394
1395		omap_obj->width = gsize.tiled.width;
1396		omap_obj->height = gsize.tiled.height;
1397	} else {
1398		size = PAGE_ALIGN(gsize.bytes);
1399	}
1400
1401	/* Initialize the GEM object. */
1402	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1403		drm_gem_private_object_init(dev, obj, size);
1404	} else {
1405		ret = drm_gem_object_init(dev, obj, size);
1406		if (ret)
1407			goto err_free;
1408
1409		mapping = file_inode(obj->filp)->i_mapping;
1410		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1411	}
1412
1413	/* Allocate memory if needed. */
1414	if (flags & OMAP_BO_MEM_DMA_API) {
1415		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1416					       &omap_obj->paddr,
1417					       GFP_KERNEL);
1418		if (!omap_obj->vaddr)
1419			goto err_release;
1420	}
1421
1422	spin_lock(&priv->list_lock);
1423	list_add(&omap_obj->mm_list, &priv->obj_list);
1424	spin_unlock(&priv->list_lock);
1425
1426	return obj;
1427
1428err_release:
1429	drm_gem_object_release(obj);
1430err_free:
1431	kfree(omap_obj);
1432	return NULL;
1433}
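/*
 * Illustrative sketch (not part of the original file): the two arms of
 * union omap_gem_size in use.  OMAP_BO_TILED_16 is assumed from the
 * omap_drm.h UAPI flags:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *bo =
 *		omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *
 *	union omap_gem_size tsize = {
 *		.tiled = { .width = w, .height = h },
 *	};
 *	struct drm_gem_object *tbo =
 *		omap_gem_new(dev, tsize, OMAP_BO_TILED_16);
 */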
1434
1435struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1436					   struct sg_table *sgt)
1437{
1438	struct omap_drm_private *priv = dev->dev_private;
1439	struct omap_gem_object *omap_obj;
1440	struct drm_gem_object *obj;
1441	union omap_gem_size gsize;
1442
1443	/* Without a DMM only physically contiguous buffers can be supported. */
1444	if (sgt->orig_nents != 1 && !priv->has_dmm)
1445		return ERR_PTR(-EINVAL);
1446
1447	mutex_lock(&dev->struct_mutex);
1448
1449	gsize.bytes = PAGE_ALIGN(size);
1450	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1451	if (!obj) {
1452		obj = ERR_PTR(-ENOMEM);
1453		goto done;
1454	}
1455
1456	omap_obj = to_omap_bo(obj);
1457	omap_obj->sgt = sgt;
1458
1459	if (sgt->orig_nents == 1) {
1460		omap_obj->paddr = sg_dma_address(sgt->sgl);
1461	} else {
1462		/* Create pages list from sgt */
1463		struct sg_page_iter iter;
1464		struct page **pages;
1465		unsigned int npages;
1466		unsigned int i = 0;
1467
1468		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1469		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1470		if (!pages) {
1471			omap_gem_free_object(obj);
1472			obj = ERR_PTR(-ENOMEM);
1473			goto done;
1474		}
1475
1476		omap_obj->pages = pages;
1477
1478		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
1479			pages[i++] = sg_page_iter_page(&iter);
1480			if (i > npages)
1481				break;
1482		}
1483
1484		if (WARN_ON(i != npages)) {
1485			omap_gem_free_object(obj);
1486			obj = ERR_PTR(-ENOMEM);
1487			goto done;
1488		}
1489	}
1490
1491done:
1492	mutex_unlock(&dev->struct_mutex);
1493	return obj;
1494}
1495
1496/* convenience method to construct a GEM buffer object, and userspace handle */
1497int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1498		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
1499{
1500	struct drm_gem_object *obj;
1501	int ret;
1502
1503	obj = omap_gem_new(dev, gsize, flags);
1504	if (!obj)
1505		return -ENOMEM;
1506
1507	ret = drm_gem_handle_create(file, obj, handle);
1508	if (ret) {
1509		omap_gem_free_object(obj);
1510		return ret;
1511	}
1512
1513	/* drop reference from allocate - handle holds it now */
1514	drm_gem_object_unreference_unlocked(obj);
1515
1516	return 0;
1517}
1518
1519/* -----------------------------------------------------------------------------
1520 * Init & Cleanup
1521 */
1522
1523/* If DMM is used, we need to set some stuff up.. */
1524void omap_gem_init(struct drm_device *dev)
1525{
1526	struct omap_drm_private *priv = dev->dev_private;
1527	struct omap_drm_usergart *usergart;
1528	const enum tiler_fmt fmts[] = {
1529			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1530	};
1531	int i, j;
1532
1533	if (!dmm_is_available()) {
1534		/* DMM only supported on OMAP4 and later, so this isn't fatal */
1535		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1536		return;
1537	}
1538
1539	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1540	if (!usergart)
1541		return;
1542
1543	/* reserve 4k aligned/wide regions for userspace mappings: */
1544	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1545		uint16_t h = 1, w = PAGE_SIZE >> i;
1546		tiler_align(fmts[i], &w, &h);
1547		/* note: since each region is 1 4kb page wide, and minimum
1548		 * number of rows, the height ends up being the same as the
1549		 * # of pages in the region
1550		 */
1551		usergart[i].height = h;
1552		usergart[i].height_shift = ilog2(h);
1553		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1554		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1555		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1556			struct omap_drm_usergart_entry *entry;
1557			struct tiler_block *block;
1558
1559			entry = &usergart[i].entry[j];
1560			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1561			if (IS_ERR(block)) {
1562				dev_err(dev->dev,
1563						"reserve failed: %d, %d, %ld\n",
1564						i, j, PTR_ERR(block));
1565				return;
1566			}
1567			entry->paddr = tiler_ssptr(block);
1568			entry->block = block;
1569
1570			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
1571					&entry->paddr,
1572					usergart[i].stride_pfn << PAGE_SHIFT);
1573		}
1574	}
1575
1576	priv->usergart = usergart;
1577	priv->has_dmm = true;
1578}
1579
1580void omap_gem_deinit(struct drm_device *dev)
1581{
1582	struct omap_drm_private *priv = dev->dev_private;
1583
1584	/* I believe we can rely on there being no more outstanding GEM
1585	 * objects which could depend on usergart/dmm at this point.
1586	 */
1587	kfree(priv->usergart);
1588}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
   4 * Author: Rob Clark <rob.clark@linaro.org>
   5 */
   6
   7#include <linux/dma-mapping.h>
   8#include <linux/seq_file.h>
   9#include <linux/shmem_fs.h>
  10#include <linux/spinlock.h>
  11#include <linux/pfn_t.h>
  12
  13#include <drm/drm_prime.h>
  14#include <drm/drm_vma_manager.h>
  15
  16#include "omap_drv.h"
  17#include "omap_dmm_tiler.h"
  18
  19/*
  20 * GEM buffer object implementation.
  21 */
  22
  23/* note: we use upper 8 bits of flags for driver-internal flags: */
  24#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
  25#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
  26#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
  27
  28struct omap_gem_object {
  29	struct drm_gem_object base;
  30
  31	struct list_head mm_list;
  32
  33	u32 flags;
  34
  35	/** width/height for tiled formats (rounded up to slot boundaries) */
  36	u16 width, height;
  37
  38	/** roll applied when mapping to DMM */
  39	u32 roll;
  40
  41	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
  42	struct mutex lock;
  43
  44	/**
  45	 * dma_addr contains the buffer DMA address. It is valid for
  46	 *
  47	 * - buffers allocated through the DMA mapping API (with the
  48	 *   OMAP_BO_MEM_DMA_API flag set)
  49	 *
  50	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  51	 *   if they are physically contiguous
  52	 *
  53	 * - buffers mapped through the TILER when pin_cnt is not zero, in which
  54	 *   case the DMA address points to the TILER aperture
  55	 *
  56	 * Physically contiguous buffers have their DMA address equal to the
  57	 * physical address as we don't remap those buffers through the TILER.
  58	 *
  59	 * Buffers mapped to the TILER have their DMA address pointing to the
  60	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
  61	 * the DMA address must be accessed through omap_gem_pin() to ensure
  62	 * that the mapping won't disappear unexpectedly. References must be
  63	 * released with omap_gem_unpin().
  64	 */
  65	dma_addr_t dma_addr;
  66
  67	/**
  68	 * # of users
  69	 */
  70	refcount_t pin_cnt;
  71
  72	/**
  73	 * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
  74	 * is set and the sgt field is valid.
  75	 */
  76	struct sg_table *sgt;
  77
  78	/**
  79	 * tiler block used when buffer is remapped in DMM/TILER.
  80	 */
  81	struct tiler_block *block;
  82
  83	/**
  84	 * Array of backing pages, if allocated.  Note that pages are never
  85	 * allocated for buffers originally allocated from contiguous memory
  86	 */
  87	struct page **pages;
  88
  89	/** addresses corresponding to pages in above array */
  90	dma_addr_t *dma_addrs;
  91
  92	/**
  93	 * Virtual address, if mapped.
  94	 */
  95	void *vaddr;
  96};
  97
  98#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
  99
 100/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 101 * not necessarily pinned in TILER all the time, and (b) when they are
 102 * they are not necessarily page aligned, we reserve one or more small
 103 * regions in each of the 2d containers to use as a user-GART where we
 104 * can create a second page-aligned mapping of parts of the buffer
 105 * being accessed from userspace.
 106 *
 107 * Note that we could optimize slightly when we know that multiple
 108 * tiler containers are backed by the same PAT.. but I'll leave that
 109 * for later..
 110 */
 111#define NUM_USERGART_ENTRIES 2
 112struct omap_drm_usergart_entry {
 113	struct tiler_block *block;	/* the reserved tiler block */
 114	dma_addr_t dma_addr;
 115	struct drm_gem_object *obj;	/* the current pinned obj */
 116	pgoff_t obj_pgoff;		/* page offset of obj currently
 117					   mapped in */
 118};
 119
 120struct omap_drm_usergart {
 121	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 122	int height;				/* height in rows */
 123	int height_shift;		/* ilog2(height in rows) */
 124	int slot_shift;			/* ilog2(width per slot) */
 125	int stride_pfn;			/* stride in pages */
 126	int last;				/* index of last used entry */
 127};
 128
 129/* -----------------------------------------------------------------------------
 130 * Helpers
 131 */
 132
 133/** get mmap offset */
 134u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 135{
 136	struct drm_device *dev = obj->dev;
 137	int ret;
 138	size_t size;
 139
 140	/* Make it mmapable */
 141	size = omap_gem_mmap_size(obj);
 142	ret = drm_gem_create_mmap_offset_size(obj, size);
 143	if (ret) {
 144		dev_err(dev->dev, "could not allocate mmap offset\n");
 145		return 0;
 146	}
 147
 148	return drm_vma_node_offset_addr(&obj->vma_node);
 149}
 150
 151static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
 152{
 153	return !(drm_prime_get_contiguous_size(sgt) < size);
 154}
 155
 156static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
 157{
 158	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 159		return true;
 160
 161	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
 162	    omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
 163		return true;
 164
 165	return false;
 166}
 167
 168/* -----------------------------------------------------------------------------
 169 * Eviction
 170 */
 171
 172static void omap_gem_evict_entry(struct drm_gem_object *obj,
 173		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 174{
 175	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 176	struct omap_drm_private *priv = obj->dev->dev_private;
 177	int n = priv->usergart[fmt].height;
 178	size_t size = PAGE_SIZE * n;
 179	loff_t off = omap_gem_mmap_offset(obj) +
 180			(entry->obj_pgoff << PAGE_SHIFT);
 181	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 182
 183	if (m > 1) {
 184		int i;
 185		/* if stride > than PAGE_SIZE then sparse mapping: */
 186		for (i = n; i > 0; i--) {
 187			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 188					    off, PAGE_SIZE, 1);
 189			off += PAGE_SIZE * m;
 190		}
 191	} else {
 192		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 193				    off, size, 1);
 194	}
 195
 196	entry->obj = NULL;
 197}
 198
 199/* Evict a buffer from usergart, if it is mapped there */
 200static void omap_gem_evict(struct drm_gem_object *obj)
 201{
 202	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 203	struct omap_drm_private *priv = obj->dev->dev_private;
 204
 205	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 206		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 207		int i;
 208
 209		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 210			struct omap_drm_usergart_entry *entry =
 211				&priv->usergart[fmt].entry[i];
 212
 213			if (entry->obj == obj)
 214				omap_gem_evict_entry(obj, fmt, entry);
 215		}
 216	}
 217}
 218
 219/* -----------------------------------------------------------------------------
 220 * Page Management
 221 */
 222
 223/*
 224 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 225 * held.
 226 */
 227static int omap_gem_attach_pages(struct drm_gem_object *obj)
 228{
 229	struct drm_device *dev = obj->dev;
 230	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 231	struct page **pages;
 232	int npages = obj->size >> PAGE_SHIFT;
 233	int i, ret;
 234	dma_addr_t *addrs;
 235
 236	lockdep_assert_held(&omap_obj->lock);
 237
 238	/*
 239	 * If not using shmem (in which case backing pages don't need to be
 240	 * allocated) or if pages are already allocated we're done.
 241	 */
 242	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
 243		return 0;
 244
 245	pages = drm_gem_get_pages(obj);
 246	if (IS_ERR(pages)) {
 247		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 248		return PTR_ERR(pages);
 249	}
 250
 251	/* for non-cached buffers, ensure the new pages are clean because
 252	 * DSS, GPU, etc. are not cache coherent:
 253	 */
 254	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 255		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 256		if (!addrs) {
 257			ret = -ENOMEM;
 258			goto free_pages;
 259		}
 260
 261		for (i = 0; i < npages; i++) {
 262			addrs[i] = dma_map_page(dev->dev, pages[i],
 263					0, PAGE_SIZE, DMA_TO_DEVICE);
 264
 265			if (dma_mapping_error(dev->dev, addrs[i])) {
 266				dev_warn(dev->dev,
 267					"%s: failed to map page\n", __func__);
 268
 269				for (i = i - 1; i >= 0; --i) {
 270					dma_unmap_page(dev->dev, addrs[i],
 271						PAGE_SIZE, DMA_TO_DEVICE);
 272				}
 273
 274				ret = -ENOMEM;
 275				goto free_addrs;
 276			}
 277		}
 278	} else {
 279		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
 280		if (!addrs) {
 281			ret = -ENOMEM;
 282			goto free_pages;
 283		}
 284	}
 285
 286	omap_obj->dma_addrs = addrs;
 287	omap_obj->pages = pages;
 288
 289	return 0;
 290
 291free_addrs:
 292	kfree(addrs);
 293free_pages:
 294	drm_gem_put_pages(obj, pages, true, false);
 295
 296	return ret;
 297}
 298
 299/* Release backing pages. Must be called with the omap_obj.lock held. */
 300static void omap_gem_detach_pages(struct drm_gem_object *obj)
 301{
 302	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 303	unsigned int npages = obj->size >> PAGE_SHIFT;
 304	unsigned int i;
 305
 306	lockdep_assert_held(&omap_obj->lock);
 307
 308	for (i = 0; i < npages; i++) {
 309		if (omap_obj->dma_addrs[i])
 310			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
 311				       PAGE_SIZE, DMA_TO_DEVICE);
 312	}
 313
 314	kfree(omap_obj->dma_addrs);
 315	omap_obj->dma_addrs = NULL;
 316
 317	drm_gem_put_pages(obj, omap_obj->pages, true, false);
 318	omap_obj->pages = NULL;
 319}
 320
 321/* get buffer flags */
 322u32 omap_gem_flags(struct drm_gem_object *obj)
 323{
 324	return to_omap_bo(obj)->flags;
 325}
 326
 327/* get mmap size */
 328size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 329{
 330	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 331	size_t size = obj->size;
 332
 333	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 334		/* for tiled buffers, the virtual size has stride rounded up
 335		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 336		 * 32kb later!).  But we don't back the entire buffer with
 337		 * pages, only the valid picture part.. so we need to adjust for
 338		 * this in the size used to mmap and generate mmap offset
 339		 */
 340		size = tiler_vsize(gem2fmt(omap_obj->flags),
 341				omap_obj->width, omap_obj->height);
 342	}
 343
 344	return size;
 345}
 346
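/*
 * Worked example (hypothetical values, ignoring slot-boundary rounding
 * of the dimensions): a 640x480 OMAP_BO_TILED_16 buffer has 1280-byte
 * pixel rows, so with each row rounded up to a 4kb virtual stride the
 * mmap size is 480 * 4096 = 1966080 bytes, even though far fewer
 * backing pages hold the valid pixels.
 */
#if 0
size = tiler_vsize(TILFMT_16BIT, 640, 480);
#endif
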
 347/* -----------------------------------------------------------------------------
 348 * Fault Handling
 349 */
 350
 351/* Normal handling for the case of faulting in non-tiled buffers */
 352static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
 353		struct vm_area_struct *vma, struct vm_fault *vmf)
 354{
 355	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 356	unsigned long pfn;
 357	pgoff_t pgoff;
 358
 359	/* We don't use vmf->pgoff since that has the fake offset: */
 360	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 361
 362	if (omap_obj->pages) {
 363		omap_gem_cpu_sync_page(obj, pgoff);
 364		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 365	} else {
 366		BUG_ON(!omap_gem_is_contiguous(omap_obj));
 367		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
 368	}
 369
 370	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 371			pfn, pfn << PAGE_SHIFT);
 372
 373	return vmf_insert_mixed(vma, vmf->address,
 374			__pfn_to_pfn_t(pfn, PFN_DEV));
 375}
 376
 377/* Special handling for the case of faulting in 2d tiled buffers */
 378static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 379		struct vm_area_struct *vma, struct vm_fault *vmf)
 380{
 381	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 382	struct omap_drm_private *priv = obj->dev->dev_private;
 383	struct omap_drm_usergart_entry *entry;
 384	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 385	struct page *pages[64];  /* XXX is this too much to have on stack? */
 386	unsigned long pfn;
 387	pgoff_t pgoff, base_pgoff;
 388	unsigned long vaddr;
 389	int i, err, slots;
 390	vm_fault_t ret = VM_FAULT_NOPAGE;
 391
 392	/*
 393	 * Note the height of the slot is also equal to the number of pages
 394	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
 395	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
 396	 */
 397	const int n = priv->usergart[fmt].height;
 398	const int n_shift = priv->usergart[fmt].height_shift;
 399
 400	/*
 401	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
 402	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
 403	 * into account in some of the math, so figure out virtual stride
 404	 * in pages
 405	 */
 406	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 407
 408	/* We don't use vmf->pgoff since that has the fake offset: */
 409	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 410
 411	/*
 412	 * Actual address we start mapping at is rounded down to previous slot
 413	 * boundary in the y direction:
 414	 */
 415	base_pgoff = round_down(pgoff, m << n_shift);
 416
 417	/* figure out buffer width in slots */
 418	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 419
 420	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 421
 422	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 423
 424	/* evict previous buffer using this usergart entry, if any: */
 425	if (entry->obj)
 426		omap_gem_evict_entry(entry->obj, fmt, entry);
 427
 428	entry->obj = obj;
 429	entry->obj_pgoff = base_pgoff;
 430
 431	/* now convert base_pgoff to phys offset from virt offset: */
 432	base_pgoff = (base_pgoff >> n_shift) * slots;
 433
 434	/* for wider-than 4k.. figure out which part of the slot-row we want: */
 435	if (m > 1) {
 436		int off = pgoff % m;
 437		entry->obj_pgoff += off;
 438		base_pgoff /= m;
 439		slots = min(slots - (off << n_shift), n);
 440		base_pgoff += off << n_shift;
 441		vaddr += off << PAGE_SHIFT;
 442	}
 443
 444	/*
 445	 * Map in pages. Beyond the valid pixel part of the buffer, we set
 446	 * pages[i] to NULL to get a dummy page mapped in.. if someone
 447	 * reads/writes it they will get random/undefined content, but at
 448	 * least it won't be corrupting whatever other random page used to
 449	 * be mapped in, or other undefined behavior.
 450	 */
 451	memcpy(pages, &omap_obj->pages[base_pgoff],
 452			sizeof(struct page *) * slots);
 453	memset(pages + slots, 0,
 454			sizeof(struct page *) * (n - slots));
 455
 456	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 457	if (err) {
 458		ret = vmf_error(err);
 459		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
 460		return ret;
 461	}
 462
 463	pfn = entry->dma_addr >> PAGE_SHIFT;
 464
 465	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 466			pfn, pfn << PAGE_SHIFT);
 467
 468	for (i = n; i > 0; i--) {
 469		ret = vmf_insert_mixed(vma,
 470			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 471		if (ret & VM_FAULT_ERROR)
 472			break;
 473		pfn += priv->usergart[fmt].stride_pfn;
 474		vaddr += PAGE_SIZE * m;
 475	}
 476
 477	/* simple round-robin: */
 478	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 479				 % NUM_USERGART_ENTRIES;
 480
 481	return ret;
 482}
 483
 484/**
 485 * omap_gem_fault		-	pagefault handler for GEM objects
 486 * @vmf: fault detail
 487 *
 488 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 489 * does most of the work for us including the actual map/unmap calls
 490 * but we need to do the actual page work.
 491 *
 492 * The VMA was set up by GEM. In doing so it also ensured that the
 493 * vma->vm_private_data points to the GEM object that is backing this
 494 * mapping.
 495 */
 496static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 497{
 498	struct vm_area_struct *vma = vmf->vma;
 499	struct drm_gem_object *obj = vma->vm_private_data;
 500	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 501	int err;
 502	vm_fault_t ret;
 503
 504	/* Make sure we don't parallel update on a fault, nor move or remove
 505	 * something from beneath our feet
 506	 */
 507	mutex_lock(&omap_obj->lock);
 508
 509	/* if a shmem backed object, make sure we have pages attached now */
 510	err = omap_gem_attach_pages(obj);
 511	if (err) {
 512		ret = vmf_error(err);
 513		goto fail;
 514	}
 515
 516	/* where should we do corresponding put_pages().. we are mapping
 517	 * the original page, rather than thru a GART, so we can't rely
 518	 * on eviction to trigger this.  But munmap() of all mappings should
 519	 * probably trigger put_pages()?
 520	 */
 521
 522	if (omap_obj->flags & OMAP_BO_TILED_MASK)
 523		ret = omap_gem_fault_2d(obj, vma, vmf);
 524	else
 525		ret = omap_gem_fault_1d(obj, vma, vmf);
 526
 527
 528fail:
 529	mutex_unlock(&omap_obj->lock);
 530	return ret;
 531}
 532
 533static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 534{
 535	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 536
 537	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
 538
 539	if (omap_obj->flags & OMAP_BO_WC) {
 540		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 541	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 542		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 543	} else {
 544		/*
 545		 * We do have some private objects, at least for scanout buffers
 546		 * on hardware without DMM/TILER.  But these are allocated write-
 547		 * combine
 548		 */
 549		if (WARN_ON(!obj->filp))
 550			return -EINVAL;
 551
 552		/*
 553		 * Shunt off cached objs to shmem file so they have their own
 554		 * address_space (so unmap_mapping_range does what we want,
 555		 * in particular in the case of mmap'd dmabufs)
 556		 */
 557		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 558		vma_set_file(vma, obj->filp);
 559
 560		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 561	}
 562
 563	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 564
 565	return 0;
 566}
 567
 568/* -----------------------------------------------------------------------------
 569 * Dumb Buffers
 570 */
 571
 572/**
 573 * omap_gem_dumb_create	-	create a dumb buffer
 574 * @file: our client file
 575 * @dev: our device
 576 * @args: the requested arguments copied from userspace
 577 *
 578 * Allocate a buffer suitable for use for a frame buffer of the
 579 * form described by user space. Give userspace a handle by which
 580 * to reference it.
 581 */
 582int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 583		struct drm_mode_create_dumb *args)
 584{
 585	union omap_gem_size gsize;
 586
 587	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 588
 589	args->size = PAGE_ALIGN(args->pitch * args->height);
 590
 591	gsize = (union omap_gem_size){
 592		.bytes = args->size,
 593	};
 594
 595	return omap_gem_new_handle(dev, file, gsize,
 596			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 597}
 598
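/*
 * Worked example (hypothetical request): width=640, height=480, bpp=32
 * gives pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes and
 * size = PAGE_ALIGN(2560 * 480) = 1228800 bytes (exactly 300 pages).
 */
#if 0
struct drm_mode_create_dumb args = {
	.width = 640, .height = 480, .bpp = 32,
};
#endif
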
 599/**
 600 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
 601 * @file: our drm client file
 602 * @dev: drm device
 603 * @handle: GEM handle to the object (from dumb_create)
 604 * @offset: memory map offset placeholder
 605 *
 606 * Do the necessary setup to allow the mapping of the frame buffer
 607 * into user memory. We don't have to do much here at the moment.
 608 */
 609int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 610		u32 handle, u64 *offset)
 611{
 612	struct drm_gem_object *obj;
 613	int ret = 0;
 614
 615	/* GEM does all our handle to object mapping */
 616	obj = drm_gem_object_lookup(file, handle);
 617	if (obj == NULL) {
 618		ret = -ENOENT;
 619		goto fail;
 620	}
 621
 622	*offset = omap_gem_mmap_offset(obj);
 623
 624	drm_gem_object_put(obj);
 625
 626fail:
 627	return ret;
 628}
 629
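/*
 * Userspace view (illustrative sketch): the offset returned above is a
 * token from the DRM VMA manager, only meaningful as the last argument
 * of mmap() on the DRM device fd; it is not a physical offset.
 */
#if 0
/* hypothetical userspace snippet */
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 drm_fd, (off_t)offset);
#endif
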
 630#ifdef CONFIG_DRM_FBDEV_EMULATION
 631/* Set scrolling position.  This allows us to implement fast scrolling
 632 * for console.
 633 *
 634 * Call only from non-atomic contexts.
 635 */
 636int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 637{
 638	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 639	u32 npages = obj->size >> PAGE_SHIFT;
 640	int ret = 0;
 641
 642	if (roll > npages) {
 643		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
 644		return -EINVAL;
 645	}
 646
 647	omap_obj->roll = roll;
 648
 649	mutex_lock(&omap_obj->lock);
 650
 651	/* if we aren't mapped yet, we don't need to do anything */
 652	if (omap_obj->block) {
 653		ret = omap_gem_attach_pages(obj);
 654		if (ret)
 655			goto fail;
 656
 657		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
 658				roll, true);
 659		if (ret)
 660			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 661	}
 662
 663fail:
 664	mutex_unlock(&omap_obj->lock);
 665
 666	return ret;
 667}
 668#endif
 669
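/*
 * Illustrative sketch (hypothetical fbdev caller, names assumed): a
 * pan/scroll repositions the buffer inside the TILER mapping instead
 * of copying pixels; roll is expressed in pages, so this assumes
 * line_length is page aligned.
 */
#if 0
u32 npages = fbi->fix.line_length >> PAGE_SHIFT;
omap_gem_roll(bo, var->yoffset * npages);
#endif
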
 670/* -----------------------------------------------------------------------------
 671 * Memory Management & DMA Sync
 672 */
 673
 674/*
 675 * shmem buffers that are mapped cached are not coherent.
 676 *
 677 * We keep track of dirty pages using page faulting to perform cache management.
 678 * When a page is mapped to the CPU in read/write mode the device can't access
 679 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 680 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 681 * unmapped from the CPU.
 682 */
 683static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
 684{
 685	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 686
 687	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 688		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 689}
 690
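/*
 * Minimal sketch (hypothetical helper, not in this driver) restating
 * the rule above: a zero entry in dma_addrs[] means the CPU owns the
 * page, a non-zero entry means it is mapped for the device.
 */
#if 0
static bool example_page_is_device_owned(struct omap_gem_object *omap_obj,
					 pgoff_t pgoff)
{
	return omap_obj->dma_addrs[pgoff] != 0;
}
#endif
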
 691/* Sync the buffer for CPU access.. note pages should already be
 692 * attached, i.e. via omap_gem_get_pages()
 693 */
 694void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 695{
 696	struct drm_device *dev = obj->dev;
 697	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 698
 699	if (omap_gem_is_cached_coherent(obj))
 700		return;
 701
 702	if (omap_obj->dma_addrs[pgoff]) {
 703		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 704				PAGE_SIZE, DMA_TO_DEVICE);
 705		omap_obj->dma_addrs[pgoff] = 0;
 706	}
 707}
 708
 709/* sync the buffer for DMA access */
 710void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 711		enum dma_data_direction dir)
 712{
 713	struct drm_device *dev = obj->dev;
 714	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 715	int i, npages = obj->size >> PAGE_SHIFT;
 716	struct page **pages = omap_obj->pages;
 717	bool dirty = false;
 718
 719	if (omap_gem_is_cached_coherent(obj))
 720		return;
 721
 722	for (i = 0; i < npages; i++) {
 723		if (!omap_obj->dma_addrs[i]) {
 724			dma_addr_t addr;
 725
 726			addr = dma_map_page(dev->dev, pages[i], 0,
 727					    PAGE_SIZE, dir);
 728			if (dma_mapping_error(dev->dev, addr)) {
 729				dev_warn(dev->dev, "%s: failed to map page\n",
 730					__func__);
 731				break;
 732			}
 733
 734			dirty = true;
 735			omap_obj->dma_addrs[i] = addr;
 736		}
 737	}
 738
 739	if (dirty) {
 740		unmap_mapping_range(obj->filp->f_mapping, 0,
 741				    omap_gem_mmap_size(obj), 1);
 742	}
 743}
 744
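/*
 * Illustrative round trip for a cached shmem buffer (hypothetical
 * caller): map every CPU-owned page for DMA and zap the CPU mappings;
 * the next CPU access faults through omap_gem_fault(), where
 * omap_gem_cpu_sync_page() hands the faulted page back to the CPU.
 */
#if 0
omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
/* ... device reads the buffer ... */
#endif
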
 745static int omap_gem_pin_tiler(struct drm_gem_object *obj)
 746{
 747	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 748	u32 npages = obj->size >> PAGE_SHIFT;
 749	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 750	struct tiler_block *block;
 751	int ret;
 752
 753	BUG_ON(omap_obj->block);
 754
 755	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 756		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
 757					 PAGE_SIZE);
 758	} else {
 759		block = tiler_reserve_1d(obj->size);
 760	}
 761
 762	if (IS_ERR(block)) {
 763		ret = PTR_ERR(block);
 764		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
 765		goto fail;
 766	}
 767
 768	/* TODO: enable async refill.. */
 769	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
 770	if (ret) {
 771		tiler_release(block);
 772		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
 773		goto fail;
 774	}
 775
 776	omap_obj->dma_addr = tiler_ssptr(block);
 777	omap_obj->block = block;
 778
 779	DBG("got dma address: %pad", &omap_obj->dma_addr);
 780
 781fail:
 782	return ret;
 783}
 784
 785/**
 786 * omap_gem_pin() - Pin a GEM object in memory
 787 * @obj: the GEM object
 788 * @dma_addr: the DMA address
 789 *
 790 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 791 * object's DMA address. If the buffer is not physically contiguous it will be
 792 * remapped through the TILER to provide a contiguous view.
 793 *
 794 * Pins are reference-counted, calling this function multiple times is allowed
 795 * as long the corresponding omap_gem_unpin() calls are balanced.
 796 *
 797 * Return 0 on success or a negative error code otherwise.
 798 */
 799int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 800{
 801	struct omap_drm_private *priv = obj->dev->dev_private;
 802	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 803	int ret = 0;
 804
 805	mutex_lock(&omap_obj->lock);
 806
 807	if (!omap_gem_is_contiguous(omap_obj)) {
 808		if (refcount_read(&omap_obj->pin_cnt) == 0) {
 809
 810			refcount_set(&omap_obj->pin_cnt, 1);
 811
 812			ret = omap_gem_attach_pages(obj);
 813			if (ret)
 814				goto fail;
 815
 816			if (omap_obj->flags & OMAP_BO_SCANOUT) {
 817				if (priv->has_dmm) {
 818					ret = omap_gem_pin_tiler(obj);
 819					if (ret)
 820						goto fail;
 821				}
 822			}
 823		} else {
 824			refcount_inc(&omap_obj->pin_cnt);
 825		}
 826	}
 827
 828	if (dma_addr)
 829		*dma_addr = omap_obj->dma_addr;
 830
 831fail:
 832	mutex_unlock(&omap_obj->lock);
 833
 834	return ret;
 835}
 836
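/*
 * Illustrative usage (hypothetical caller): pins are refcounted, so
 * every successful omap_gem_pin() must be balanced by omap_gem_unpin().
 */
#if 0
static int example_scanout_setup(struct drm_gem_object *obj)
{
	dma_addr_t dma_addr;
	int ret;

	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		return ret;
	/* ... program the display controller with dma_addr ... */
	omap_gem_unpin(obj);
	return 0;
}
#endif
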
 837/**
 838 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 839 * @obj: the GEM object
 840 *
 841 * omap_gem_unpin() without locking.
 842 */
 843static void omap_gem_unpin_locked(struct drm_gem_object *obj)
 844{
 845	struct omap_drm_private *priv = obj->dev->dev_private;
 846	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 847	int ret;
 848
 849	if (omap_gem_is_contiguous(omap_obj))
 850		return;
 851
 852	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
 853		if (omap_obj->sgt) {
 854			sg_free_table(omap_obj->sgt);
 855			kfree(omap_obj->sgt);
 856			omap_obj->sgt = NULL;
 857		}
 858		if (!(omap_obj->flags & OMAP_BO_SCANOUT))
 859			return;
 860		if (priv->has_dmm) {
 861			ret = tiler_unpin(omap_obj->block);
 862			if (ret) {
 863				dev_err(obj->dev->dev,
 864					"could not unpin pages: %d\n", ret);
 865			}
 866			ret = tiler_release(omap_obj->block);
 867			if (ret) {
 868				dev_err(obj->dev->dev,
 869					"could not release unmap: %d\n", ret);
 870			}
 871			omap_obj->dma_addr = 0;
 872			omap_obj->block = NULL;
 873		}
 874	}
 875}
 876
 877/**
 878 * omap_gem_unpin() - Unpin a GEM object from memory
 879 * @obj: the GEM object
 880 *
 881 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 882 * reference-counted, the actual unpin will only be performed when the number
 883 * of calls to this function matches the number of calls to omap_gem_pin().
 884 */
 885void omap_gem_unpin(struct drm_gem_object *obj)
 886{
 887	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 888
 889	mutex_lock(&omap_obj->lock);
 890	omap_gem_unpin_locked(obj);
 891	mutex_unlock(&omap_obj->lock);
 892}
 893
 894/* Get rotated scanout address (only valid if already pinned), at the
 895 * specified orientation and x,y offset from top-left corner of buffer
 896 * (only valid for tiled 2d buffers)
 897 */
 898int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 899		int x, int y, dma_addr_t *dma_addr)
 900{
 901	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 902	int ret = -EINVAL;
 903
 904	mutex_lock(&omap_obj->lock);
 905
 906	if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
 907			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
 908		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 909		ret = 0;
 910	}
 911
 912	mutex_unlock(&omap_obj->lock);
 913
 914	return ret;
 915}
 916
 917/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 918int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 919{
 920	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 921	int ret = -EINVAL;
 922	if (omap_obj->flags & OMAP_BO_TILED_MASK)
 923		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 924	return ret;
 925}
 926
 927/* if !remap, and we don't have pages backing, then fail, rather than
 928 * increasing the pin count (which we don't really do yet anyway,
 929 * because we don't support swapping pages back out).  And 'remap'
 930 * might not be quite the right name, but I wanted to keep it working
 931 * similarly to omap_gem_pin().  Note though that mutex is not
 932 * acquired if !remap (because this can be called in atomic ctxt),
 933 * but probably omap_gem_unpin() should be changed to work in the
 934 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 935 * required (and should not be made).
 936 */
 937int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 938		bool remap)
 939{
 940	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 941	int ret = 0;
 942
 943	mutex_lock(&omap_obj->lock);
 944
 945	if (remap) {
 946		ret = omap_gem_attach_pages(obj);
 947		if (ret)
 948			goto unlock;
 949	}
 950
 951	if (!omap_obj->pages) {
 952		ret = -ENOMEM;
 953		goto unlock;
 954	}
 955
 956	*pages = omap_obj->pages;
 957
 958unlock:
 959	mutex_unlock(&omap_obj->lock);
 960
 961	return ret;
 962}
 963
 964/* release pages when DMA no longer being performed */
 965int omap_gem_put_pages(struct drm_gem_object *obj)
 966{
 967	/* do something here if we dynamically attach/detach pages.. at
 968	 * least they would no longer need to be pinned if everyone has
 969	 * released the pages..
 970	 */
 971	return 0;
 972}
 973
 974struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
 975		enum dma_data_direction dir)
 976{
 977	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 978	dma_addr_t addr;
 979	struct sg_table *sgt;
 980	struct scatterlist *sg;
 981	unsigned int count, len, stride, i;
 982	int ret;
 983
 984	ret = omap_gem_pin(obj, &addr);
 985	if (ret)
 986		return ERR_PTR(ret);
 987
 988	mutex_lock(&omap_obj->lock);
 989
 990	sgt = omap_obj->sgt;
 991	if (sgt)
 992		goto out;
 993
 994	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 995	if (!sgt) {
 996		ret = -ENOMEM;
 997		goto err_unpin;
 998	}
 999
1000	if (addr) {
1001		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1002			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
1003
1004			len = omap_obj->width << (int)fmt;
1005			count = omap_obj->height;
1006			stride = tiler_stride(fmt, 0);
1007		} else {
1008			len = obj->size;
1009			count = 1;
1010			stride = 0;
1011		}
1012	} else {
1013		count = obj->size >> PAGE_SHIFT;
1014	}
1015
1016	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1017	if (ret)
1018		goto err_free;
1019
1020	/* this must be after omap_gem_pin() to ensure we have pages attached */
1021	omap_gem_dma_sync_buffer(obj, dir);
1022
1023	if (addr) {
1024		for_each_sg(sgt->sgl, sg, count, i) {
1025			sg_set_page(sg, phys_to_page(addr), len,
1026				offset_in_page(addr));
1027			sg_dma_address(sg) = addr;
1028			sg_dma_len(sg) = len;
1029
1030			addr += stride;
1031		}
1032	} else {
1033		for_each_sg(sgt->sgl, sg, count, i) {
1034			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1035			sg_dma_address(sg) = omap_obj->dma_addrs[i];
1036			sg_dma_len(sg) = PAGE_SIZE;
1037		}
1038	}
1039
1040	omap_obj->sgt = sgt;
1041out:
1042	mutex_unlock(&omap_obj->lock);
1043	return sgt;
1044
1045err_free:
1046	kfree(sgt);
1047err_unpin:
1048	mutex_unlock(&omap_obj->lock);
1049	omap_gem_unpin(obj);
1050	return ERR_PTR(ret);
1051}
1052
1053void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1054{
1055	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1056
1057	if (WARN_ON(omap_obj->sgt != sgt))
1058		return;
1059
1060	omap_gem_unpin(obj);
1061}
1062
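/*
 * Illustrative pairing (hypothetical exporter path): the sg_table
 * returned by omap_gem_get_sg() is cached in omap_obj->sgt and holds a
 * pin on the object, so it must be returned with omap_gem_put_sg().
 */
#if 0
struct sg_table *sgt = omap_gem_get_sg(obj, DMA_BIDIRECTIONAL);

if (!IS_ERR(sgt)) {
	/* ... hand sgt to the importing device ... */
	omap_gem_put_sg(obj, sgt);
}
#endif
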
1063#ifdef CONFIG_DRM_FBDEV_EMULATION
1064/*
1065 * Get kernel virtual address for CPU access.. this more or less only
1066 * exists for omap_fbdev.
1067 */
1068void *omap_gem_vaddr(struct drm_gem_object *obj)
1069{
1070	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1071	void *vaddr;
1072	int ret;
1073
1074	mutex_lock(&omap_obj->lock);
1075
1076	if (!omap_obj->vaddr) {
1077		ret = omap_gem_attach_pages(obj);
1078		if (ret) {
1079			vaddr = ERR_PTR(ret);
1080			goto unlock;
1081		}
1082
1083		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1084				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1085	}
1086
1087	vaddr = omap_obj->vaddr;
1088
1089unlock:
1090	mutex_unlock(&omap_obj->lock);
1091	return vaddr;
1092}
1093#endif
1094
1095/* -----------------------------------------------------------------------------
1096 * Power Management
1097 */
1098
1099#ifdef CONFIG_PM
1100/* re-pin objects in DMM in resume path: */
1101int omap_gem_resume(struct drm_device *dev)
1102{
1103	struct omap_drm_private *priv = dev->dev_private;
1104	struct omap_gem_object *omap_obj;
1105	int ret = 0;
1106
1107	mutex_lock(&priv->list_lock);
1108	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1109		if (omap_obj->block) {
1110			struct drm_gem_object *obj = &omap_obj->base;
1111			u32 npages = obj->size >> PAGE_SHIFT;
1112
1113			WARN_ON(!omap_obj->pages);  /* this can't happen */
1114			ret = tiler_pin(omap_obj->block,
1115					omap_obj->pages, npages,
1116					omap_obj->roll, true);
1117			if (ret) {
1118				dev_err(dev->dev, "could not repin: %d\n", ret);
1119				goto done;
1120			}
1121		}
1122	}
1123
1124done:
1125	mutex_unlock(&priv->list_lock);
1126	return ret;
1127}
1128#endif
1129
1130/* -----------------------------------------------------------------------------
1131 * DebugFS
1132 */
1133
1134#ifdef CONFIG_DEBUG_FS
1135void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1136{
1137	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1138	u64 off;
1139
1140	off = drm_vma_node_start(&obj->vma_node);
1141
1142	mutex_lock(&omap_obj->lock);
1143
1144	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1145			omap_obj->flags, obj->name, kref_read(&obj->refcount),
1146			off, &omap_obj->dma_addr,
1147			refcount_read(&omap_obj->pin_cnt),
1148			omap_obj->vaddr, omap_obj->roll);
1149
1150	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1151		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1152		if (omap_obj->block) {
1153			struct tcm_area *area = &omap_obj->block->area;
1154			seq_printf(m, " (%dx%d, %dx%d)",
1155					area->p0.x, area->p0.y,
1156					area->p1.x, area->p1.y);
1157		}
1158	} else {
1159		seq_printf(m, " %zu", obj->size);
1160	}
1161
1162	mutex_unlock(&omap_obj->lock);
1163
1164	seq_printf(m, "\n");
1165}
1166
1167void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1168{
1169	struct omap_gem_object *omap_obj;
1170	int count = 0;
1171	size_t size = 0;
1172
1173	list_for_each_entry(omap_obj, list, mm_list) {
1174		struct drm_gem_object *obj = &omap_obj->base;
1175		seq_printf(m, "   ");
1176		omap_gem_describe(obj, m);
1177		count++;
1178		size += obj->size;
1179	}
1180
1181	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1182}
1183#endif
1184
1185/* -----------------------------------------------------------------------------
1186 * Constructor & Destructor
1187 */
1188
1189static void omap_gem_free_object(struct drm_gem_object *obj)
1190{
1191	struct drm_device *dev = obj->dev;
1192	struct omap_drm_private *priv = dev->dev_private;
1193	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1194
1195	omap_gem_evict(obj);
1196
1197	mutex_lock(&priv->list_lock);
1198	list_del(&omap_obj->mm_list);
1199	mutex_unlock(&priv->list_lock);
1200
1201	/*
1202	 * We own the sole reference to the object at this point, but to keep
1203	 * lockdep happy, we must still take the omap_obj->lock to call
1204	 * omap_gem_detach_pages(). This should hardly make any difference as
1205	 * there can't be any lock contention.
1206	 */
1207	mutex_lock(&omap_obj->lock);
1208
1209	/* The object should not be pinned. */
1210	WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1211
1212	if (omap_obj->pages) {
1213		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1214			kfree(omap_obj->pages);
1215		else
1216			omap_gem_detach_pages(obj);
1217	}
1218
1219	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1220		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1221			    omap_obj->dma_addr);
1222	} else if (omap_obj->vaddr) {
1223		vunmap(omap_obj->vaddr);
1224	} else if (obj->import_attach) {
1225		drm_prime_gem_destroy(obj, omap_obj->sgt);
1226	}
1227
1228	mutex_unlock(&omap_obj->lock);
1229
1230	drm_gem_object_release(obj);
1231
1232	mutex_destroy(&omap_obj->lock);
1233
1234	kfree(omap_obj);
1235}
1236
1237static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1238{
1239	struct omap_drm_private *priv = dev->dev_private;
1240
1241	switch (flags & OMAP_BO_CACHE_MASK) {
1242	case OMAP_BO_CACHED:
1243	case OMAP_BO_WC:
1244	case OMAP_BO_CACHE_MASK:
1245		break;
1246
1247	default:
1248		return false;
1249	}
1250
1251	if (flags & OMAP_BO_TILED_MASK) {
1252		if (!priv->usergart)
1253			return false;
1254
1255		switch (flags & OMAP_BO_TILED_MASK) {
1256		case OMAP_BO_TILED_8:
1257		case OMAP_BO_TILED_16:
1258		case OMAP_BO_TILED_32:
1259			break;
1260
1261		default:
1262			return false;
1263		}
1264	}
1265
1266	return true;
1267}
1268
1269static const struct vm_operations_struct omap_gem_vm_ops = {
1270	.fault = omap_gem_fault,
1271	.open = drm_gem_vm_open,
1272	.close = drm_gem_vm_close,
1273};
1274
1275static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1276	.free = omap_gem_free_object,
1277	.export = omap_gem_prime_export,
1278	.mmap = omap_gem_object_mmap,
1279	.vm_ops = &omap_gem_vm_ops,
1280};
1281
1282/* GEM buffer object constructor */
1283struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1284		union omap_gem_size gsize, u32 flags)
1285{
1286	struct omap_drm_private *priv = dev->dev_private;
1287	struct omap_gem_object *omap_obj;
1288	struct drm_gem_object *obj;
1289	struct address_space *mapping;
1290	size_t size;
1291	int ret;
1292
1293	if (!omap_gem_validate_flags(dev, flags))
1294		return NULL;
1295
1296	/* Validate the flags and compute the memory and cache flags. */
1297	if (flags & OMAP_BO_TILED_MASK) {
1298		/*
1299		 * Tiled buffers are always backed by shmem pages. When they are
1300		 * scanned out, they are remapped into DMM/TILER.
1301		 */
1302		flags |= OMAP_BO_MEM_SHMEM;
1303
1304		/*
1305		 * Currently don't allow cached buffers. There is some caching
1306		 * stuff that needs to be handled better.
1307		 */
1308		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1309		flags |= tiler_get_cpu_cache_flags();
1310	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1311		/*
1312		 * If we don't have DMM, we must allocate scanout buffers
1313		 * from contiguous DMA memory.
1314		 */
1315		flags |= OMAP_BO_MEM_DMA_API;
1316	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1317		/*
1318		 * All other buffers not backed by dma_buf are shmem-backed.
1319		 */
1320		flags |= OMAP_BO_MEM_SHMEM;
1321	}
1322
1323	/* Allocate and initialize the OMAP GEM object. */
1324	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1325	if (!omap_obj)
1326		return NULL;
1327
1328	obj = &omap_obj->base;
1329	omap_obj->flags = flags;
1330	mutex_init(&omap_obj->lock);
1331
1332	if (flags & OMAP_BO_TILED_MASK) {
1333		/*
1334		 * For tiled buffers align dimensions to slot boundaries and
1335		 * calculate size based on aligned dimensions.
1336		 */
1337		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1338			    &gsize.tiled.height);
1339
1340		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1341				  gsize.tiled.height);
1342
1343		omap_obj->width = gsize.tiled.width;
1344		omap_obj->height = gsize.tiled.height;
1345	} else {
1346		size = PAGE_ALIGN(gsize.bytes);
1347	}
1348
1349	obj->funcs = &omap_gem_object_funcs;
1350
1351	/* Initialize the GEM object. */
1352	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1353		drm_gem_private_object_init(dev, obj, size);
1354	} else {
1355		ret = drm_gem_object_init(dev, obj, size);
1356		if (ret)
1357			goto err_free;
1358
1359		mapping = obj->filp->f_mapping;
1360		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1361	}
1362
1363	/* Allocate memory if needed. */
1364	if (flags & OMAP_BO_MEM_DMA_API) {
1365		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1366					       &omap_obj->dma_addr,
1367					       GFP_KERNEL);
1368		if (!omap_obj->vaddr)
1369			goto err_release;
1370	}
1371
1372	mutex_lock(&priv->list_lock);
1373	list_add(&omap_obj->mm_list, &priv->obj_list);
1374	mutex_unlock(&priv->list_lock);
1375
1376	return obj;
1377
1378err_release:
1379	drm_gem_object_release(obj);
1380err_free:
1381	kfree(omap_obj);
1382	return NULL;
1383}
1384
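/*
 * Illustrative constructor call (hypothetical sizes): gsize.bytes is
 * used for linear buffers, gsize.tiled for OMAP_BO_TILED_* buffers;
 * NULL is returned on failure.
 */
#if 0
union omap_gem_size gsize = {
	.tiled = { .width = 1920, .height = 1080 },
};
struct drm_gem_object *bo =
	omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_SCANOUT);
#endif
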
1385struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1386					   struct sg_table *sgt)
1387{
1388	struct omap_drm_private *priv = dev->dev_private;
1389	struct omap_gem_object *omap_obj;
1390	struct drm_gem_object *obj;
1391	union omap_gem_size gsize;
1392
1393	/* Without a DMM only physically contiguous buffers can be supported. */
1394	if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
1395		return ERR_PTR(-EINVAL);
1396
1397	gsize.bytes = PAGE_ALIGN(size);
1398	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1399	if (!obj)
1400		return ERR_PTR(-ENOMEM);
1401
1402	omap_obj = to_omap_bo(obj);
1403
1404	mutex_lock(&omap_obj->lock);
1405
1406	omap_obj->sgt = sgt;
1407
1408	if (omap_gem_sgt_is_contiguous(sgt, size)) {
1409		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1410	} else {
1411		/* Create pages list from sgt */
1412		struct page **pages;
1413		unsigned int npages;
1414		int ret;
1415
1416		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1417		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1418		if (!pages) {
1419			mutex_unlock(&omap_obj->lock);
1420			omap_gem_free_object(obj);
1421			return ERR_PTR(-ENOMEM);
1422		}
1423
1424		omap_obj->pages = pages;
1425		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1426		if (ret) {
1427			mutex_unlock(&omap_obj->lock);
1428			omap_gem_free_object(obj);
1429			return ERR_PTR(-ENOMEM);
1430		}
1431	}
1432
1433	mutex_unlock(&omap_obj->lock);
1434
1435	return obj;
1436}
1437
1438/* convenience method to construct a GEM buffer object and a userspace handle */
1439int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1440		union omap_gem_size gsize, u32 flags, u32 *handle)
1441{
1442	struct drm_gem_object *obj;
1443	int ret;
1444
1445	obj = omap_gem_new(dev, gsize, flags);
1446	if (!obj)
1447		return -ENOMEM;
1448
1449	ret = drm_gem_handle_create(file, obj, handle);
1450	if (ret) {
1451		omap_gem_free_object(obj);
1452		return ret;
1453	}
1454
1455	/* drop reference from allocate - handle holds it now */
1456	drm_gem_object_put(obj);
1457
1458	return 0;
1459}
1460
1461/* -----------------------------------------------------------------------------
1462 * Init & Cleanup
1463 */
1464
1465/* If DMM is used, we need to set some stuff up.. */
1466void omap_gem_init(struct drm_device *dev)
1467{
1468	struct omap_drm_private *priv = dev->dev_private;
1469	struct omap_drm_usergart *usergart;
1470	const enum tiler_fmt fmts[] = {
1471			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1472	};
1473	int i, j;
1474
1475	if (!dmm_is_available()) {
1476		/* DMM only supported on OMAP4 and later, so this isn't fatal */
1477		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1478		return;
1479	}
1480
1481	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
1482	if (!usergart)
1483		return;
1484
1485	/* reserve 4k aligned/wide regions for userspace mappings: */
1486	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1487		u16 h = 1, w = PAGE_SIZE >> i;
1488
1489		tiler_align(fmts[i], &w, &h);
1490		/* note: since each region is 1 4kb page wide, and has the
1491		 * minimum number of rows, the height ends up being the same
1492		 * as the # of pages in the region
1493		 */
1494		usergart[i].height = h;
1495		usergart[i].height_shift = ilog2(h);
1496		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1497		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1498		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1499			struct omap_drm_usergart_entry *entry;
1500			struct tiler_block *block;
1501
1502			entry = &usergart[i].entry[j];
1503			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1504			if (IS_ERR(block)) {
1505				dev_err(dev->dev,
1506						"reserve failed: %d, %d, %ld\n",
1507						i, j, PTR_ERR(block));
1508				return;
1509			}
1510			entry->dma_addr = tiler_ssptr(block);
1511			entry->block = block;
1512
1513			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1514					&entry->dma_addr,
1515					usergart[i].stride_pfn << PAGE_SHIFT);
1516		}
1517	}
1518
1519	priv->usergart = usergart;
1520	priv->has_dmm = true;
1521}
1522
1523void omap_gem_deinit(struct drm_device *dev)
1524{
1525	struct omap_drm_private *priv = dev->dev_private;
1526
1527	/* I believe we can rely on there being no more outstanding GEM
1528	 * objects which could depend on usergart/dmm at this point.
1529	 */
1530	kfree(priv->usergart);
1531}