1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
4 * Author: Rob Clark <rob.clark@linaro.org>
5 */
6
7#include <linux/dma-mapping.h>
8#include <linux/seq_file.h>
9#include <linux/shmem_fs.h>
10#include <linux/spinlock.h>
11#include <linux/pfn_t.h>
12
13#include <drm/drm_prime.h>
14#include <drm/drm_vma_manager.h>
15
16#include "omap_drv.h"
17#include "omap_dmm_tiler.h"
18
19/*
20 * GEM buffer object implementation.
21 */
22
23/* note: we use upper 8 bits of flags for driver-internal flags: */
24#define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
25#define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
26#define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
27
28struct omap_gem_object {
29 struct drm_gem_object base;
30
31 struct list_head mm_list;
32
33 u32 flags;
34
35 /** width/height for tiled formats (rounded up to slot boundaries) */
36 u16 width, height;
37
38 /** roll applied when mapping to DMM */
39 u32 roll;
40
41 /** protects pin_cnt, block, pages, dma_addrs and vaddr */
42 struct mutex lock;
43
44 /**
45 * dma_addr contains the buffer DMA address. It is valid for
46 *
47 * - buffers allocated through the DMA mapping API (with the
48 * OMAP_BO_MEM_DMA_API flag set)
49 *
50 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
51 * if they are physically contiguous
52 *
53 * - buffers mapped through the TILER when pin_cnt is not zero, in which
54 * case the DMA address points to the TILER aperture
55 *
56 * Physically contiguous buffers have their DMA address equal to the
57 * physical address as we don't remap those buffers through the TILER.
58 *
59 * Buffers mapped to the TILER have their DMA address pointing to the
60 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
61 * the DMA address must be accessed through omap_gem_pin() to ensure
62 * that the mapping won't disappear unexpectedly. References must be
63 * released with omap_gem_unpin().
64 */
65 dma_addr_t dma_addr;
66
67 /**
68 * # of users
69 */
70 refcount_t pin_cnt;
71
72 /**
73	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
74 * is set and the sgt field is valid.
75 */
76 struct sg_table *sgt;
77
78 /**
79 * tiler block used when buffer is remapped in DMM/TILER.
80 */
81 struct tiler_block *block;
82
83 /**
84 * Array of backing pages, if allocated. Note that pages are never
85 * allocated for buffers originally allocated from contiguous memory
86 */
87 struct page **pages;
88
89 /** addresses corresponding to pages in above array */
90 dma_addr_t *dma_addrs;
91
92 /**
93 * Virtual address, if mapped.
94 */
95 void *vaddr;
96};
97
98#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
99
100/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
101 * not necessarily pinned in TILER all the time, and (b) when they are
102 * they are not necessarily page aligned, we reserve one or more small
103 * regions in each of the 2d containers to use as a user-GART where we
104 * can create a second page-aligned mapping of parts of the buffer
105 * being accessed from userspace.
106 *
107 * Note that we could optimize slightly when we know that multiple
108 * tiler containers are backed by the same PAT.. but I'll leave that
109 * for later..
110 */
111#define NUM_USERGART_ENTRIES 2
112struct omap_drm_usergart_entry {
113 struct tiler_block *block; /* the reserved tiler block */
114 dma_addr_t dma_addr;
115 struct drm_gem_object *obj; /* the current pinned obj */
116 pgoff_t obj_pgoff; /* page offset of obj currently
117 mapped in */
118};
119
120struct omap_drm_usergart {
121 struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
122 int height; /* height in rows */
123 int height_shift; /* ilog2(height in rows) */
124 int slot_shift; /* ilog2(width per slot) */
125 int stride_pfn; /* stride in pages */
126 int last; /* index of last used entry */
127};
128
129/* -----------------------------------------------------------------------------
130 * Helpers
131 */
132
133/** get mmap offset */
134u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
135{
136 struct drm_device *dev = obj->dev;
137 int ret;
138 size_t size;
139
140 /* Make it mmapable */
141 size = omap_gem_mmap_size(obj);
142 ret = drm_gem_create_mmap_offset_size(obj, size);
143 if (ret) {
144 dev_err(dev->dev, "could not allocate mmap offset\n");
145 return 0;
146 }
147
148 return drm_vma_node_offset_addr(&obj->vma_node);
149}
150
151static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
152{
153 return !(drm_prime_get_contiguous_size(sgt) < size);
154}
155
156static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
157{
158 if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
159 return true;
160
161 if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
162 omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
163 return true;
164
165 return false;
166}
167
168/* -----------------------------------------------------------------------------
169 * Eviction
170 */
171
172static void omap_gem_evict_entry(struct drm_gem_object *obj,
173 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
174{
175 struct omap_gem_object *omap_obj = to_omap_bo(obj);
176 struct omap_drm_private *priv = obj->dev->dev_private;
177 int n = priv->usergart[fmt].height;
178 size_t size = PAGE_SIZE * n;
179 loff_t off = omap_gem_mmap_offset(obj) +
180 (entry->obj_pgoff << PAGE_SHIFT);
181 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
182
183 if (m > 1) {
184 int i;
185		/* if stride is larger than PAGE_SIZE then sparse mapping: */
186 for (i = n; i > 0; i--) {
187 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
188 off, PAGE_SIZE, 1);
189 off += PAGE_SIZE * m;
190 }
191 } else {
192 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
193 off, size, 1);
194 }
195
196 entry->obj = NULL;
197}
198
199/* Evict a buffer from usergart, if it is mapped there */
200static void omap_gem_evict(struct drm_gem_object *obj)
201{
202 struct omap_gem_object *omap_obj = to_omap_bo(obj);
203 struct omap_drm_private *priv = obj->dev->dev_private;
204
205 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
206 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
207 int i;
208
209 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
210 struct omap_drm_usergart_entry *entry =
211 &priv->usergart[fmt].entry[i];
212
213 if (entry->obj == obj)
214 omap_gem_evict_entry(obj, fmt, entry);
215 }
216 }
217}
218
219/* -----------------------------------------------------------------------------
220 * Page Management
221 */
222
223/*
224 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
225 * held.
226 */
227static int omap_gem_attach_pages(struct drm_gem_object *obj)
228{
229 struct drm_device *dev = obj->dev;
230 struct omap_gem_object *omap_obj = to_omap_bo(obj);
231 struct page **pages;
232 int npages = obj->size >> PAGE_SHIFT;
233 int i, ret;
234 dma_addr_t *addrs;
235
236 lockdep_assert_held(&omap_obj->lock);
237
238 /*
239 * If not using shmem (in which case backing pages don't need to be
240 * allocated) or if pages are already allocated we're done.
241 */
242 if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
243 return 0;
244
245 pages = drm_gem_get_pages(obj);
246 if (IS_ERR(pages)) {
247 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
248 return PTR_ERR(pages);
249 }
250
251 /* for non-cached buffers, ensure the new pages are clean because
252 * DSS, GPU, etc. are not cache coherent:
253 */
254 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
255 addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
256 if (!addrs) {
257 ret = -ENOMEM;
258 goto free_pages;
259 }
260
261 for (i = 0; i < npages; i++) {
262 addrs[i] = dma_map_page(dev->dev, pages[i],
263 0, PAGE_SIZE, DMA_TO_DEVICE);
264
265 if (dma_mapping_error(dev->dev, addrs[i])) {
266 dev_warn(dev->dev,
267 "%s: failed to map page\n", __func__);
268
269 for (i = i - 1; i >= 0; --i) {
270 dma_unmap_page(dev->dev, addrs[i],
271 PAGE_SIZE, DMA_TO_DEVICE);
272 }
273
274 ret = -ENOMEM;
275 goto free_addrs;
276 }
277 }
278 } else {
279 addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
280 if (!addrs) {
281 ret = -ENOMEM;
282 goto free_pages;
283 }
284 }
285
286 omap_obj->dma_addrs = addrs;
287 omap_obj->pages = pages;
288
289 return 0;
290
291free_addrs:
292 kfree(addrs);
293free_pages:
294 drm_gem_put_pages(obj, pages, true, false);
295
296 return ret;
297}
298
299/* Release backing pages. Must be called with the omap_obj.lock held. */
300static void omap_gem_detach_pages(struct drm_gem_object *obj)
301{
302 struct omap_gem_object *omap_obj = to_omap_bo(obj);
303 unsigned int npages = obj->size >> PAGE_SHIFT;
304 unsigned int i;
305
306 lockdep_assert_held(&omap_obj->lock);
307
308 for (i = 0; i < npages; i++) {
309 if (omap_obj->dma_addrs[i])
310 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
311 PAGE_SIZE, DMA_TO_DEVICE);
312 }
313
314 kfree(omap_obj->dma_addrs);
315 omap_obj->dma_addrs = NULL;
316
317 drm_gem_put_pages(obj, omap_obj->pages, true, false);
318 omap_obj->pages = NULL;
319}
320
321/* get buffer flags */
322u32 omap_gem_flags(struct drm_gem_object *obj)
323{
324 return to_omap_bo(obj)->flags;
325}
326
327/** get mmap size */
328size_t omap_gem_mmap_size(struct drm_gem_object *obj)
329{
330 struct omap_gem_object *omap_obj = to_omap_bo(obj);
331 size_t size = obj->size;
332
333 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
334 /* for tiled buffers, the virtual size has stride rounded up
335 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
336 * 32kb later!). But we don't back the entire buffer with
337 * pages, only the valid picture part.. so need to adjust for
338 * this in the size used to mmap and generate mmap offset
339 */
340 size = tiler_vsize(gem2fmt(omap_obj->flags),
341 omap_obj->width, omap_obj->height);
342 }
343
344 return size;
345}
346
347/* -----------------------------------------------------------------------------
348 * Fault Handling
349 */
350
351/* Normal handling for the case of faulting in non-tiled buffers */
352static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
353 struct vm_area_struct *vma, struct vm_fault *vmf)
354{
355 struct omap_gem_object *omap_obj = to_omap_bo(obj);
356 unsigned long pfn;
357 pgoff_t pgoff;
358
359 /* We don't use vmf->pgoff since that has the fake offset: */
360 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
361
362 if (omap_obj->pages) {
363 omap_gem_cpu_sync_page(obj, pgoff);
364 pfn = page_to_pfn(omap_obj->pages[pgoff]);
365 } else {
366 BUG_ON(!omap_gem_is_contiguous(omap_obj));
367 pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
368 }
369
370 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
371 pfn, pfn << PAGE_SHIFT);
372
373 return vmf_insert_mixed(vma, vmf->address,
374 __pfn_to_pfn_t(pfn, PFN_DEV));
375}
376
377/* Special handling for the case of faulting in 2d tiled buffers */
378static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
379 struct vm_area_struct *vma, struct vm_fault *vmf)
380{
381 struct omap_gem_object *omap_obj = to_omap_bo(obj);
382 struct omap_drm_private *priv = obj->dev->dev_private;
383 struct omap_drm_usergart_entry *entry;
384 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
385 struct page *pages[64]; /* XXX is this too much to have on stack? */
386 unsigned long pfn;
387 pgoff_t pgoff, base_pgoff;
388 unsigned long vaddr;
389 int i, err, slots;
390 vm_fault_t ret = VM_FAULT_NOPAGE;
391
392 /*
393 * Note the height of the slot is also equal to the number of pages
394 * that need to be mapped in to fill 4kb wide CPU page. If the slot
395 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
396 */
397 const int n = priv->usergart[fmt].height;
398 const int n_shift = priv->usergart[fmt].height_shift;
399
400 /*
401 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
402	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
403 * into account in some of the math, so figure out virtual stride
404 * in pages
405 */
406 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
407
408 /* We don't use vmf->pgoff since that has the fake offset: */
409 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
410
411 /*
412 * Actual address we start mapping at is rounded down to previous slot
413 * boundary in the y direction:
414 */
415 base_pgoff = round_down(pgoff, m << n_shift);
416
417 /* figure out buffer width in slots */
418 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
419
420 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
421
422 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
423
424 /* evict previous buffer using this usergart entry, if any: */
425 if (entry->obj)
426 omap_gem_evict_entry(entry->obj, fmt, entry);
427
428 entry->obj = obj;
429 entry->obj_pgoff = base_pgoff;
430
431 /* now convert base_pgoff to phys offset from virt offset: */
432 base_pgoff = (base_pgoff >> n_shift) * slots;
433
434 /* for wider-than 4k.. figure out which part of the slot-row we want: */
435 if (m > 1) {
436 int off = pgoff % m;
437 entry->obj_pgoff += off;
438 base_pgoff /= m;
439 slots = min(slots - (off << n_shift), n);
440 base_pgoff += off << n_shift;
441 vaddr += off << PAGE_SHIFT;
442 }
443
444 /*
445 * Map in pages. Beyond the valid pixel part of the buffer, we set
446 * pages[i] to NULL to get a dummy page mapped in.. if someone
447 * reads/writes it they will get random/undefined content, but at
448 * least it won't be corrupting whatever other random page used to
449 * be mapped in, or other undefined behavior.
450 */
451 memcpy(pages, &omap_obj->pages[base_pgoff],
452 sizeof(struct page *) * slots);
453 memset(pages + slots, 0,
454 sizeof(struct page *) * (n - slots));
455
456 err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
457 if (err) {
458 ret = vmf_error(err);
459 dev_err(obj->dev->dev, "failed to pin: %d\n", err);
460 return ret;
461 }
462
463 pfn = entry->dma_addr >> PAGE_SHIFT;
464
465 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
466 pfn, pfn << PAGE_SHIFT);
467
468 for (i = n; i > 0; i--) {
469 ret = vmf_insert_mixed(vma,
470 vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
471 if (ret & VM_FAULT_ERROR)
472 break;
473 pfn += priv->usergart[fmt].stride_pfn;
474 vaddr += PAGE_SIZE * m;
475 }
476
477 /* simple round-robin: */
478 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
479 % NUM_USERGART_ENTRIES;
480
481 return ret;
482}
483
484/**
485 * omap_gem_fault - pagefault handler for GEM objects
486 * @vmf: fault detail
487 *
488 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
489 * does most of the work for us including the actual map/unmap calls
490 * but we need to do the actual page work.
491 *
492 * The VMA was set up by GEM. In doing so it also ensured that the
493 * vma->vm_private_data points to the GEM object that is backing this
494 * mapping.
495 */
496static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
497{
498 struct vm_area_struct *vma = vmf->vma;
499 struct drm_gem_object *obj = vma->vm_private_data;
500 struct omap_gem_object *omap_obj = to_omap_bo(obj);
501 int err;
502 vm_fault_t ret;
503
504 /* Make sure we don't parallel update on a fault, nor move or remove
505 * something from beneath our feet
506 */
507 mutex_lock(&omap_obj->lock);
508
509 /* if a shmem backed object, make sure we have pages attached now */
510 err = omap_gem_attach_pages(obj);
511 if (err) {
512 ret = vmf_error(err);
513 goto fail;
514 }
515
516 /* where should we do corresponding put_pages().. we are mapping
517 * the original page, rather than thru a GART, so we can't rely
518	 * on eviction to trigger this. But munmap() of all mappings should
519 * probably trigger put_pages()?
520 */
521
522 if (omap_obj->flags & OMAP_BO_TILED_MASK)
523 ret = omap_gem_fault_2d(obj, vma, vmf);
524 else
525 ret = omap_gem_fault_1d(obj, vma, vmf);
526
527
528fail:
529 mutex_unlock(&omap_obj->lock);
530 return ret;
531}
532
533static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
534{
535 struct omap_gem_object *omap_obj = to_omap_bo(obj);
536
537 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
538
539 if (omap_obj->flags & OMAP_BO_WC) {
540 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
541 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
542 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
543 } else {
544 /*
545 * We do have some private objects, at least for scanout buffers
546 * on hardware without DMM/TILER. But these are allocated write-
547 * combine
548 */
549 if (WARN_ON(!obj->filp))
550 return -EINVAL;
551
552 /*
553 * Shunt off cached objs to shmem file so they have their own
554 * address_space (so unmap_mapping_range does what we want,
555 * in particular in the case of mmap'd dmabufs)
556 */
557 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
558 vma_set_file(vma, obj->filp);
559
560 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
561 }
562
563 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
564
565 return 0;
566}
567
568/* -----------------------------------------------------------------------------
569 * Dumb Buffers
570 */
571
572/**
573 * omap_gem_dumb_create - create a dumb buffer
574 * @file: our client file
575 * @dev: our device
576 * @args: the requested arguments copied from userspace
577 *
578 * Allocate a buffer suitable for use for a frame buffer of the
579 * form described by user space. Give userspace a handle by which
580 * to reference it.
581 */
582int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
583 struct drm_mode_create_dumb *args)
584{
585 union omap_gem_size gsize;
586
587 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
588
589 args->size = PAGE_ALIGN(args->pitch * args->height);
590
591 gsize = (union omap_gem_size){
592 .bytes = args->size,
593 };
594
595 return omap_gem_new_handle(dev, file, gsize,
596 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
597}
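/*
 * Worked example (illustrative only, not part of the driver): for a
 * 1920x1080 XRGB8888 dumb buffer (bpp = 32) the computation above gives
 *
 *	pitch = DIV_ROUND_UP(1920 * 32, 8)  = 7680 bytes
 *	size  = PAGE_ALIGN(7680 * 1080)     = 8294400 bytes (2025 pages)
 *
 * and the resulting object is allocated write-combined and flagged for
 * scanout.
 */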
598
599/**
600 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
601 * @file: our drm client file
602 * @dev: drm device
603 * @handle: GEM handle to the object (from dumb_create)
604 * @offset: memory map offset placeholder
605 *
606 * Do the necessary setup to allow the mapping of the frame buffer
607 * into user memory. We don't have to do much here at the moment.
608 */
609int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
610 u32 handle, u64 *offset)
611{
612 struct drm_gem_object *obj;
613 int ret = 0;
614
615 /* GEM does all our handle to object mapping */
616 obj = drm_gem_object_lookup(file, handle);
617 if (obj == NULL) {
618 ret = -ENOENT;
619 goto fail;
620 }
621
622 *offset = omap_gem_mmap_offset(obj);
623
624 drm_gem_object_put(obj);
625
626fail:
627 return ret;
628}
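/*
 * Usage sketch (userspace side, illustrative only): a client typically
 * obtains the fake offset via DRM_IOCTL_MODE_MAP_DUMB and then mmaps the
 * buffer through the DRM device fd, roughly
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 *
 * Faults on that mapping are serviced by omap_gem_fault() above.
 */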
629
630#ifdef CONFIG_DRM_FBDEV_EMULATION
631/* Set scrolling position. This allows us to implement fast scrolling
632 * for console.
633 *
634 * Call only from non-atomic contexts.
635 */
636int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
637{
638 struct omap_gem_object *omap_obj = to_omap_bo(obj);
639 u32 npages = obj->size >> PAGE_SHIFT;
640 int ret = 0;
641
642 if (roll > npages) {
643 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
644 return -EINVAL;
645 }
646
647 omap_obj->roll = roll;
648
649 mutex_lock(&omap_obj->lock);
650
651 /* if we aren't mapped yet, we don't need to do anything */
652 if (omap_obj->block) {
653 ret = omap_gem_attach_pages(obj);
654 if (ret)
655 goto fail;
656
657 ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
658 roll, true);
659 if (ret)
660 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
661 }
662
663fail:
664 mutex_unlock(&omap_obj->lock);
665
666 return ret;
667}
668#endif
669
670/* -----------------------------------------------------------------------------
671 * Memory Management & DMA Sync
672 */
673
674/*
675 * shmem buffers that are mapped cached are not coherent.
676 *
677 * We keep track of dirty pages using page faulting to perform cache management.
678 * When a page is mapped to the CPU in read/write mode the device can't access
679 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
680 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
681 * unmapped from the CPU.
682 */
683static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
684{
685 struct omap_gem_object *omap_obj = to_omap_bo(obj);
686
687 return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
688 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
689}
690
691/* Sync the buffer for CPU access.. note pages should already be
692 * attached, ie. omap_gem_get_pages()
693 */
694void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
695{
696 struct drm_device *dev = obj->dev;
697 struct omap_gem_object *omap_obj = to_omap_bo(obj);
698
699 if (omap_gem_is_cached_coherent(obj))
700 return;
701
702 if (omap_obj->dma_addrs[pgoff]) {
703 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
704 PAGE_SIZE, DMA_TO_DEVICE);
705 omap_obj->dma_addrs[pgoff] = 0;
706 }
707}
708
709/* sync the buffer for DMA access */
710void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
711 enum dma_data_direction dir)
712{
713 struct drm_device *dev = obj->dev;
714 struct omap_gem_object *omap_obj = to_omap_bo(obj);
715 int i, npages = obj->size >> PAGE_SHIFT;
716 struct page **pages = omap_obj->pages;
717 bool dirty = false;
718
719 if (omap_gem_is_cached_coherent(obj))
720 return;
721
722 for (i = 0; i < npages; i++) {
723 if (!omap_obj->dma_addrs[i]) {
724 dma_addr_t addr;
725
726 addr = dma_map_page(dev->dev, pages[i], 0,
727 PAGE_SIZE, dir);
728 if (dma_mapping_error(dev->dev, addr)) {
729 dev_warn(dev->dev, "%s: failed to map page\n",
730 __func__);
731 break;
732 }
733
734 dirty = true;
735 omap_obj->dma_addrs[i] = addr;
736 }
737 }
738
739 if (dirty) {
740 unmap_mapping_range(obj->filp->f_mapping, 0,
741 omap_gem_mmap_size(obj), 1);
742 }
743}
744
745static int omap_gem_pin_tiler(struct drm_gem_object *obj)
746{
747 struct omap_gem_object *omap_obj = to_omap_bo(obj);
748 u32 npages = obj->size >> PAGE_SHIFT;
749 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
750 struct tiler_block *block;
751 int ret;
752
753 BUG_ON(omap_obj->block);
754
755 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
756 block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
757 PAGE_SIZE);
758 } else {
759 block = tiler_reserve_1d(obj->size);
760 }
761
762 if (IS_ERR(block)) {
763 ret = PTR_ERR(block);
764 dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
765 goto fail;
766 }
767
768 /* TODO: enable async refill.. */
769 ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
770 if (ret) {
771 tiler_release(block);
772 dev_err(obj->dev->dev, "could not pin: %d\n", ret);
773 goto fail;
774 }
775
776 omap_obj->dma_addr = tiler_ssptr(block);
777 omap_obj->block = block;
778
779 DBG("got dma address: %pad", &omap_obj->dma_addr);
780
781fail:
782 return ret;
783}
784
785/**
786 * omap_gem_pin() - Pin a GEM object in memory
787 * @obj: the GEM object
788 * @dma_addr: the DMA address
789 *
790 * Pin the given GEM object in memory and fill the dma_addr pointer with the
791 * object's DMA address. If the buffer is not physically contiguous it will be
792 * remapped through the TILER to provide a contiguous view.
793 *
794 * Pins are reference-counted, calling this function multiple times is allowed
795 * as long the corresponding omap_gem_unpin() calls are balanced.
796 *
797 * Return 0 on success or a negative error code otherwise.
798 */
799int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
800{
801 struct omap_drm_private *priv = obj->dev->dev_private;
802 struct omap_gem_object *omap_obj = to_omap_bo(obj);
803 int ret = 0;
804
805 mutex_lock(&omap_obj->lock);
806
807 if (!omap_gem_is_contiguous(omap_obj)) {
808 if (refcount_read(&omap_obj->pin_cnt) == 0) {
809
810 refcount_set(&omap_obj->pin_cnt, 1);
811
812 ret = omap_gem_attach_pages(obj);
813 if (ret)
814 goto fail;
815
816 if (omap_obj->flags & OMAP_BO_SCANOUT) {
817 if (priv->has_dmm) {
818 ret = omap_gem_pin_tiler(obj);
819 if (ret)
820 goto fail;
821 }
822 }
823 } else {
824 refcount_inc(&omap_obj->pin_cnt);
825 }
826 }
827
828 if (dma_addr)
829 *dma_addr = omap_obj->dma_addr;
830
831fail:
832 mutex_unlock(&omap_obj->lock);
833
834 return ret;
835}
836
837/**
838 * omap_gem_unpin_locked() - Unpin a GEM object from memory
839 * @obj: the GEM object
840 *
841 * omap_gem_unpin() without locking.
842 */
843static void omap_gem_unpin_locked(struct drm_gem_object *obj)
844{
845 struct omap_drm_private *priv = obj->dev->dev_private;
846 struct omap_gem_object *omap_obj = to_omap_bo(obj);
847 int ret;
848
849 if (omap_gem_is_contiguous(omap_obj))
850 return;
851
852 if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
853 if (omap_obj->sgt) {
854 sg_free_table(omap_obj->sgt);
855 kfree(omap_obj->sgt);
856 omap_obj->sgt = NULL;
857 }
858 if (!(omap_obj->flags & OMAP_BO_SCANOUT))
859 return;
860 if (priv->has_dmm) {
861 ret = tiler_unpin(omap_obj->block);
862 if (ret) {
863 dev_err(obj->dev->dev,
864 "could not unpin pages: %d\n", ret);
865 }
866 ret = tiler_release(omap_obj->block);
867 if (ret) {
868 dev_err(obj->dev->dev,
869 "could not release unmap: %d\n", ret);
870 }
871 omap_obj->dma_addr = 0;
872 omap_obj->block = NULL;
873 }
874 }
875}
876
877/**
878 * omap_gem_unpin() - Unpin a GEM object from memory
879 * @obj: the GEM object
880 *
881 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
882 * reference-counted, the actual unpin will only be performed when the number
883 * of calls to this function matches the number of calls to omap_gem_pin().
884 */
885void omap_gem_unpin(struct drm_gem_object *obj)
886{
887 struct omap_gem_object *omap_obj = to_omap_bo(obj);
888
889 mutex_lock(&omap_obj->lock);
890 omap_gem_unpin_locked(obj);
891 mutex_unlock(&omap_obj->lock);
892}
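/*
 * Usage sketch (illustrative only): pin and unpin calls must stay
 * balanced. A hypothetical scanout path would look roughly like
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program dma_addr into the display hardware ...
 *	omap_gem_unpin(obj);
 *
 * The TILER mapping (if any) is only torn down once the last pin
 * reference is dropped.
 */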
893
894/* Get rotated scanout address (only valid if already pinned), at the
895 * specified orientation and x,y offset from top-left corner of buffer
896 * (only valid for tiled 2d buffers)
897 */
898int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
899 int x, int y, dma_addr_t *dma_addr)
900{
901 struct omap_gem_object *omap_obj = to_omap_bo(obj);
902 int ret = -EINVAL;
903
904 mutex_lock(&omap_obj->lock);
905
906 if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
907 (omap_obj->flags & OMAP_BO_TILED_MASK)) {
908 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
909 ret = 0;
910 }
911
912 mutex_unlock(&omap_obj->lock);
913
914 return ret;
915}
916
917/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
918int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
919{
920 struct omap_gem_object *omap_obj = to_omap_bo(obj);
921 int ret = -EINVAL;
922 if (omap_obj->flags & OMAP_BO_TILED_MASK)
923 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
924 return ret;
925}
926
927/* if !remap, and we don't have pages backing, then fail, rather than
928 * increasing the pin count (which we don't really do yet anyways,
929 * because we don't support swapping pages back out). And 'remap'
930 * might not be quite the right name, but I wanted to keep it working
931 * similarly to omap_gem_pin(). Note though that mutex is not
932	 * acquired if !remap (because this can be called in atomic ctxt),
933 * but probably omap_gem_unpin() should be changed to work in the
934 * same way. If !remap, a matching omap_gem_put_pages() call is not
935 * required (and should not be made).
936 */
937int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
938 bool remap)
939{
940 struct omap_gem_object *omap_obj = to_omap_bo(obj);
941 int ret = 0;
942
943 mutex_lock(&omap_obj->lock);
944
945 if (remap) {
946 ret = omap_gem_attach_pages(obj);
947 if (ret)
948 goto unlock;
949 }
950
951 if (!omap_obj->pages) {
952 ret = -ENOMEM;
953 goto unlock;
954 }
955
956 *pages = omap_obj->pages;
957
958unlock:
959 mutex_unlock(&omap_obj->lock);
960
961 return ret;
962}
963
964/* release pages when DMA no longer being performed */
965int omap_gem_put_pages(struct drm_gem_object *obj)
966{
967 /* do something here if we dynamically attach/detach pages.. at
968 * least they would no longer need to be pinned if everyone has
969 * released the pages..
970 */
971 return 0;
972}
973
974struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
975 enum dma_data_direction dir)
976{
977 struct omap_gem_object *omap_obj = to_omap_bo(obj);
978 dma_addr_t addr;
979 struct sg_table *sgt;
980 struct scatterlist *sg;
981 unsigned int count, len, stride, i;
982 int ret;
983
984 ret = omap_gem_pin(obj, &addr);
985 if (ret)
986 return ERR_PTR(ret);
987
988 mutex_lock(&omap_obj->lock);
989
990 sgt = omap_obj->sgt;
991 if (sgt)
992 goto out;
993
994 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
995 if (!sgt) {
996 ret = -ENOMEM;
997 goto err_unpin;
998 }
999
1000 if (addr) {
1001 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1002 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
1003
1004 len = omap_obj->width << (int)fmt;
1005 count = omap_obj->height;
1006 stride = tiler_stride(fmt, 0);
1007 } else {
1008 len = obj->size;
1009 count = 1;
1010 stride = 0;
1011 }
1012 } else {
1013 count = obj->size >> PAGE_SHIFT;
1014 }
1015
1016 ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1017 if (ret)
1018 goto err_free;
1019
1020 /* this must be after omap_gem_pin() to ensure we have pages attached */
1021 omap_gem_dma_sync_buffer(obj, dir);
1022
1023 if (addr) {
1024 for_each_sg(sgt->sgl, sg, count, i) {
1025 sg_set_page(sg, phys_to_page(addr), len,
1026 offset_in_page(addr));
1027 sg_dma_address(sg) = addr;
1028 sg_dma_len(sg) = len;
1029
1030 addr += stride;
1031 }
1032 } else {
1033 for_each_sg(sgt->sgl, sg, count, i) {
1034 sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1035 sg_dma_address(sg) = omap_obj->dma_addrs[i];
1036 sg_dma_len(sg) = PAGE_SIZE;
1037 }
1038 }
1039
1040 omap_obj->sgt = sgt;
1041out:
1042 mutex_unlock(&omap_obj->lock);
1043 return sgt;
1044
1045err_free:
1046 kfree(sgt);
1047err_unpin:
1048 mutex_unlock(&omap_obj->lock);
1049 omap_gem_unpin(obj);
1050 return ERR_PTR(ret);
1051}
1052
1053void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1054{
1055 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1056
1057 if (WARN_ON(omap_obj->sgt != sgt))
1058 return;
1059
1060 omap_gem_unpin(obj);
1061}
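/*
 * Usage sketch (illustrative only): callers such as the dma-buf export
 * code pair the two helpers above, e.g.
 *
 *	struct sg_table *sgt = omap_gem_get_sg(obj, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	... let the importer DMA to/from the buffer ...
 *	omap_gem_put_sg(obj, sgt);
 *
 * omap_gem_get_sg() pins the object, so omap_gem_put_sg() must be called
 * once the scatterlist is no longer needed.
 */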
1062
1063#ifdef CONFIG_DRM_FBDEV_EMULATION
1064/*
1065 * Get kernel virtual address for CPU access.. this more or less only
1066 * exists for omap_fbdev.
1067 */
1068void *omap_gem_vaddr(struct drm_gem_object *obj)
1069{
1070 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1071 void *vaddr;
1072 int ret;
1073
1074 mutex_lock(&omap_obj->lock);
1075
1076 if (!omap_obj->vaddr) {
1077 ret = omap_gem_attach_pages(obj);
1078 if (ret) {
1079 vaddr = ERR_PTR(ret);
1080 goto unlock;
1081 }
1082
1083 omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1084 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1085 }
1086
1087 vaddr = omap_obj->vaddr;
1088
1089unlock:
1090 mutex_unlock(&omap_obj->lock);
1091 return vaddr;
1092}
1093#endif
1094
1095/* -----------------------------------------------------------------------------
1096 * Power Management
1097 */
1098
1099#ifdef CONFIG_PM
1100/* re-pin objects in DMM in resume path: */
1101int omap_gem_resume(struct drm_device *dev)
1102{
1103 struct omap_drm_private *priv = dev->dev_private;
1104 struct omap_gem_object *omap_obj;
1105 int ret = 0;
1106
1107 mutex_lock(&priv->list_lock);
1108 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1109 if (omap_obj->block) {
1110 struct drm_gem_object *obj = &omap_obj->base;
1111 u32 npages = obj->size >> PAGE_SHIFT;
1112
1113 WARN_ON(!omap_obj->pages); /* this can't happen */
1114 ret = tiler_pin(omap_obj->block,
1115 omap_obj->pages, npages,
1116 omap_obj->roll, true);
1117 if (ret) {
1118 dev_err(dev->dev, "could not repin: %d\n", ret);
1119 goto done;
1120 }
1121 }
1122 }
1123
1124done:
1125 mutex_unlock(&priv->list_lock);
1126 return ret;
1127}
1128#endif
1129
1130/* -----------------------------------------------------------------------------
1131 * DebugFS
1132 */
1133
1134#ifdef CONFIG_DEBUG_FS
1135void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1136{
1137 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1138 u64 off;
1139
1140 off = drm_vma_node_start(&obj->vma_node);
1141
1142 mutex_lock(&omap_obj->lock);
1143
1144 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1145 omap_obj->flags, obj->name, kref_read(&obj->refcount),
1146 off, &omap_obj->dma_addr,
1147 refcount_read(&omap_obj->pin_cnt),
1148 omap_obj->vaddr, omap_obj->roll);
1149
1150 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1151 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1152 if (omap_obj->block) {
1153 struct tcm_area *area = &omap_obj->block->area;
1154 seq_printf(m, " (%dx%d, %dx%d)",
1155 area->p0.x, area->p0.y,
1156 area->p1.x, area->p1.y);
1157 }
1158 } else {
1159 seq_printf(m, " %zu", obj->size);
1160 }
1161
1162 mutex_unlock(&omap_obj->lock);
1163
1164 seq_printf(m, "\n");
1165}
1166
1167void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1168{
1169 struct omap_gem_object *omap_obj;
1170 int count = 0;
1171 size_t size = 0;
1172
1173 list_for_each_entry(omap_obj, list, mm_list) {
1174 struct drm_gem_object *obj = &omap_obj->base;
1175 seq_printf(m, " ");
1176 omap_gem_describe(obj, m);
1177 count++;
1178 size += obj->size;
1179 }
1180
1181 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1182}
1183#endif
1184
1185/* -----------------------------------------------------------------------------
1186 * Constructor & Destructor
1187 */
1188
1189static void omap_gem_free_object(struct drm_gem_object *obj)
1190{
1191 struct drm_device *dev = obj->dev;
1192 struct omap_drm_private *priv = dev->dev_private;
1193 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1194
1195 omap_gem_evict(obj);
1196
1197 mutex_lock(&priv->list_lock);
1198 list_del(&omap_obj->mm_list);
1199 mutex_unlock(&priv->list_lock);
1200
1201 /*
1202 * We own the sole reference to the object at this point, but to keep
1203 * lockdep happy, we must still take the omap_obj_lock to call
1204 * omap_gem_detach_pages(). This should hardly make any difference as
1205 * there can't be any lock contention.
1206 */
1207 mutex_lock(&omap_obj->lock);
1208
1209 /* The object should not be pinned. */
1210 WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1211
1212 if (omap_obj->pages) {
1213 if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1214 kfree(omap_obj->pages);
1215 else
1216 omap_gem_detach_pages(obj);
1217 }
1218
1219 if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1220 dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1221 omap_obj->dma_addr);
1222 } else if (omap_obj->vaddr) {
1223 vunmap(omap_obj->vaddr);
1224 } else if (obj->import_attach) {
1225 drm_prime_gem_destroy(obj, omap_obj->sgt);
1226 }
1227
1228 mutex_unlock(&omap_obj->lock);
1229
1230 drm_gem_object_release(obj);
1231
1232 mutex_destroy(&omap_obj->lock);
1233
1234 kfree(omap_obj);
1235}
1236
1237static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1238{
1239 struct omap_drm_private *priv = dev->dev_private;
1240
1241 switch (flags & OMAP_BO_CACHE_MASK) {
1242 case OMAP_BO_CACHED:
1243 case OMAP_BO_WC:
1244 case OMAP_BO_CACHE_MASK:
1245 break;
1246
1247 default:
1248 return false;
1249 }
1250
1251 if (flags & OMAP_BO_TILED_MASK) {
1252 if (!priv->usergart)
1253 return false;
1254
1255 switch (flags & OMAP_BO_TILED_MASK) {
1256 case OMAP_BO_TILED_8:
1257 case OMAP_BO_TILED_16:
1258 case OMAP_BO_TILED_32:
1259 break;
1260
1261 default:
1262 return false;
1263 }
1264 }
1265
1266 return true;
1267}
1268
1269static const struct vm_operations_struct omap_gem_vm_ops = {
1270 .fault = omap_gem_fault,
1271 .open = drm_gem_vm_open,
1272 .close = drm_gem_vm_close,
1273};
1274
1275static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1276 .free = omap_gem_free_object,
1277 .export = omap_gem_prime_export,
1278 .mmap = omap_gem_object_mmap,
1279 .vm_ops = &omap_gem_vm_ops,
1280};
1281
1282/* GEM buffer object constructor */
1283struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1284 union omap_gem_size gsize, u32 flags)
1285{
1286 struct omap_drm_private *priv = dev->dev_private;
1287 struct omap_gem_object *omap_obj;
1288 struct drm_gem_object *obj;
1289 struct address_space *mapping;
1290 size_t size;
1291 int ret;
1292
1293 if (!omap_gem_validate_flags(dev, flags))
1294 return NULL;
1295
1296 /* Validate the flags and compute the memory and cache flags. */
1297 if (flags & OMAP_BO_TILED_MASK) {
1298 /*
1299 * Tiled buffers are always shmem paged backed. When they are
1300 * scanned out, they are remapped into DMM/TILER.
1301 */
1302 flags |= OMAP_BO_MEM_SHMEM;
1303
1304 /*
1305 * Currently don't allow cached buffers. There is some caching
1306 * stuff that needs to be handled better.
1307 */
1308 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1309 flags |= tiler_get_cpu_cache_flags();
1310 } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1311 /*
1312 * If we don't have DMM, we must allocate scanout buffers
1313 * from contiguous DMA memory.
1314 */
1315 flags |= OMAP_BO_MEM_DMA_API;
1316 } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1317 /*
1318 * All other buffers not backed by dma_buf are shmem-backed.
1319 */
1320 flags |= OMAP_BO_MEM_SHMEM;
1321 }
1322
1323	/* Allocate and initialize the OMAP GEM object. */
1324 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1325 if (!omap_obj)
1326 return NULL;
1327
1328 obj = &omap_obj->base;
1329 omap_obj->flags = flags;
1330 mutex_init(&omap_obj->lock);
1331
1332 if (flags & OMAP_BO_TILED_MASK) {
1333 /*
1334 * For tiled buffers align dimensions to slot boundaries and
1335 * calculate size based on aligned dimensions.
1336 */
1337 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1338 &gsize.tiled.height);
1339
1340 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1341 gsize.tiled.height);
1342
1343 omap_obj->width = gsize.tiled.width;
1344 omap_obj->height = gsize.tiled.height;
1345 } else {
1346 size = PAGE_ALIGN(gsize.bytes);
1347 }
1348
1349 obj->funcs = &omap_gem_object_funcs;
1350
1351 /* Initialize the GEM object. */
1352 if (!(flags & OMAP_BO_MEM_SHMEM)) {
1353 drm_gem_private_object_init(dev, obj, size);
1354 } else {
1355 ret = drm_gem_object_init(dev, obj, size);
1356 if (ret)
1357 goto err_free;
1358
1359 mapping = obj->filp->f_mapping;
1360 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1361 }
1362
1363 /* Allocate memory if needed. */
1364 if (flags & OMAP_BO_MEM_DMA_API) {
1365 omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1366 &omap_obj->dma_addr,
1367 GFP_KERNEL);
1368 if (!omap_obj->vaddr)
1369 goto err_release;
1370 }
1371
1372 mutex_lock(&priv->list_lock);
1373 list_add(&omap_obj->mm_list, &priv->obj_list);
1374 mutex_unlock(&priv->list_lock);
1375
1376 return obj;
1377
1378err_release:
1379 drm_gem_object_release(obj);
1380err_free:
1381 kfree(omap_obj);
1382 return NULL;
1383}
1384
1385struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1386 struct sg_table *sgt)
1387{
1388 struct omap_drm_private *priv = dev->dev_private;
1389 struct omap_gem_object *omap_obj;
1390 struct drm_gem_object *obj;
1391 union omap_gem_size gsize;
1392
1393 /* Without a DMM only physically contiguous buffers can be supported. */
1394 if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
1395 return ERR_PTR(-EINVAL);
1396
1397 gsize.bytes = PAGE_ALIGN(size);
1398 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1399 if (!obj)
1400 return ERR_PTR(-ENOMEM);
1401
1402 omap_obj = to_omap_bo(obj);
1403
1404 mutex_lock(&omap_obj->lock);
1405
1406 omap_obj->sgt = sgt;
1407
1408 if (omap_gem_sgt_is_contiguous(sgt, size)) {
1409 omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1410 } else {
1411 /* Create pages list from sgt */
1412 struct page **pages;
1413 unsigned int npages;
1414 unsigned int ret;
1415
1416 npages = DIV_ROUND_UP(size, PAGE_SIZE);
1417 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1418 if (!pages) {
1419 omap_gem_free_object(obj);
1420 obj = ERR_PTR(-ENOMEM);
1421 goto done;
1422 }
1423
1424 omap_obj->pages = pages;
1425 ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1426 if (ret) {
1427 omap_gem_free_object(obj);
1428 obj = ERR_PTR(-ENOMEM);
1429 goto done;
1430 }
1431 }
1432
1433done:
1434 mutex_unlock(&omap_obj->lock);
1435 return obj;
1436}
1437
1438/* convenience method to construct a GEM buffer object, and userspace handle */
1439int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1440 union omap_gem_size gsize, u32 flags, u32 *handle)
1441{
1442 struct drm_gem_object *obj;
1443 int ret;
1444
1445 obj = omap_gem_new(dev, gsize, flags);
1446 if (!obj)
1447 return -ENOMEM;
1448
1449 ret = drm_gem_handle_create(file, obj, handle);
1450 if (ret) {
1451 omap_gem_free_object(obj);
1452 return ret;
1453 }
1454
1455 /* drop reference from allocate - handle holds it now */
1456 drm_gem_object_put(obj);
1457
1458 return 0;
1459}
1460
1461/* -----------------------------------------------------------------------------
1462 * Init & Cleanup
1463 */
1464
1465/* If DMM is used, we need to set some stuff up.. */
1466void omap_gem_init(struct drm_device *dev)
1467{
1468 struct omap_drm_private *priv = dev->dev_private;
1469 struct omap_drm_usergart *usergart;
1470 const enum tiler_fmt fmts[] = {
1471 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1472 };
1473 int i, j;
1474
1475 if (!dmm_is_available()) {
1476 /* DMM only supported on OMAP4 and later, so this isn't fatal */
1477 dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1478 return;
1479 }
1480
1481 usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1482 if (!usergart)
1483 return;
1484
1485 /* reserve 4k aligned/wide regions for userspace mappings: */
1486 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1487 u16 h = 1, w = PAGE_SIZE >> i;
1488
1489 tiler_align(fmts[i], &w, &h);
1490 /* note: since each region is 1 4kb page wide, and minimum
1491 * number of rows, the height ends up being the same as the
1492 * # of pages in the region
1493 */
1494 usergart[i].height = h;
1495 usergart[i].height_shift = ilog2(h);
1496 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1497 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1498 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1499 struct omap_drm_usergart_entry *entry;
1500 struct tiler_block *block;
1501
1502 entry = &usergart[i].entry[j];
1503 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1504 if (IS_ERR(block)) {
1505 dev_err(dev->dev,
1506 "reserve failed: %d, %d, %ld\n",
1507 i, j, PTR_ERR(block));
1508 return;
1509 }
1510 entry->dma_addr = tiler_ssptr(block);
1511 entry->block = block;
1512
1513 DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1514 &entry->dma_addr,
1515 usergart[i].stride_pfn << PAGE_SHIFT);
1516 }
1517 }
1518
1519 priv->usergart = usergart;
1520 priv->has_dmm = true;
1521}
1522
1523void omap_gem_deinit(struct drm_device *dev)
1524{
1525 struct omap_drm_private *priv = dev->dev_private;
1526
1527 /* I believe we can rely on there being no more outstanding GEM
1528 * objects which could depend on usergart/dmm at this point.
1529 */
1530 kfree(priv->usergart);
1531	kfree(priv->usergart);
1532}
1/*
2 * drivers/gpu/drm/omapdrm/omap_gem.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20
21#include <linux/spinlock.h>
22#include <linux/shmem_fs.h>
23#include <drm/drm_vma_manager.h>
24
25#include "omap_drv.h"
26#include "omap_dmm_tiler.h"
27
28/* remove these once drm core helpers are merged */
29struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
30void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
31 bool dirty, bool accessed);
32int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
33
34/*
35 * GEM buffer object implementation.
36 */
37
38#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
39
40/* note: we use upper 8 bits of flags for driver-internal flags: */
41#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
42#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
43#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
44
45
46struct omap_gem_object {
47 struct drm_gem_object base;
48
49 struct list_head mm_list;
50
51 uint32_t flags;
52
53 /** width/height for tiled formats (rounded up to slot boundaries) */
54 uint16_t width, height;
55
56 /** roll applied when mapping to DMM */
57 uint32_t roll;
58
59 /**
60 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
61 * is set and the paddr is valid. Also if the buffer is remapped in
62 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
63 * the physical address and OMAP_BO_DMA is not set, then you should
64 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
65 * not removed from under your feet.
66 *
67 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
68 * buffer is requested, but doesn't mean that it is. Use the
69 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
70 * physical address.
71 */
72 dma_addr_t paddr;
73
74 /**
75 * # of users of paddr
76 */
77 uint32_t paddr_cnt;
78
79 /**
80 * tiler block used when buffer is remapped in DMM/TILER.
81 */
82 struct tiler_block *block;
83
84 /**
85 * Array of backing pages, if allocated. Note that pages are never
86 * allocated for buffers originally allocated from contiguous memory
87 */
88 struct page **pages;
89
90 /** addresses corresponding to pages in above array */
91 dma_addr_t *addrs;
92
93 /**
94 * Virtual address, if mapped.
95 */
96 void *vaddr;
97
98 /**
99 * sync-object allocated on demand (if needed)
100 *
101 * Per-buffer sync-object for tracking pending and completed hw/dma
102 * read and write operations. The layout in memory is dictated by
103 * the SGX firmware, which uses this information to stall the command
104 * stream if a surface is not ready yet.
105 *
106 * Note that when buffer is used by SGX, the sync-object needs to be
107 * allocated from a special heap of sync-objects. This way many sync
108 * objects can be packed in a page, and not waste GPU virtual address
109 * space. Because of this we have to have a omap_gem_set_sync_object()
110 * API to allow replacement of the syncobj after it has (potentially)
111 * already been allocated. A bit ugly but I haven't thought of a
112 * better alternative.
113 */
114 struct {
115 uint32_t write_pending;
116 uint32_t write_complete;
117 uint32_t read_pending;
118 uint32_t read_complete;
119 } *sync;
120};
121
122static int get_pages(struct drm_gem_object *obj, struct page ***pages);
123static uint64_t mmap_offset(struct drm_gem_object *obj);
124
125/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
126 * not necessarily pinned in TILER all the time, and (b) when they are
127 * they are not necessarily page aligned, we reserve one or more small
128 * regions in each of the 2d containers to use as a user-GART where we
129 * can create a second page-aligned mapping of parts of the buffer
130 * being accessed from userspace.
131 *
132 * Note that we could optimize slightly when we know that multiple
133 * tiler containers are backed by the same PAT.. but I'll leave that
134 * for later..
135 */
136#define NUM_USERGART_ENTRIES 2
137struct usergart_entry {
138 struct tiler_block *block; /* the reserved tiler block */
139 dma_addr_t paddr;
140 struct drm_gem_object *obj; /* the current pinned obj */
141 pgoff_t obj_pgoff; /* page offset of obj currently
142 mapped in */
143};
144static struct {
145 struct usergart_entry entry[NUM_USERGART_ENTRIES];
146 int height; /* height in rows */
147 int height_shift; /* ilog2(height in rows) */
148 int slot_shift; /* ilog2(width per slot) */
149 int stride_pfn; /* stride in pages */
150 int last; /* index of last used entry */
151} *usergart;
152
153static void evict_entry(struct drm_gem_object *obj,
154 enum tiler_fmt fmt, struct usergart_entry *entry)
155{
156 struct omap_gem_object *omap_obj = to_omap_bo(obj);
157 int n = usergart[fmt].height;
158 size_t size = PAGE_SIZE * n;
159 loff_t off = mmap_offset(obj) +
160 (entry->obj_pgoff << PAGE_SHIFT);
161 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
162
163 if (m > 1) {
164 int i;
165 /* if stride > than PAGE_SIZE then sparse mapping: */
166 for (i = n; i > 0; i--) {
167 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
168 off, PAGE_SIZE, 1);
169 off += PAGE_SIZE * m;
170 }
171 } else {
172 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
173 off, size, 1);
174 }
175
176 entry->obj = NULL;
177}
178
179/* Evict a buffer from usergart, if it is mapped there */
180static void evict(struct drm_gem_object *obj)
181{
182 struct omap_gem_object *omap_obj = to_omap_bo(obj);
183
184 if (omap_obj->flags & OMAP_BO_TILED) {
185 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
186 int i;
187
188 if (!usergart)
189 return;
190
191 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
192 struct usergart_entry *entry = &usergart[fmt].entry[i];
193 if (entry->obj == obj)
194 evict_entry(obj, fmt, entry);
195 }
196 }
197}
198
199/* GEM objects can either be allocated from contiguous memory (in which
200 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
201 * contiguous buffers can be remapped in TILER/DMM if they need to be
202 * contiguous... but we don't do this all the time to reduce pressure
203 * on TILER/DMM space when we know at allocation time that the buffer
204 * will need to be scanned out.
205 */
206static inline bool is_shmem(struct drm_gem_object *obj)
207{
208 return obj->filp != NULL;
209}
210
211/**
212 * shmem buffers that are mapped cached can simulate coherency via using
213 * page faulting to keep track of dirty pages
214 */
215static inline bool is_cached_coherent(struct drm_gem_object *obj)
216{
217 struct omap_gem_object *omap_obj = to_omap_bo(obj);
218 return is_shmem(obj) &&
219 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
220}
221
222static DEFINE_SPINLOCK(sync_lock);
223
224/** ensure backing pages are allocated */
225static int omap_gem_attach_pages(struct drm_gem_object *obj)
226{
227 struct drm_device *dev = obj->dev;
228 struct omap_gem_object *omap_obj = to_omap_bo(obj);
229 struct page **pages;
230 int npages = obj->size >> PAGE_SHIFT;
231 int i, ret;
232 dma_addr_t *addrs;
233
234 WARN_ON(omap_obj->pages);
235
236 /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
237 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
238 * we actually want CMA memory for it all anyways..
239 */
240 pages = drm_gem_get_pages(obj, GFP_KERNEL);
241 if (IS_ERR(pages)) {
242 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
243 return PTR_ERR(pages);
244 }
245
246 /* for non-cached buffers, ensure the new pages are clean because
247 * DSS, GPU, etc. are not cache coherent:
248 */
249 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
250 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
251 if (!addrs) {
252 ret = -ENOMEM;
253 goto free_pages;
254 }
255
256 for (i = 0; i < npages; i++) {
257 addrs[i] = dma_map_page(dev->dev, pages[i],
258 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
259 }
260 } else {
261 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
262 if (!addrs) {
263 ret = -ENOMEM;
264 goto free_pages;
265 }
266 }
267
268 omap_obj->addrs = addrs;
269 omap_obj->pages = pages;
270
271 return 0;
272
273free_pages:
274 drm_gem_put_pages(obj, pages, true, false);
275
276 return ret;
277}
278
279/** release backing pages */
280static void omap_gem_detach_pages(struct drm_gem_object *obj)
281{
282 struct omap_gem_object *omap_obj = to_omap_bo(obj);
283
284 /* for non-cached buffers, ensure the new pages are clean because
285 * DSS, GPU, etc. are not cache coherent:
286 */
287 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
288 int i, npages = obj->size >> PAGE_SHIFT;
289 for (i = 0; i < npages; i++) {
290 dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
291 PAGE_SIZE, DMA_BIDIRECTIONAL);
292 }
293 }
294
295 kfree(omap_obj->addrs);
296 omap_obj->addrs = NULL;
297
298 drm_gem_put_pages(obj, omap_obj->pages, true, false);
299 omap_obj->pages = NULL;
300}
301
302/* get buffer flags */
303uint32_t omap_gem_flags(struct drm_gem_object *obj)
304{
305 return to_omap_bo(obj)->flags;
306}
307
308/** get mmap offset */
309static uint64_t mmap_offset(struct drm_gem_object *obj)
310{
311 struct drm_device *dev = obj->dev;
312 int ret;
313 size_t size;
314
315 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
316
317 /* Make it mmapable */
318 size = omap_gem_mmap_size(obj);
319 ret = drm_gem_create_mmap_offset_size(obj, size);
320 if (ret) {
321 dev_err(dev->dev, "could not allocate mmap offset\n");
322 return 0;
323 }
324
325 return drm_vma_node_offset_addr(&obj->vma_node);
326}
327
328uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
329{
330 uint64_t offset;
331 mutex_lock(&obj->dev->struct_mutex);
332 offset = mmap_offset(obj);
333 mutex_unlock(&obj->dev->struct_mutex);
334 return offset;
335}

/* get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate the mmap
		 * offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not a tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}

	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];	/* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If the buffer width in bytes > PAGE_SIZE then the virtual stride
	 * is rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
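
	/*
	 * Worked example (illustrative numbers, assuming 4kb pages and that
	 * fmt encodes a bytes-per-pixel shift, as gem2fmt() yields here): a
	 * 32-bit buffer 1920 pixels wide has a 7680-byte row, so
	 * m = 1 + 7680 / 4096 = 2 pages of virtual stride, while any buffer
	 * whose row fits in one page keeps m = 1.
	 */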

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to the previous
	 * slot boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k buffers, figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do the corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/* We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to the shmem file so they have their
		 * own address_space (so unmap_mapping_range does what we
		 * want, in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
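
/*
 * Illustrative caller (a sketch, not taken verbatim from the dmabuf
 * code): an exporter that has already stashed the GEM object in
 * vma->vm_private_data can reuse the same mapping fixups for its own
 * mmap path.
 *
 *	vma->vm_pgoff = 0;
 *	return omap_gem_mmap_obj(obj, vma);
 */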

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use as a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for the dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
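
/*
 * Illustrative userspace flow (a sketch, error handling omitted; "fd" is
 * a hypothetical open DRM file descriptor): create a dumb buffer, look
 * up its mmap offset, then map it.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */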

/* Set scrolling position. This allows us to implement fast scrolling
 * for the console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;

		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
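
/*
 * Illustrative use (a sketch; "obj" backs a hypothetical scrolling
 * console): roll is expressed in pages, so advancing the scanout by one
 * page-sized step looks like:
 *
 *	ret = omap_gem_roll(obj, 1);
 *	if (ret)
 *		...fall back to a full redraw...
 */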

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
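
/*
 * Illustrative pattern (a sketch): the fault path tears down a page's
 * DMA mapping just before the CPU touches it, and the whole buffer is
 * re-mapped before it is handed back to DMA:
 *
 *	omap_gem_cpu_sync(obj, pgoff);	// CPU about to access one page
 *	...CPU reads/writes...
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// buffer back to DMA
 */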

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (i.e.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release tiler block: %d\n",
					ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
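
/*
 * Illustrative pairing (a sketch, error handling trimmed): pin a buffer
 * to get a DMA-able address before programming the scanout/DMA engine,
 * and drop the pin once the hardware is done with it.
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	...program hardware with paddr...
 *	omap_gem_put_paddr(obj);
 */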

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
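
/*
 * Illustrative use (a sketch): a non-atomic caller can let the pages be
 * attached on demand, while an atomic caller must have had them attached
 * beforehand and pass remap = false.
 *
 *	struct page **pages;
 *
 *	ret = omap_gem_get_pages(obj, &pages, true);	// may sleep
 *	...
 *	ret = omap_gem_get_pages(obj, &pages, false);	// atomic-safe
 */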

/* release pages when DMA is no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);

		if (ret)
			return ERR_PTR(ret);

		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
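
/*
 * Illustrative caller (a sketch of the omap_fbdev-style use): map the
 * buffer once for CPU drawing, checking for an error pointer and holding
 * struct_mutex across the call.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	vaddr = omap_gem_vaddr(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 */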

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);	/* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %08zx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_puts(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		seq_puts(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;

	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;

	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: " fmt "\n", \
			__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;

	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient. So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

/* mark the end of read and/or write operation */
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
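
/*
 * Illustrative lifecycle (a sketch): a driver marks an operation before
 * kicking hardware and finishes it from the completion path; a CPU user
 * blocks until the accesses it conflicts with have completed.
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);	// hw will write buffer
 *	...kick hardware, then later in the completion path...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);	// CPU waits for pending writes
 */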

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
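
/*
 * Illustrative use (a sketch; "kick_my_engine" and "job" are
 * hypothetical): run a callback as soon as it is safe to read the
 * buffer, without blocking the caller.
 *
 *	ret = omap_gem_op_async(obj, OMAP_GEM_READ, kick_my_engine, job);
 */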

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap. Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);	/* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	ret = 0;
	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}
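
/*
 * Illustrative allocation (a sketch; the flags and union members come
 * from this driver's UAPI): a tiled 32-bit buffer is described by its
 * dimensions rather than a byte size.
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	struct drm_gem_object *obj =
 *			omap_gem_new(dev, gsize, OMAP_BO_TILED_32 | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 */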

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}