/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
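/*
 * ------------------------------------------------------------------------
 * Editor's illustrative sketch -- NOT part of the original msm_gem.c.
 *
 * The helpers above follow a common pattern: allocate a GEM object under
 * dev->struct_mutex, pin its backing pages and obtain a GPU address with
 * msm_gem_get_iova_locked(), and optionally map it into the kernel with
 * msm_gem_get_vaddr_locked().  The function below is a hypothetical caller
 * showing that flow.  The name example_alloc_and_fill and the 'id' argument
 * (an address-space index) are assumptions, and a real caller would keep
 * its reference to the object for as long as the GPU uses the iova; here
 * the reference is dropped immediately to keep the sketch self-contained.
 */
#if 0	/* example only, compiled out */
static int example_alloc_and_fill(struct drm_device *dev, int id)
{
	struct drm_gem_object *obj;
	uint64_t iova;
	void *vaddr;
	int ret;

	mutex_lock(&dev->struct_mutex);

	/* msm_gem_new() expects struct_mutex to be held */
	obj = msm_gem_new(dev, PAGE_SIZE, MSM_BO_WC);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	/* pin the backing pages and get a GPU address in address space 'id' */
	ret = msm_gem_get_iova_locked(obj, id, &iova);
	if (ret)
		goto out_unref;

	/* map the buffer into the kernel and initialize it from the CPU */
	vaddr = msm_gem_get_vaddr_locked(obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out_unref;
	}
	memset(vaddr, 0, PAGE_SIZE);
	msm_gem_put_vaddr_locked(obj);

	/* ... 'iova' could now be handed to the GPU ... */
	ret = 0;

out_unref:
	/* dropping the last reference frees the object, its iova and pages */
	drm_gem_object_unreference(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
#endif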