v5.4 (drivers/gpu/drm/etnaviv/etnaviv_gem.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <drm/drm_prime.h>
  7#include <linux/dma-mapping.h>
  8#include <linux/shmem_fs.h>
  9#include <linux/spinlock.h>
 10#include <linux/vmalloc.h>
 11
 12#include "etnaviv_drv.h"
 13#include "etnaviv_gem.h"
 14#include "etnaviv_gpu.h"
 15#include "etnaviv_mmu.h"
 16
 17static struct lock_class_key etnaviv_shm_lock_class;
 18static struct lock_class_key etnaviv_userptr_lock_class;
 19
 20static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
 21{
 22	struct drm_device *dev = etnaviv_obj->base.dev;
 23	struct sg_table *sgt = etnaviv_obj->sgt;
 24
 25	/*
 26	 * For non-cached buffers, ensure the new pages are clean
 27	 * because display controller, GPU, etc. are not coherent.
 28	 */
 29	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 30		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 31}
 32
 33static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
 34{
 35	struct drm_device *dev = etnaviv_obj->base.dev;
 36	struct sg_table *sgt = etnaviv_obj->sgt;
 37
 38	/*
 39	 * For non-cached buffers, ensure the new pages are clean
 40	 * because display controller, GPU, etc. are not coherent:
 41	 *
 42	 * WARNING: The DMA API does not support concurrent CPU
 43	 * and device access to the memory area.  With BIDIRECTIONAL,
 44	 * we will clean the cache lines which overlap the region,
 45	 * and invalidate all cache lines (partially) contained in
 46	 * the region.
 47	 *
 48	 * If you have dirty data in the overlapping cache lines,
 49	 * that will corrupt the GPU-written data.  If you have
 50	 * written into the remainder of the region, this can
 51	 * discard those writes.
 52	 */
 53	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 54		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 55}
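/*
 * Note: this v5.4 code still drives the legacy dma_map_sg()/dma_unmap_sg()
 * interface and passes sgt->nents by hand; the v6.2 listing further down
 * switches to dma_map_sgtable()/dma_unmap_sgtable(), which take the
 * sg_table directly.
 */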
 56
 57/* called with etnaviv_obj->lock held */
 58static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 59{
 60	struct drm_device *dev = etnaviv_obj->base.dev;
 61	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
 62
 63	if (IS_ERR(p)) {
 64		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
 65		return PTR_ERR(p);
 66	}
 67
 68	etnaviv_obj->pages = p;
 69
 70	return 0;
 71}
 72
 73static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
 74{
 75	if (etnaviv_obj->sgt) {
 76		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
 77		sg_free_table(etnaviv_obj->sgt);
 78		kfree(etnaviv_obj->sgt);
 79		etnaviv_obj->sgt = NULL;
 80	}
 81	if (etnaviv_obj->pages) {
 82		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
 83				  true, false);
 84
 85		etnaviv_obj->pages = NULL;
 86	}
 87}
 88
 89struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 90{
 91	int ret;
 92
 93	lockdep_assert_held(&etnaviv_obj->lock);
 94
 95	if (!etnaviv_obj->pages) {
 96		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
 97		if (ret < 0)
 98			return ERR_PTR(ret);
 99	}
100
101	if (!etnaviv_obj->sgt) {
102		struct drm_device *dev = etnaviv_obj->base.dev;
103		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
104		struct sg_table *sgt;
105
106		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
107		if (IS_ERR(sgt)) {
108			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
109				PTR_ERR(sgt));
110			return ERR_CAST(sgt);
111		}
112
113		etnaviv_obj->sgt = sgt;
114
115		etnaviv_gem_scatter_map(etnaviv_obj);
116	}
117
118	return etnaviv_obj->pages;
119}
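/*
 * Backing pages and the scatter/gather table are created lazily here, under
 * etnaviv_obj->lock, the first time anyone needs them; both the fault
 * handler and etnaviv_gem_mapping_get() below rely on this helper.
 */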
120
121void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
122{
123	lockdep_assert_held(&etnaviv_obj->lock);
124	/* when we start tracking the pin count, then do something here */
125}
126
127static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
128		struct vm_area_struct *vma)
129{
130	pgprot_t vm_page_prot;
131
132	vma->vm_flags &= ~VM_PFNMAP;
133	vma->vm_flags |= VM_MIXEDMAP;
134
135	vm_page_prot = vm_get_page_prot(vma->vm_flags);
136
137	if (etnaviv_obj->flags & ETNA_BO_WC) {
138		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
139	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
140		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
141	} else {
142		/*
143		 * Shunt off cached objs to shmem file so they have their own
144		 * address_space (so unmap_mapping_range does what we want,
145		 * in particular in the case of mmap'd dmabufs)
146		 */
147		fput(vma->vm_file);
148		get_file(etnaviv_obj->base.filp);
149		vma->vm_pgoff = 0;
150		vma->vm_file  = etnaviv_obj->base.filp;
151
152		vma->vm_page_prot = vm_page_prot;
153	}
154
155	return 0;
156}
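/*
 * Write-combined and uncached objects keep the mapping set up by
 * drm_gem_mmap() and only adjust the page protection; fully cached objects
 * are instead redirected to the object's shmem file so that
 * unmap_mapping_range() on that file's address_space also hits the user
 * mapping (see the comment above).
 */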
157
158int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
159{
160	struct etnaviv_gem_object *obj;
161	int ret;
162
163	ret = drm_gem_mmap(filp, vma);
164	if (ret) {
165		DBG("mmap failed: %d", ret);
166		return ret;
167	}
168
169	obj = to_etnaviv_bo(vma->vm_private_data);
170	return obj->ops->mmap(obj, vma);
171}
172
173vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
174{
175	struct vm_area_struct *vma = vmf->vma;
176	struct drm_gem_object *obj = vma->vm_private_data;
177	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
178	struct page **pages, *page;
179	pgoff_t pgoff;
180	int err;
181
182	/*
183	 * Make sure we don't parallel update on a fault, nor move or remove
184	 * something from beneath our feet.  Note that vmf_insert_page() is
185	 * specifically coded to take care of this, so we don't have to.
186	 */
187	err = mutex_lock_interruptible(&etnaviv_obj->lock);
188	if (err)
189		return VM_FAULT_NOPAGE;
190	/* make sure we have pages attached now */
191	pages = etnaviv_gem_get_pages(etnaviv_obj);
192	mutex_unlock(&etnaviv_obj->lock);
193
194	if (IS_ERR(pages)) {
195		err = PTR_ERR(pages);
196		return vmf_error(err);
197	}
198
199	/* We don't use vmf->pgoff since that has the fake offset: */
200	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
201
202	page = pages[pgoff];
203
204	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
205	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
206
207	return vmf_insert_page(vma, vmf->address, page);
208}
209
210int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
211{
212	int ret;
213
214	/* Make it mmapable */
215	ret = drm_gem_create_mmap_offset(obj);
216	if (ret)
217		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
218	else
219		*offset = drm_vma_node_offset_addr(&obj->vma_node);
220
221	return ret;
222}
223
224static struct etnaviv_vram_mapping *
225etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
226			     struct etnaviv_iommu_context *context)
227{
228	struct etnaviv_vram_mapping *mapping;
229
230	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
231		if (mapping->context == context)
232			return mapping;
233	}
234
235	return NULL;
236}
237
238void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
239{
240	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
241
242	mutex_lock(&etnaviv_obj->lock);
243	WARN_ON(mapping->use == 0);
244	mapping->use -= 1;
245	mutex_unlock(&etnaviv_obj->lock);
246
247	drm_gem_object_put_unlocked(&etnaviv_obj->base);
248}
249
250struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
251	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
252	u64 va)
253{
254	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
255	struct etnaviv_vram_mapping *mapping;
256	struct page **pages;
257	int ret = 0;
258
259	mutex_lock(&etnaviv_obj->lock);
260	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
261	if (mapping) {
262		/*
263		 * Holding the object lock prevents the use count changing
264		 * beneath us.  If the use count is zero, the MMU might be
265		 * reaping this object, so take the lock and re-check that
266		 * the MMU owns this mapping to close this race.
267		 */
268		if (mapping->use == 0) {
269			mutex_lock(&mmu_context->lock);
270			if (mapping->context == mmu_context)
271				mapping->use += 1;
272			else
273				mapping = NULL;
274			mutex_unlock(&mmu_context->lock);
275			if (mapping)
276				goto out;
277		} else {
278			mapping->use += 1;
279			goto out;
280		}
281	}
282
283	pages = etnaviv_gem_get_pages(etnaviv_obj);
284	if (IS_ERR(pages)) {
285		ret = PTR_ERR(pages);
286		goto out;
287	}
288
289	/*
290	 * See if we have a reaped vram mapping we can re-use before
291	 * allocating a fresh mapping.
292	 */
293	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
294	if (!mapping) {
295		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
296		if (!mapping) {
297			ret = -ENOMEM;
298			goto out;
299		}
300
301		INIT_LIST_HEAD(&mapping->scan_node);
302		mapping->object = etnaviv_obj;
303	} else {
304		list_del(&mapping->obj_node);
305	}
306
307	etnaviv_iommu_context_get(mmu_context);
308	mapping->context = mmu_context;
309	mapping->use = 1;
310
311	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
312				    mmu_context->global->memory_base,
313				    mapping, va);
314	if (ret < 0) {
315		etnaviv_iommu_context_put(mmu_context);
316		kfree(mapping);
317	} else {
318		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
319	}
320
321out:
322	mutex_unlock(&etnaviv_obj->lock);
323
324	if (ret)
325		return ERR_PTR(ret);
326
327	/* Take a reference on the object */
328	drm_gem_object_get(obj);
329	return mapping;
330}
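/*
 * Every successful etnaviv_gem_mapping_get() bumps mapping->use and takes a
 * GEM object reference; etnaviv_gem_mapping_unreference() above drops both
 * again.
 */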
331
332void *etnaviv_gem_vmap(struct drm_gem_object *obj)
333{
334	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
335
336	if (etnaviv_obj->vaddr)
337		return etnaviv_obj->vaddr;
338
339	mutex_lock(&etnaviv_obj->lock);
340	/*
341	 * Need to check again, as we might have raced with another thread
342	 * while waiting for the mutex.
343	 */
344	if (!etnaviv_obj->vaddr)
345		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
346	mutex_unlock(&etnaviv_obj->lock);
347
348	return etnaviv_obj->vaddr;
349}
350
351static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
352{
353	struct page **pages;
354
355	lockdep_assert_held(&obj->lock);
356
357	pages = etnaviv_gem_get_pages(obj);
358	if (IS_ERR(pages))
359		return NULL;
360
361	return vmap(pages, obj->base.size >> PAGE_SHIFT,
362			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
363}
364
365static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
366{
367	if (op & ETNA_PREP_READ)
368		return DMA_FROM_DEVICE;
369	else if (op & ETNA_PREP_WRITE)
370		return DMA_TO_DEVICE;
371	else
372		return DMA_BIDIRECTIONAL;
373}
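/*
 * The prep/fini pair below only performs cache maintenance for
 * ETNA_BO_CACHED objects: cpu_prep syncs the pages for the CPU in the
 * direction derived from the ETNA_PREP_* op, and cpu_fini syncs them back
 * for the device using the op recorded in last_cpu_prep_op.
 */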
374
375int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
376		struct timespec *timeout)
377{
378	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
379	struct drm_device *dev = obj->dev;
380	bool write = !!(op & ETNA_PREP_WRITE);
381	int ret;
382
383	if (!etnaviv_obj->sgt) {
384		void *ret;
385
386		mutex_lock(&etnaviv_obj->lock);
387		ret = etnaviv_gem_get_pages(etnaviv_obj);
388		mutex_unlock(&etnaviv_obj->lock);
389		if (IS_ERR(ret))
390			return PTR_ERR(ret);
391	}
392
393	if (op & ETNA_PREP_NOSYNC) {
394		if (!dma_resv_test_signaled_rcu(obj->resv,
395							  write))
396			return -EBUSY;
397	} else {
398		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
399
400		ret = dma_resv_wait_timeout_rcu(obj->resv,
401							  write, true, remain);
402		if (ret <= 0)
403			return ret == 0 ? -ETIMEDOUT : ret;
404	}
405
406	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
407		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
408				    etnaviv_obj->sgt->nents,
409				    etnaviv_op_to_dma_dir(op));
410		etnaviv_obj->last_cpu_prep_op = op;
411	}
412
413	return 0;
414}
415
416int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
417{
418	struct drm_device *dev = obj->dev;
419	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
420
421	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
422		/* fini without a prep is almost certainly a userspace error */
423		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
424		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
425			etnaviv_obj->sgt->nents,
426			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
427		etnaviv_obj->last_cpu_prep_op = 0;
428	}
429
430	return 0;
431}
432
433int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
434	struct timespec *timeout)
435{
436	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
437
438	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
439}
440
441#ifdef CONFIG_DEBUG_FS
442static void etnaviv_gem_describe_fence(struct dma_fence *fence,
443	const char *type, struct seq_file *m)
444{
445	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
446		seq_printf(m, "\t%9s: %s %s seq %llu\n",
447			   type,
448			   fence->ops->get_driver_name(fence),
449			   fence->ops->get_timeline_name(fence),
450			   fence->seqno);
451}
452
453static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
454{
455	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
456	struct dma_resv *robj = obj->resv;
457	struct dma_resv_list *fobj;
458	struct dma_fence *fence;
459	unsigned long off = drm_vma_node_start(&obj->vma_node);
460
461	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
462			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
463			obj->name, kref_read(&obj->refcount),
464			off, etnaviv_obj->vaddr, obj->size);
465
466	rcu_read_lock();
467	fobj = rcu_dereference(robj->fence);
468	if (fobj) {
469		unsigned int i, shared_count = fobj->shared_count;
470
471		for (i = 0; i < shared_count; i++) {
472			fence = rcu_dereference(fobj->shared[i]);
473			etnaviv_gem_describe_fence(fence, "Shared", m);
474		}
475	}
476
477	fence = rcu_dereference(robj->fence_excl);
478	if (fence)
479		etnaviv_gem_describe_fence(fence, "Exclusive", m);
480	rcu_read_unlock();
481}
482
483void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
484	struct seq_file *m)
485{
486	struct etnaviv_gem_object *etnaviv_obj;
487	int count = 0;
488	size_t size = 0;
489
490	mutex_lock(&priv->gem_lock);
491	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
492		struct drm_gem_object *obj = &etnaviv_obj->base;
493
494		seq_puts(m, "   ");
495		etnaviv_gem_describe(obj, m);
496		count++;
497		size += obj->size;
498	}
499	mutex_unlock(&priv->gem_lock);
500
501	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
502}
503#endif
504
505static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
506{
507	vunmap(etnaviv_obj->vaddr);
508	put_pages(etnaviv_obj);
509}
510
511static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
512	.get_pages = etnaviv_gem_shmem_get_pages,
513	.release = etnaviv_gem_shmem_release,
514	.vmap = etnaviv_gem_vmap_impl,
515	.mmap = etnaviv_gem_mmap_obj,
516};
517
518void etnaviv_gem_free_object(struct drm_gem_object *obj)
519{
520	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
521	struct etnaviv_drm_private *priv = obj->dev->dev_private;
522	struct etnaviv_vram_mapping *mapping, *tmp;
523
524	/* object should not be active */
525	WARN_ON(is_active(etnaviv_obj));
526
527	mutex_lock(&priv->gem_lock);
528	list_del(&etnaviv_obj->gem_node);
529	mutex_unlock(&priv->gem_lock);
530
531	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
532				 obj_node) {
533		struct etnaviv_iommu_context *context = mapping->context;
534
535		WARN_ON(mapping->use);
536
537		if (context) {
538			etnaviv_iommu_unmap_gem(context, mapping);
539			etnaviv_iommu_context_put(context);
540		}
541
542		list_del(&mapping->obj_node);
543		kfree(mapping);
544	}
545
546	drm_gem_free_mmap_offset(obj);
547	etnaviv_obj->ops->release(etnaviv_obj);
548	drm_gem_object_release(obj);
549
550	kfree(etnaviv_obj);
551}
552
553void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
554{
555	struct etnaviv_drm_private *priv = dev->dev_private;
556	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
557
558	mutex_lock(&priv->gem_lock);
559	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
560	mutex_unlock(&priv->gem_lock);
561}
562
563static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
564	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
565{
566	struct etnaviv_gem_object *etnaviv_obj;
567	unsigned sz = sizeof(*etnaviv_obj);
568	bool valid = true;
569
570	/* validate flags */
571	switch (flags & ETNA_BO_CACHE_MASK) {
572	case ETNA_BO_UNCACHED:
573	case ETNA_BO_CACHED:
574	case ETNA_BO_WC:
575		break;
576	default:
577		valid = false;
578	}
579
580	if (!valid) {
581		dev_err(dev->dev, "invalid cache flag: %x\n",
582			(flags & ETNA_BO_CACHE_MASK));
583		return -EINVAL;
584	}
585
586	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
587	if (!etnaviv_obj)
588		return -ENOMEM;
589
590	etnaviv_obj->flags = flags;
591	etnaviv_obj->ops = ops;
592
593	mutex_init(&etnaviv_obj->lock);
594	INIT_LIST_HEAD(&etnaviv_obj->vram_list);
595
596	*obj = &etnaviv_obj->base;
597
598	return 0;
599}
600
601/* convenience method to construct a GEM buffer object, and userspace handle */
602int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
603	u32 size, u32 flags, u32 *handle)
604{
605	struct drm_gem_object *obj = NULL;
606	int ret;
607
608	size = PAGE_ALIGN(size);
609
610	ret = etnaviv_gem_new_impl(dev, size, flags,
611				   &etnaviv_gem_shmem_ops, &obj);
612	if (ret)
613		goto fail;
614
615	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
616
617	ret = drm_gem_object_init(dev, obj, size);
618	if (ret)
619		goto fail;
620
621	/*
622	 * Our buffers are kept pinned, so allocating them from the MOVABLE
623	 * zone is a really bad idea, and conflicts with CMA. See comments
624	 * above new_inode() why this is required _and_ expected if you're
625	 * going to pin these pages.
626	 */
627	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
628			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
629
630	etnaviv_gem_obj_add(dev, obj);
631
632	ret = drm_gem_handle_create(file, obj, handle);
633
634	/* drop reference from allocate - handle holds it now */
635fail:
636	drm_gem_object_put_unlocked(obj);
637
638	return ret;
639}
640
641int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
642	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
643{
644	struct drm_gem_object *obj;
645	int ret;
646
647	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
648	if (ret)
649		return ret;
650
651	drm_gem_private_object_init(dev, obj, size);
652
653	*res = to_etnaviv_bo(obj);
654
655	return 0;
656}
657
658static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
659{
660	struct page **pvec = NULL;
661	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
662	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
663
664	might_lock_read(&current->mm->mmap_sem);
665
666	if (userptr->mm != current->mm)
667		return -EPERM;
668
669	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
670	if (!pvec)
671		return -ENOMEM;
672
673	do {
674		unsigned num_pages = npages - pinned;
675		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
676		struct page **pages = pvec + pinned;
677
678		ret = get_user_pages_fast(ptr, num_pages,
679					  !userptr->ro ? FOLL_WRITE : 0, pages);
680		if (ret < 0) {
681			release_pages(pvec, pinned);
682			kvfree(pvec);
683			return ret;
684		}
685
686		pinned += ret;
687
688	} while (pinned < npages);
689
690	etnaviv_obj->pages = pvec;
691
692	return 0;
693}
694
695static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
696{
697	if (etnaviv_obj->sgt) {
698		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
699		sg_free_table(etnaviv_obj->sgt);
700		kfree(etnaviv_obj->sgt);
701	}
702	if (etnaviv_obj->pages) {
703		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
704
705		release_pages(etnaviv_obj->pages, npages);
706		kvfree(etnaviv_obj->pages);
707	}
708}
709
710static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
711		struct vm_area_struct *vma)
712{
713	return -EINVAL;
714}
715
716static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
717	.get_pages = etnaviv_gem_userptr_get_pages,
718	.release = etnaviv_gem_userptr_release,
719	.vmap = etnaviv_gem_vmap_impl,
720	.mmap = etnaviv_gem_userptr_mmap_obj,
721};
722
723int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
724	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
725{
726	struct etnaviv_gem_object *etnaviv_obj;
727	int ret;
728
729	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
730				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
731	if (ret)
732		return ret;
733
734	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
735
736	etnaviv_obj->userptr.ptr = ptr;
737	etnaviv_obj->userptr.mm = current->mm;
738	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
739
740	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
741
742	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
743
744	/* drop reference from allocate - handle holds it now */
745	drm_gem_object_put_unlocked(&etnaviv_obj->base);
746	return ret;
747}
v6.2 (drivers/gpu/drm/etnaviv/etnaviv_gem.c)
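Relative to the v5.4 listing above, the changes visible in this version are: the scatterlist helpers use dma_map_sgtable()/dma_unmap_sgtable() and dma_sync_sgtable_for_cpu()/_for_device(); drm_prime_pages_to_sg() now takes the drm_device; mmap and the page fault handler are wired up per object through drm_gem_object_funcs and use VM_PFNMAP with vmf_insert_pfn(); reservation-object waits go through dma_resv_usage_rw() and the debugfs dump through dma_resv_describe(); userptr objects are pinned with pin_user_pages_fast(FOLL_LONGTERM) and released with unpin_user_pages(); etnaviv_gem_mapping_get() can reap an idle mapping whose address does not match a requested softpin va; and drm_gem_object_put() replaces the _unlocked variant.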
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <drm/drm_prime.h>
  7#include <linux/dma-mapping.h>
  8#include <linux/shmem_fs.h>
  9#include <linux/spinlock.h>
 10#include <linux/vmalloc.h>
 11
 12#include "etnaviv_drv.h"
 13#include "etnaviv_gem.h"
 14#include "etnaviv_gpu.h"
 15#include "etnaviv_mmu.h"
 16
 17static struct lock_class_key etnaviv_shm_lock_class;
 18static struct lock_class_key etnaviv_userptr_lock_class;
 19
 20static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
 21{
 22	struct drm_device *dev = etnaviv_obj->base.dev;
 23	struct sg_table *sgt = etnaviv_obj->sgt;
 24
 25	/*
 26	 * For non-cached buffers, ensure the new pages are clean
 27	 * because display controller, GPU, etc. are not coherent.
 28	 */
 29	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 30		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 31}
 32
 33static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
 34{
 35	struct drm_device *dev = etnaviv_obj->base.dev;
 36	struct sg_table *sgt = etnaviv_obj->sgt;
 37
 38	/*
 39	 * For non-cached buffers, ensure the new pages are clean
 40	 * because display controller, GPU, etc. are not coherent:
 41	 *
 42	 * WARNING: The DMA API does not support concurrent CPU
 43	 * and device access to the memory area.  With BIDIRECTIONAL,
 44	 * we will clean the cache lines which overlap the region,
 45	 * and invalidate all cache lines (partially) contained in
 46	 * the region.
 47	 *
 48	 * If you have dirty data in the overlapping cache lines,
 49	 * that will corrupt the GPU-written data.  If you have
 50	 * written into the remainder of the region, this can
 51	 * discard those writes.
 52	 */
 53	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 54		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 55}
 56
 57/* called with etnaviv_obj->lock held */
 58static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 59{
 60	struct drm_device *dev = etnaviv_obj->base.dev;
 61	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
 62
 63	if (IS_ERR(p)) {
 64		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
 65		return PTR_ERR(p);
 66	}
 67
 68	etnaviv_obj->pages = p;
 69
 70	return 0;
 71}
 72
 73static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
 74{
 75	if (etnaviv_obj->sgt) {
 76		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
 77		sg_free_table(etnaviv_obj->sgt);
 78		kfree(etnaviv_obj->sgt);
 79		etnaviv_obj->sgt = NULL;
 80	}
 81	if (etnaviv_obj->pages) {
 82		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
 83				  true, false);
 84
 85		etnaviv_obj->pages = NULL;
 86	}
 87}
 88
 89struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 90{
 91	int ret;
 92
 93	lockdep_assert_held(&etnaviv_obj->lock);
 94
 95	if (!etnaviv_obj->pages) {
 96		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
 97		if (ret < 0)
 98			return ERR_PTR(ret);
 99	}
100
101	if (!etnaviv_obj->sgt) {
102		struct drm_device *dev = etnaviv_obj->base.dev;
103		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
104		struct sg_table *sgt;
105
106		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
107					    etnaviv_obj->pages, npages);
108		if (IS_ERR(sgt)) {
109			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
110				PTR_ERR(sgt));
111			return ERR_CAST(sgt);
112		}
113
114		etnaviv_obj->sgt = sgt;
115
116		etnaviv_gem_scatter_map(etnaviv_obj);
117	}
118
119	return etnaviv_obj->pages;
120}
121
122void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
123{
124	lockdep_assert_held(&etnaviv_obj->lock);
125	/* when we start tracking the pin count, then do something here */
126}
127
128static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
129		struct vm_area_struct *vma)
130{
131	pgprot_t vm_page_prot;
132
133	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
134
135	vm_page_prot = vm_get_page_prot(vma->vm_flags);
136
137	if (etnaviv_obj->flags & ETNA_BO_WC) {
138		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
139	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
140		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
141	} else {
142		/*
143		 * Shunt off cached objs to shmem file so they have their own
144		 * address_space (so unmap_mapping_range does what we want,
145		 * in particular in the case of mmap'd dmabufs)
146		 */
147		vma->vm_pgoff = 0;
148		vma_set_file(vma, etnaviv_obj->base.filp);
149
150		vma->vm_page_prot = vm_page_prot;
151	}
152
153	return 0;
154}
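/*
 * Compared with v5.4, the VMA is now always marked VM_PFNMAP (plus
 * VM_DONTEXPAND/VM_DONTDUMP) and the open-coded fput()/get_file() swap for
 * cached objects is replaced by vma_set_file().
 */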
155
156static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
157{
158	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
159
160	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
161}
162
163static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
164{
165	struct vm_area_struct *vma = vmf->vma;
166	struct drm_gem_object *obj = vma->vm_private_data;
167	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
168	struct page **pages;
169	unsigned long pfn;
170	pgoff_t pgoff;
171	int err;
172
173	/*
174	 * Make sure we don't parallel update on a fault, nor move or remove
175	 * something from beneath our feet.  Note that vmf_insert_page() is
176	 * specifically coded to take care of this, so we don't have to.
177	 */
178	err = mutex_lock_interruptible(&etnaviv_obj->lock);
179	if (err)
180		return VM_FAULT_NOPAGE;
181	/* make sure we have pages attached now */
182	pages = etnaviv_gem_get_pages(etnaviv_obj);
183	mutex_unlock(&etnaviv_obj->lock);
184
185	if (IS_ERR(pages)) {
186		err = PTR_ERR(pages);
187		return vmf_error(err);
188	}
189
190	/* We don't use vmf->pgoff since that has the fake offset: */
191	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
192
193	pfn = page_to_pfn(pages[pgoff]);
194
195	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
196	     pfn, pfn << PAGE_SHIFT);
197
198	return vmf_insert_pfn(vma, vmf->address, pfn);
199}
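/*
 * With VM_PFNMAP the fault handler inserts a raw pfn via vmf_insert_pfn()
 * instead of a struct page via vmf_insert_page() as in v5.4.
 */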
200
201int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
202{
203	int ret;
204
205	/* Make it mmapable */
206	ret = drm_gem_create_mmap_offset(obj);
207	if (ret)
208		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
209	else
210		*offset = drm_vma_node_offset_addr(&obj->vma_node);
211
212	return ret;
213}
214
215static struct etnaviv_vram_mapping *
216etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
217			     struct etnaviv_iommu_context *context)
218{
219	struct etnaviv_vram_mapping *mapping;
220
221	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
222		if (mapping->context == context)
223			return mapping;
224	}
225
226	return NULL;
227}
228
229void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
230{
231	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
232
233	mutex_lock(&etnaviv_obj->lock);
234	WARN_ON(mapping->use == 0);
235	mapping->use -= 1;
236	mutex_unlock(&etnaviv_obj->lock);
237
238	drm_gem_object_put(&etnaviv_obj->base);
239}
240
241struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
242	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
243	u64 va)
244{
245	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
246	struct etnaviv_vram_mapping *mapping;
247	struct page **pages;
248	int ret = 0;
249
250	mutex_lock(&etnaviv_obj->lock);
251	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
252	if (mapping) {
253		/*
254		 * Holding the object lock prevents the use count changing
255		 * beneath us.  If the use count is zero, the MMU might be
256		 * reaping this object, so take the lock and re-check that
257		 * the MMU owns this mapping to close this race.
258		 */
259		if (mapping->use == 0) {
260			mutex_lock(&mmu_context->lock);
261			if (mapping->context == mmu_context)
262				if (va && mapping->iova != va) {
263					etnaviv_iommu_reap_mapping(mapping);
264					mapping = NULL;
265				} else {
266					mapping->use += 1;
267				}
268			else
269				mapping = NULL;
270			mutex_unlock(&mmu_context->lock);
271			if (mapping)
272				goto out;
273		} else {
274			mapping->use += 1;
275			goto out;
276		}
277	}
278
279	pages = etnaviv_gem_get_pages(etnaviv_obj);
280	if (IS_ERR(pages)) {
281		ret = PTR_ERR(pages);
282		goto out;
283	}
284
285	/*
286	 * See if we have a reaped vram mapping we can re-use before
287	 * allocating a fresh mapping.
288	 */
289	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
290	if (!mapping) {
291		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
292		if (!mapping) {
293			ret = -ENOMEM;
294			goto out;
295		}
296
297		INIT_LIST_HEAD(&mapping->scan_node);
298		mapping->object = etnaviv_obj;
299	} else {
300		list_del(&mapping->obj_node);
301	}
302
303	mapping->use = 1;
304
305	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
306				    mmu_context->global->memory_base,
307				    mapping, va);
308	if (ret < 0)
309		kfree(mapping);
310	else
311		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
312
313out:
314	mutex_unlock(&etnaviv_obj->lock);
315
316	if (ret)
317		return ERR_PTR(ret);
318
319	/* Take a reference on the object */
320	drm_gem_object_get(obj);
321	return mapping;
322}
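/*
 * New in this version: if an idle mapping exists but its iova does not
 * match a requested softpin address, it is reaped via
 * etnaviv_iommu_reap_mapping() and rebuilt below.  The
 * etnaviv_iommu_context_get()/_put() calls that v5.4 made around the map
 * operation are no longer taken in this function.
 */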
323
324void *etnaviv_gem_vmap(struct drm_gem_object *obj)
325{
326	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
327
328	if (etnaviv_obj->vaddr)
329		return etnaviv_obj->vaddr;
330
331	mutex_lock(&etnaviv_obj->lock);
332	/*
333	 * Need to check again, as we might have raced with another thread
334	 * while waiting for the mutex.
335	 */
336	if (!etnaviv_obj->vaddr)
337		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
338	mutex_unlock(&etnaviv_obj->lock);
339
340	return etnaviv_obj->vaddr;
341}
342
343static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
344{
345	struct page **pages;
346
347	lockdep_assert_held(&obj->lock);
348
349	pages = etnaviv_gem_get_pages(obj);
350	if (IS_ERR(pages))
351		return NULL;
352
353	return vmap(pages, obj->base.size >> PAGE_SHIFT,
354			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
355}
356
357static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
358{
359	if (op & ETNA_PREP_READ)
360		return DMA_FROM_DEVICE;
361	else if (op & ETNA_PREP_WRITE)
362		return DMA_TO_DEVICE;
363	else
364		return DMA_BIDIRECTIONAL;
365}
366
367int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
368		struct drm_etnaviv_timespec *timeout)
369{
370	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
371	struct drm_device *dev = obj->dev;
372	bool write = !!(op & ETNA_PREP_WRITE);
373	int ret;
374
375	if (!etnaviv_obj->sgt) {
376		void *ret;
377
378		mutex_lock(&etnaviv_obj->lock);
379		ret = etnaviv_gem_get_pages(etnaviv_obj);
380		mutex_unlock(&etnaviv_obj->lock);
381		if (IS_ERR(ret))
382			return PTR_ERR(ret);
383	}
384
385	if (op & ETNA_PREP_NOSYNC) {
386		if (!dma_resv_test_signaled(obj->resv,
387					    dma_resv_usage_rw(write)))
388			return -EBUSY;
389	} else {
390		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
391
392		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
393					    true, remain);
394		if (ret <= 0)
395			return ret == 0 ? -ETIMEDOUT : ret;
396	}
397
398	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
399		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
400					 etnaviv_op_to_dma_dir(op));
401		etnaviv_obj->last_cpu_prep_op = op;
402	}
403
404	return 0;
405}
406
407int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
408{
409	struct drm_device *dev = obj->dev;
410	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
411
412	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
413		/* fini without a prep is almost certainly a userspace error */
414		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
415		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
416			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
417		etnaviv_obj->last_cpu_prep_op = 0;
418	}
419
420	return 0;
421}
422
423int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
424	struct drm_etnaviv_timespec *timeout)
425{
426	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
427
428	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
429}
430
431#ifdef CONFIG_DEBUG_FS
432static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
433{
434	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
435	struct dma_resv *robj = obj->resv;
436	unsigned long off = drm_vma_node_start(&obj->vma_node);
437	int r;
438
439	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
440			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
441			obj->name, kref_read(&obj->refcount),
442			off, etnaviv_obj->vaddr, obj->size);
443
444	r = dma_resv_lock(robj, NULL);
445	if (r)
446		return;
447
448	dma_resv_describe(robj, m);
449	dma_resv_unlock(robj);
450}
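/*
 * The debugfs dump now takes the reservation lock and uses
 * dma_resv_describe() rather than walking the shared and exclusive fence
 * lists under RCU as the v5.4 code above does.
 */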
451
452void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
453	struct seq_file *m)
454{
455	struct etnaviv_gem_object *etnaviv_obj;
456	int count = 0;
457	size_t size = 0;
458
459	mutex_lock(&priv->gem_lock);
460	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
461		struct drm_gem_object *obj = &etnaviv_obj->base;
462
463		seq_puts(m, "   ");
464		etnaviv_gem_describe(obj, m);
465		count++;
466		size += obj->size;
467	}
468	mutex_unlock(&priv->gem_lock);
469
470	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
471}
472#endif
473
474static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
475{
476	vunmap(etnaviv_obj->vaddr);
477	put_pages(etnaviv_obj);
478}
479
480static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
481	.get_pages = etnaviv_gem_shmem_get_pages,
482	.release = etnaviv_gem_shmem_release,
483	.vmap = etnaviv_gem_vmap_impl,
484	.mmap = etnaviv_gem_mmap_obj,
485};
486
487void etnaviv_gem_free_object(struct drm_gem_object *obj)
488{
489	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
490	struct etnaviv_drm_private *priv = obj->dev->dev_private;
491	struct etnaviv_vram_mapping *mapping, *tmp;
492
493	/* object should not be active */
494	WARN_ON(is_active(etnaviv_obj));
495
496	mutex_lock(&priv->gem_lock);
497	list_del(&etnaviv_obj->gem_node);
498	mutex_unlock(&priv->gem_lock);
499
500	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
501				 obj_node) {
502		struct etnaviv_iommu_context *context = mapping->context;
503
504		WARN_ON(mapping->use);
505
506		if (context)
507			etnaviv_iommu_unmap_gem(context, mapping);
508
509		list_del(&mapping->obj_node);
510		kfree(mapping);
511	}
512
513	etnaviv_obj->ops->release(etnaviv_obj);
514	drm_gem_object_release(obj);
515
516	kfree(etnaviv_obj);
517}
518
519void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
520{
521	struct etnaviv_drm_private *priv = dev->dev_private;
522	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
523
524	mutex_lock(&priv->gem_lock);
525	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
526	mutex_unlock(&priv->gem_lock);
527}
528
529static const struct vm_operations_struct vm_ops = {
530	.fault = etnaviv_gem_fault,
531	.open = drm_gem_vm_open,
532	.close = drm_gem_vm_close,
533};
534
535static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
536	.free = etnaviv_gem_free_object,
537	.pin = etnaviv_gem_prime_pin,
538	.unpin = etnaviv_gem_prime_unpin,
539	.get_sg_table = etnaviv_gem_prime_get_sg_table,
540	.vmap = etnaviv_gem_prime_vmap,
541	.mmap = etnaviv_gem_mmap,
542	.vm_ops = &vm_ops,
543};
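/*
 * mmap and the fault handler are now reached per object through
 * drm_gem_object_funcs and its vm_ops, whereas in the v5.4 listing
 * etnaviv_gem_mmap() and etnaviv_gem_fault() are non-static and hooked up
 * at the driver level.
 */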
544
545static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
546	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
547{
548	struct etnaviv_gem_object *etnaviv_obj;
549	unsigned sz = sizeof(*etnaviv_obj);
550	bool valid = true;
551
552	/* validate flags */
553	switch (flags & ETNA_BO_CACHE_MASK) {
554	case ETNA_BO_UNCACHED:
555	case ETNA_BO_CACHED:
556	case ETNA_BO_WC:
557		break;
558	default:
559		valid = false;
560	}
561
562	if (!valid) {
563		dev_err(dev->dev, "invalid cache flag: %x\n",
564			(flags & ETNA_BO_CACHE_MASK));
565		return -EINVAL;
566	}
567
568	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
569	if (!etnaviv_obj)
570		return -ENOMEM;
571
572	etnaviv_obj->flags = flags;
573	etnaviv_obj->ops = ops;
574
575	mutex_init(&etnaviv_obj->lock);
576	INIT_LIST_HEAD(&etnaviv_obj->vram_list);
577
578	*obj = &etnaviv_obj->base;
579	(*obj)->funcs = &etnaviv_gem_object_funcs;
580
581	return 0;
582}
583
584/* convenience method to construct a GEM buffer object, and userspace handle */
585int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
586	u32 size, u32 flags, u32 *handle)
587{
588	struct etnaviv_drm_private *priv = dev->dev_private;
589	struct drm_gem_object *obj = NULL;
590	int ret;
591
592	size = PAGE_ALIGN(size);
593
594	ret = etnaviv_gem_new_impl(dev, size, flags,
595				   &etnaviv_gem_shmem_ops, &obj);
596	if (ret)
597		goto fail;
598
599	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
600
601	ret = drm_gem_object_init(dev, obj, size);
602	if (ret)
603		goto fail;
604
605	/*
606	 * Our buffers are kept pinned, so allocating them from the MOVABLE
607	 * zone is a really bad idea, and conflicts with CMA. See comments
608	 * above new_inode() why this is required _and_ expected if you're
609	 * going to pin these pages.
610	 */
611	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
612
613	etnaviv_gem_obj_add(dev, obj);
614
615	ret = drm_gem_handle_create(file, obj, handle);
616
617	/* drop reference from allocate - handle holds it now */
618fail:
619	drm_gem_object_put(obj);
620
621	return ret;
622}
623
624int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
625	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
626{
627	struct drm_gem_object *obj;
628	int ret;
629
630	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
631	if (ret)
632		return ret;
633
634	drm_gem_private_object_init(dev, obj, size);
635
636	*res = to_etnaviv_bo(obj);
637
638	return 0;
639}
640
641static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
642{
643	struct page **pvec = NULL;
644	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
645	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
646	unsigned int gup_flags = FOLL_LONGTERM;
647
648	might_lock_read(&current->mm->mmap_lock);
649
650	if (userptr->mm != current->mm)
651		return -EPERM;
652
653	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
654	if (!pvec)
655		return -ENOMEM;
656
657	if (!userptr->ro)
658		gup_flags |= FOLL_WRITE;
659
660	do {
661		unsigned num_pages = npages - pinned;
662		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
663		struct page **pages = pvec + pinned;
664
665		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
666		if (ret < 0) {
667			unpin_user_pages(pvec, pinned);
668			kvfree(pvec);
669			return ret;
670		}
671
672		pinned += ret;
673
674	} while (pinned < npages);
675
676	etnaviv_obj->pages = pvec;
677
678	return 0;
679}
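/*
 * Userptr pages are now long-term pinned with
 * pin_user_pages_fast(FOLL_LONGTERM) and released with unpin_user_pages(),
 * replacing the get_user_pages_fast()/release_pages() pair used in v5.4.
 */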
680
681static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
682{
683	if (etnaviv_obj->sgt) {
684		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
685		sg_free_table(etnaviv_obj->sgt);
686		kfree(etnaviv_obj->sgt);
687	}
688	if (etnaviv_obj->pages) {
689		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
690
691		unpin_user_pages(etnaviv_obj->pages, npages);
692		kvfree(etnaviv_obj->pages);
693	}
694}
695
696static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
697		struct vm_area_struct *vma)
698{
699	return -EINVAL;
700}
701
702static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
703	.get_pages = etnaviv_gem_userptr_get_pages,
704	.release = etnaviv_gem_userptr_release,
705	.vmap = etnaviv_gem_vmap_impl,
706	.mmap = etnaviv_gem_userptr_mmap_obj,
707};
708
709int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
710	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
711{
712	struct etnaviv_gem_object *etnaviv_obj;
713	int ret;
714
715	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
716				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
717	if (ret)
718		return ret;
719
720	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
721
722	etnaviv_obj->userptr.ptr = ptr;
723	etnaviv_obj->userptr.mm = current->mm;
724	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
725
726	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
727
728	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
729
730	/* drop reference from allocate - handle holds it now */
731	drm_gem_object_put(&etnaviv_obj->base);
732	return ret;
733}
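
For orientation, here is a minimal userspace sketch of the path these kernel functions service. It is written against the etnaviv uapi (the ioctl macros and structs come from etnaviv_drm.h); the include paths, the hypothetical etnaviv_bo_roundtrip() helper, the 4 KiB size and the omitted error cleanup are assumptions for illustration, not part of the listings above. DRM_IOCTL_ETNAVIV_GEM_NEW lands in etnaviv_gem_new_handle(), DRM_IOCTL_ETNAVIV_GEM_INFO in etnaviv_gem_mmap_offset(), the mmap() is serviced by etnaviv_gem_mmap() and etnaviv_gem_fault(), and the CPU_PREP/CPU_FINI ioctls map to etnaviv_gem_cpu_prep()/etnaviv_gem_cpu_fini().

#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>            /* drmIoctl(), from libdrm */
#include <drm/etnaviv_drm.h>    /* etnaviv uapi; include path assumed */

/* Hypothetical helper: fd is an open etnaviv DRM render node. */
static int etnaviv_bo_roundtrip(int fd)
{
	struct drm_etnaviv_gem_new new_req = {
		.size  = 4096,
		.flags = ETNA_BO_CACHED,   /* cached BO, so prep/fini do cache maintenance */
	};
	struct drm_etnaviv_gem_info info = { 0 };
	struct drm_etnaviv_gem_cpu_prep prep = { .op = ETNA_PREP_WRITE };
	struct drm_etnaviv_gem_cpu_fini fini = { 0 };
	void *map;

	/* etnaviv_gem_new_handle(): allocate a shmem-backed BO, get a handle */
	if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &new_req))
		return -1;

	/* etnaviv_gem_mmap_offset(): fake offset used for the mmap() below */
	info.handle = new_req.handle;
	if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info))
		return -1;

	/* serviced by etnaviv_gem_mmap() and, on first touch, etnaviv_gem_fault() */
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, info.offset);
	if (map == MAP_FAILED)
		return -1;

	/*
	 * etnaviv_gem_cpu_prep()/cpu_fini() bracket CPU access; the zeroed
	 * timeout (an absolute deadline) is fine for an idle, never-submitted BO.
	 */
	prep.handle = new_req.handle;
	if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep))
		return -1;
	memset(map, 0, 4096);
	fini.handle = new_req.handle;
	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);

	munmap(map, 4096);
	return 0;
}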