v3.15 (drivers/gpu/drm/msm/msm_gem.c)

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (iommu_present(&platform_bus_type))
			p = drm_gem_get_pages(obj, 0);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (iommu_present(&platform_bus_type))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct msm_mmu *mmu = priv->mmus[id];
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			uint32_t offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = (uint32_t)mmap_offset(obj);
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	sz = sizeof(*msm_obj);
	if (!iommu_present(&platform_bus_type))
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (!iommu_present(&platform_bus_type))
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (iommu_present(&platform_bus_type)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
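
For reference, a minimal sketch of how a caller might drive the v3.15 helpers above: allocate a buffer under struct_mutex, map it into one of the per-id MMUs, and get a kernel mapping. "example_alloc", "my_dev", and "mmu_id" are hypothetical stand-ins, not names from this file; SZ_4K is the standard kernel size constant.

/* hypothetical caller; assumes a valid drm_device and a valid mmus[] slot */
static int example_alloc(struct drm_device *my_dev, int mmu_id)
{
	struct drm_gem_object *obj;
	uint32_t iova;
	void *vaddr;
	int ret = 0;

	mutex_lock(&my_dev->struct_mutex);	/* msm_gem_new() asserts this lock */
	obj = msm_gem_new(my_dev, SZ_4K, MSM_BO_WC);
	mutex_unlock(&my_dev->struct_mutex);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* maps the pages into the mmu_id'th MMU; the iova stays valid
	 * until the bo is freed (msm_gem_put_iova() is still a stub)
	 */
	ret = msm_gem_get_iova(obj, mmu_id, &iova);
	if (ret)
		goto out;

	/* writecombine kernel mapping; NULL here means vmap() failed */
	vaddr = msm_gem_vaddr(obj);
	if (IS_ERR_OR_NULL(vaddr))
		ret = vaddr ? PTR_ERR(vaddr) : -ENOMEM;
out:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}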
v6.2 (drivers/gpu/drm/msm/msm_gem.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_lru(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		to_msm_bo(obj)->pin_count++;
		update_lru(obj);
	}

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin a iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_lru(obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru(obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);
		GEM_WARN_ON(msm_obj->vmap_count);

		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
	}
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
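
For reference, a minimal sketch of the v6.2 kernel-buffer convenience pair above (msm_gem_kernel_new / msm_gem_kernel_put), whose signatures appear in this file. "example_kernel_bo", "my_dev", and "my_aspace" are hypothetical stand-ins supplied by the caller.

/* hypothetical caller; allocates, pins, maps, uses, and releases a kernel bo */
static int example_kernel_bo(struct drm_device *my_dev,
		struct msm_gem_address_space *my_aspace)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	/* one call: create the object, pin an iova in my_aspace, and vmap it */
	vaddr = msm_gem_kernel_new(my_dev, SZ_4K, MSM_BO_WC,
			my_aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, SZ_4K);	/* CPU writes through the kernel mapping */

	/* undoes all three: drops the vmap, the iova pin, and the object ref */
	msm_gem_kernel_put(bo, my_aspace);
	return 0;
}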