v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Russell King
  4 */
  5
  6#include <linux/dma-buf.h>
  7#include <linux/dma-mapping.h>
  8#include <linux/mman.h>
  9#include <linux/shmem_fs.h>
 10
 11#include <drm/armada_drm.h>
 12#include <drm/drm_prime.h>
 13
 14#include "armada_drm.h"
 15#include "armada_gem.h"
 16#include "armada_ioctlP.h"
 17
 18MODULE_IMPORT_NS("DMA_BUF");
 19
 20static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
 21{
 22	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
 23	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
 24	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 25
 26	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
 27	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 28}
 29
 30static const struct vm_operations_struct armada_gem_vm_ops = {
 31	.fault	= armada_gem_vm_fault,
 32	.open	= drm_gem_vm_open,
 33	.close	= drm_gem_vm_close,
 34};
 35
 36static size_t roundup_gem_size(size_t size)
 37{
 38	return roundup(size, PAGE_SIZE);
 39}
 40
 41void armada_gem_free_object(struct drm_gem_object *obj)
 42{
 43	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
 44	struct armada_private *priv = drm_to_armada_dev(obj->dev);
 45
 46	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 47
 48	drm_gem_free_mmap_offset(&dobj->obj);
 49
 50	might_lock(&priv->linear_lock);
 51
 52	if (dobj->page) {
 53		/* page backed memory */
 54		unsigned int order = get_order(dobj->obj.size);
 55		__free_pages(dobj->page, order);
 56	} else if (dobj->linear) {
 57		/* linear backed memory */
 58		mutex_lock(&priv->linear_lock);
 59		drm_mm_remove_node(dobj->linear);
 60		mutex_unlock(&priv->linear_lock);
 61		kfree(dobj->linear);
 62		if (dobj->addr)
 63			iounmap(dobj->addr);
 64	}
 65
 66	if (dobj->obj.import_attach) {
 67		/* We only ever display imported data */
 68		if (dobj->sgt)
 69			dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
 70							  dobj->sgt, DMA_TO_DEVICE);
 71		drm_prime_gem_destroy(&dobj->obj, NULL);
 72	}
 73
 74	drm_gem_object_release(&dobj->obj);
 75
 76	kfree(dobj);
 77}
 78
 79int
 80armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 81{
 82	struct armada_private *priv = drm_to_armada_dev(dev);
 83	size_t size = obj->obj.size;
 84
 85	if (obj->page || obj->linear)
 86		return 0;
 87
 88	/*
 89	 * If it is a small allocation (typically cursor, which will
 90	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
 91	 * Framebuffers will never be this small (our minimum size for
 92	 * framebuffers is larger than this anyway.)  Such objects are
 93	 * only accessed by the CPU so we don't need any special handling
 94	 * here.
 95	 */
 96	if (size <= 8192) {
 97		unsigned int order = get_order(size);
 98		struct page *p = alloc_pages(GFP_KERNEL, order);
 99
100		if (p) {
101			obj->addr = page_address(p);
102			obj->phys_addr = page_to_phys(p);
103			obj->page = p;
104
105			memset(obj->addr, 0, PAGE_ALIGN(size));
106		}
107	}
108
109	/*
110	 * We could grab something from DMA if it's enabled, but that
111	 * involves building in a problem:
112	 *
113	 * GEM DMA helper interface uses dma_alloc_coherent(), which provides
114	 * us with a CPU virtual address and a device address.
115	 *
116	 * The CPU virtual address may be either an address in the kernel
117	 * direct mapped region (for example, as it would be on x86) or
118	 * it may be remapped into another part of kernel memory space
119	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
120	 * returned virtual address is invalid depending on the architecture
121	 * implementation.
122	 *
123	 * The device address may also not be a physical address; it may
124	 * be that there is some kind of remapping between the device and
125	 * system RAM, which makes the use of the device address also
126	 * unsafe to re-use as a physical address.
127	 *
128	 * This makes DRM usage of dma_alloc_coherent() in a generic way
129	 * at best very questionable and unsafe.
130	 */
131
132	/* Otherwise, grab it from our linear allocation */
133	if (!obj->page) {
134		struct drm_mm_node *node;
135		unsigned align = min_t(unsigned, size, SZ_2M);
136		void __iomem *ptr;
137		int ret;
138
139		node = kzalloc(sizeof(*node), GFP_KERNEL);
140		if (!node)
141			return -ENOSPC;
142
143		mutex_lock(&priv->linear_lock);
144		ret = drm_mm_insert_node_generic(&priv->linear, node,
145						 size, align, 0, 0);
146		mutex_unlock(&priv->linear_lock);
147		if (ret) {
148			kfree(node);
149			return ret;
150		}
151
152		obj->linear = node;
153
154		/* Ensure that the memory we're returning is cleared. */
155		ptr = ioremap_wc(obj->linear->start, size);
156		if (!ptr) {
157			mutex_lock(&priv->linear_lock);
158			drm_mm_remove_node(obj->linear);
159			mutex_unlock(&priv->linear_lock);
160			kfree(obj->linear);
161			obj->linear = NULL;
162			return -ENOMEM;
163		}
164
165		memset_io(ptr, 0, size);
166		iounmap(ptr);
167
168		obj->phys_addr = obj->linear->start;
169		obj->dev_addr = obj->linear->start;
170		obj->mapped = true;
171	}
172
173	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
174			 (unsigned long long)obj->phys_addr,
175			 (unsigned long long)obj->dev_addr);
176
177	return 0;
178}
179
180void *
181armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
182{
183	/* only linear objects need to be ioremap'd */
184	if (!dobj->addr && dobj->linear)
185		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
186	return dobj->addr;
187}
188
189static const struct drm_gem_object_funcs armada_gem_object_funcs = {
190	.free = armada_gem_free_object,
191	.export = armada_gem_prime_export,
192	.vm_ops = &armada_gem_vm_ops,
193};
194
195struct armada_gem_object *
196armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
197{
198	struct armada_gem_object *obj;
199
200	size = roundup_gem_size(size);
201
202	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
203	if (!obj)
204		return NULL;
205
206	obj->obj.funcs = &armada_gem_object_funcs;
207
208	drm_gem_private_object_init(dev, &obj->obj, size);
209
210	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
211
212	return obj;
213}
214
215static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
216	size_t size)
217{
218	struct armada_gem_object *obj;
219	struct address_space *mapping;
220
221	size = roundup_gem_size(size);
222
223	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
224	if (!obj)
225		return NULL;
226
227	obj->obj.funcs = &armada_gem_object_funcs;
228
229	if (drm_gem_object_init(dev, &obj->obj, size)) {
230		kfree(obj);
231		return NULL;
232	}
233
234	mapping = obj->obj.filp->f_mapping;
235	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
236
237	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
238
239	return obj;
240}
241
242/* Dumb alloc support */
243int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
244	struct drm_mode_create_dumb *args)
245{
246	struct armada_gem_object *dobj;
247	u32 handle;
248	size_t size;
249	int ret;
250
251	args->pitch = armada_pitch(args->width, args->bpp);
252	args->size = size = args->pitch * args->height;
253
254	dobj = armada_gem_alloc_private_object(dev, size);
255	if (dobj == NULL)
256		return -ENOMEM;
257
258	ret = armada_gem_linear_back(dev, dobj);
259	if (ret)
260		goto err;
261
262	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
263	if (ret)
264		goto err;
265
266	args->handle = handle;
267
268	/* drop reference from allocate - handle holds it now */
269	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
270 err:
271	drm_gem_object_put(&dobj->obj);
272	return ret;
273}
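For context, this dumb-alloc path is reached from userspace through the generic KMS dumb-buffer ioctl. A minimal sketch follows; the open DRM file descriptor fd, the 64x32/32bpp cursor-sized dimensions and the helper name are illustrative assumptions, not part of this driver, and error handling is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative helper: ask the driver for a small dumb buffer.  On success
 * the kernel fills in creq->pitch, creq->size and the GEM handle, having
 * gone through armada_gem_dumb_create() and armada_gem_linear_back(). */
static int create_dumb_bo(int fd, struct drm_mode_create_dumb *creq)
{
	memset(creq, 0, sizeof(*creq));
	creq->width  = 64;
	creq->height = 32;
	creq->bpp    = 32;
	return ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);
}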
274
275/* Private driver gem ioctls */
276int armada_gem_create_ioctl(struct drm_device *dev, void *data,
277	struct drm_file *file)
278{
279	struct drm_armada_gem_create *args = data;
280	struct armada_gem_object *dobj;
281	size_t size;
282	u32 handle;
283	int ret;
284
285	if (args->size == 0)
286		return -ENOMEM;
287
288	size = args->size;
289
290	dobj = armada_gem_alloc_object(dev, size);
291	if (dobj == NULL)
292		return -ENOMEM;
293
294	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
295	if (ret)
296		goto err;
297
298	args->handle = handle;
299
300	/* drop reference from allocate - handle holds it now */
301	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
302 err:
303	drm_gem_object_put(&dobj->obj);
304	return ret;
305}
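A userspace sketch of reaching this driver-private ioctl, assuming the UAPI declared in include/uapi/drm/armada_drm.h (struct drm_armada_gem_create with the size/handle fields used by the handler, and DRM_IOCTL_ARMADA_GEM_CREATE). The helper name and error handling are illustrative only.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/armada_drm.h>

/* Illustrative helper: allocate a shmem-backed GEM object of 'size' bytes
 * and return its handle via *handle. */
static int armada_create_bo(int fd, uint32_t size, uint32_t *handle)
{
	struct drm_armada_gem_create arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = size;
	if (ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &arg))
		return -1;
	*handle = arg.handle;
	return 0;
}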
306
307/* Map a shmem-backed object into process memory space */
308int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
309	struct drm_file *file)
310{
311	struct drm_armada_gem_mmap *args = data;
312	struct armada_gem_object *dobj;
313	unsigned long addr;
314
315	dobj = armada_gem_object_lookup(file, args->handle);
316	if (dobj == NULL)
317		return -ENOENT;
318
319	if (!dobj->obj.filp) {
320		drm_gem_object_put(&dobj->obj);
321		return -EINVAL;
322	}
323
324	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
325		       MAP_SHARED, args->offset);
326	drm_gem_object_put(&dobj->obj);
327	if (IS_ERR_VALUE(addr))
328		return addr;
329
330	args->addr = addr;
331
332	return 0;
333}
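A matching sketch for the mmap ioctl above, again assuming the armada_drm.h UAPI (struct drm_armada_gem_mmap carrying handle/offset/size in and addr out, as the handler uses them). The helper is hypothetical.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/armada_drm.h>

/* Illustrative helper: map a whole shmem-backed BO into this process and
 * return the CPU pointer the kernel placed in arg.addr. */
static void *armada_map_bo(int fd, uint32_t handle, uint64_t size)
{
	struct drm_armada_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;
	arg.size = size;
	if (ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &arg))
		return NULL;
	return (void *)(uintptr_t)arg.addr;
}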
334
335int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
336	struct drm_file *file)
337{
338	struct drm_armada_gem_pwrite *args = data;
339	struct armada_gem_object *dobj;
340	char __user *ptr;
341	int ret = 0;
342
343	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
344		args->handle, args->offset, args->size, args->ptr);
345
346	if (args->size == 0)
347		return 0;
348
349	ptr = (char __user *)(uintptr_t)args->ptr;
350
351	if (!access_ok(ptr, args->size))
352		return -EFAULT;
353
354	if (fault_in_readable(ptr, args->size))
355		return -EFAULT;
356
357	dobj = armada_gem_object_lookup(file, args->handle);
358	if (dobj == NULL)
359		return -ENOENT;
360
361	/* Must be a kernel-mapped object */
362	if (!dobj->addr)
363		return -EINVAL;
364
365	if (args->offset > dobj->obj.size ||
366	    args->size > dobj->obj.size - args->offset) {
367		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
368		ret = -EINVAL;
369		goto unref;
370	}
371
372	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
373		ret = -EFAULT;
374	} else if (dobj->update) {
375		dobj->update(dobj->update_data);
376		ret = 0;
377	}
378
379 unref:
380	drm_gem_object_put(&dobj->obj);
381	return ret;
382}
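A corresponding sketch for the pwrite ioctl, assuming struct drm_armada_gem_pwrite from armada_drm.h (ptr/handle/offset/size, matching the args-> fields above). Note from the handler that the target must be a kernel-mapped object (dobj->addr set), i.e. a linear or page-backed buffer rather than a plain shmem one. Helper name is illustrative.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/armada_drm.h>

/* Illustrative helper: copy 'size' bytes from 'data' into the BO at byte
 * offset 'offset' via the pwrite ioctl handled above. */
static int armada_pwrite_bo(int fd, uint32_t handle, uint32_t offset,
			    uint32_t size, const void *data)
{
	struct drm_armada_gem_pwrite arg;

	memset(&arg, 0, sizeof(arg));
	arg.ptr = (uint64_t)(uintptr_t)data;
	arg.handle = handle;
	arg.offset = offset;
	arg.size = size;
	return ioctl(fd, DRM_IOCTL_ARMADA_GEM_PWRITE, &arg);
}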
383
384/* Prime support */
385static struct sg_table *
386armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
387	enum dma_data_direction dir)
388{
389	struct drm_gem_object *obj = attach->dmabuf->priv;
390	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
391	struct scatterlist *sg;
392	struct sg_table *sgt;
393	int i;
394
395	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
396	if (!sgt)
397		return NULL;
398
399	if (dobj->obj.filp) {
400		struct address_space *mapping;
401		int count;
402
403		count = dobj->obj.size / PAGE_SIZE;
404		if (sg_alloc_table(sgt, count, GFP_KERNEL))
405			goto free_sgt;
406
407		mapping = dobj->obj.filp->f_mapping;
408
409		for_each_sgtable_sg(sgt, sg, i) {
410			struct page *page;
411
412			page = shmem_read_mapping_page(mapping, i);
413			if (IS_ERR(page))
414				goto release;
415
416			sg_set_page(sg, page, PAGE_SIZE, 0);
417		}
418
419		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
420			goto release;
421	} else if (dobj->page) {
422		/* Single contiguous page */
423		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
424			goto free_sgt;
425
426		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
427
428		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
429			goto free_table;
430	} else if (dobj->linear) {
431		/* Single contiguous physical region - no struct page */
432		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
433			goto free_sgt;
434		sg_dma_address(sgt->sgl) = dobj->dev_addr;
435		sg_dma_len(sgt->sgl) = dobj->obj.size;
436	} else {
437		goto free_sgt;
438	}
439	return sgt;
440
441 release:
442	for_each_sgtable_sg(sgt, sg, i)
443		if (sg_page(sg))
444			put_page(sg_page(sg));
445 free_table:
446	sg_free_table(sgt);
447 free_sgt:
448	kfree(sgt);
449	return NULL;
450}
451
452static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
453	struct sg_table *sgt, enum dma_data_direction dir)
454{
455	struct drm_gem_object *obj = attach->dmabuf->priv;
456	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
457	int i;
458
459	if (!dobj->linear)
460		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
461
462	if (dobj->obj.filp) {
463		struct scatterlist *sg;
464
465		for_each_sgtable_sg(sgt, sg, i)
466			put_page(sg_page(sg));
467	}
468
469	sg_free_table(sgt);
470	kfree(sgt);
471}
472
473static int
474armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
475{
476	return -EINVAL;
477}
478
479static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
480	.map_dma_buf	= armada_gem_prime_map_dma_buf,
481	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
482	.release	= drm_gem_dmabuf_release,
483	.mmap		= armada_gem_dmabuf_mmap,
484};
485
486struct dma_buf *
487armada_gem_prime_export(struct drm_gem_object *obj, int flags)
488{
489	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
490
491	exp_info.ops = &armada_gem_prime_dmabuf_ops;
492	exp_info.size = obj->size;
493	exp_info.flags = O_RDWR;
494	exp_info.priv = obj;
495
496	return drm_gem_dmabuf_export(obj->dev, &exp_info);
497}
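Userspace normally reaches armada_gem_prime_export() through the generic PRIME handle-to-fd ioctl; a minimal sketch using libdrm follows (the helper name is illustrative).

#include <stdint.h>
#include <xf86drm.h>

/* Illustrative helper: export a GEM handle as a dma-buf file descriptor.
 * The kernel side ends up in armada_gem_prime_export() above. */
static int armada_export_bo(int fd, uint32_t handle, int *dmabuf_fd)
{
	return drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC | DRM_RDWR, dmabuf_fd);
}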
498
499struct drm_gem_object *
500armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
501{
502	struct dma_buf_attachment *attach;
503	struct armada_gem_object *dobj;
504
505	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
506		struct drm_gem_object *obj = buf->priv;
507		if (obj->dev == dev) {
508			/*
509			 * Importing our own dmabuf(s) increases the
510			 * refcount on the gem object itself.
511			 */
512			drm_gem_object_get(obj);
513			return obj;
514		}
515	}
516
517	attach = dma_buf_attach(buf, dev->dev);
518	if (IS_ERR(attach))
519		return ERR_CAST(attach);
520
521	dobj = armada_gem_alloc_private_object(dev, buf->size);
522	if (!dobj) {
523		dma_buf_detach(buf, attach);
524		return ERR_PTR(-ENOMEM);
525	}
526
527	dobj->obj.import_attach = attach;
528	get_dma_buf(buf);
529
530	/*
531	 * Don't call dma_buf_map_attachment() here - it maps the
532	 * scatterlist immediately for DMA, and this is not always
533	 * an appropriate thing to do.
534	 */
535	return &dobj->obj;
536}
537
538int armada_gem_map_import(struct armada_gem_object *dobj)
539{
540	int ret;
541
542	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
543						    DMA_TO_DEVICE);
544	if (IS_ERR(dobj->sgt)) {
545		ret = PTR_ERR(dobj->sgt);
546		dobj->sgt = NULL;
547		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
548		return ret;
549	}
550	if (dobj->sgt->nents > 1) {
551		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
552		return -EINVAL;
553	}
554	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
555		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
556		return -EINVAL;
557	}
558	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
559	dobj->mapped = true;
560	return 0;
561}
v3.15
 
  1/*
  2 * Copyright (C) 2012 Russell King
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8#include <linux/dma-buf.h>
  9#include <linux/dma-mapping.h>
 10#include <linux/shmem_fs.h>
 11#include <drm/drmP.h>
 12#include "armada_drm.h"
 13#include "armada_gem.h"
 14#include <drm/armada_drm.h>
 15#include "armada_ioctlP.h"
 16
 17static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 18{
 19	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
 20	unsigned long addr = (unsigned long)vmf->virtual_address;
 21	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 22	int ret;
 23
 24	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
 25	ret = vm_insert_pfn(vma, addr, pfn);
 26
 27	switch (ret) {
 28	case 0:
 29	case -EBUSY:
 30		return VM_FAULT_NOPAGE;
 31	case -ENOMEM:
 32		return VM_FAULT_OOM;
 33	default:
 34		return VM_FAULT_SIGBUS;
 35	}
 36}
 37
 38const struct vm_operations_struct armada_gem_vm_ops = {
 39	.fault	= armada_gem_vm_fault,
 40	.open	= drm_gem_vm_open,
 41	.close	= drm_gem_vm_close,
 42};
 43
 44static size_t roundup_gem_size(size_t size)
 45{
 46	return roundup(size, PAGE_SIZE);
 47}
 48
 49/* dev->struct_mutex is held here */
 50void armada_gem_free_object(struct drm_gem_object *obj)
 51{
 52	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
 53
 54	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 55
 56	drm_gem_free_mmap_offset(&dobj->obj);
 57
 58	if (dobj->page) {
 59		/* page backed memory */
 60		unsigned int order = get_order(dobj->obj.size);
 61		__free_pages(dobj->page, order);
 62	} else if (dobj->linear) {
 63		/* linear backed memory */
 64		drm_mm_remove_node(dobj->linear);
 65		kfree(dobj->linear);
 66		if (dobj->addr)
 67			iounmap(dobj->addr);
 68	}
 69
 70	if (dobj->obj.import_attach) {
 71		/* We only ever display imported data */
 72		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
 73					 DMA_TO_DEVICE);
 74		drm_prime_gem_destroy(&dobj->obj, NULL);
 75	}
 76
 77	drm_gem_object_release(&dobj->obj);
 78
 79	kfree(dobj);
 80}
 81
 82int
 83armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 84{
 85	struct armada_private *priv = dev->dev_private;
 86	size_t size = obj->obj.size;
 87
 88	if (obj->page || obj->linear)
 89		return 0;
 90
 91	/*
 92	 * If it is a small allocation (typically cursor, which will
 93	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
 94	 * Framebuffers will never be this small (our minimum size for
 95	 * framebuffers is larger than this anyway.)  Such objects are
 96	 * only accessed by the CPU so we don't need any special handling
 97	 * here.
 98	 */
 99	if (size <= 8192) {
100		unsigned int order = get_order(size);
101		struct page *p = alloc_pages(GFP_KERNEL, order);
102
103		if (p) {
104			obj->addr = page_address(p);
105			obj->phys_addr = page_to_phys(p);
106			obj->page = p;
107
108			memset(obj->addr, 0, PAGE_ALIGN(size));
109		}
110	}
111
112	/*
113	 * We could grab something from CMA if it's enabled, but that
114	 * involves building in a problem:
115	 *
116	 * CMA's interface uses dma_alloc_coherent(), which provides us
117	 * with a CPU virtual address and a device address.
118	 *
119	 * The CPU virtual address may be either an address in the kernel
120	 * direct mapped region (for example, as it would be on x86) or
121	 * it may be remapped into another part of kernel memory space
122	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
123	 * returned virtual address is invalid depending on the architecture
124	 * implementation.
125	 *
126	 * The device address may also not be a physical address; it may
127	 * be that there is some kind of remapping between the device and
128	 * system RAM, which makes the use of the device address also
129	 * unsafe to re-use as a physical address.
130	 *
131	 * This makes DRM usage of dma_alloc_coherent() in a generic way
132	 * at best very questionable and unsafe.
133	 */
134
135	/* Otherwise, grab it from our linear allocation */
136	if (!obj->page) {
137		struct drm_mm_node *node;
138		unsigned align = min_t(unsigned, size, SZ_2M);
139		void __iomem *ptr;
140		int ret;
141
142		node = kzalloc(sizeof(*node), GFP_KERNEL);
143		if (!node)
144			return -ENOSPC;
145
146		mutex_lock(&dev->struct_mutex);
147		ret = drm_mm_insert_node(&priv->linear, node, size, align,
148					 DRM_MM_SEARCH_DEFAULT);
149		mutex_unlock(&dev->struct_mutex);
150		if (ret) {
151			kfree(node);
152			return ret;
153		}
154
155		obj->linear = node;
156
157		/* Ensure that the memory we're returning is cleared. */
158		ptr = ioremap_wc(obj->linear->start, size);
159		if (!ptr) {
160			mutex_lock(&dev->struct_mutex);
161			drm_mm_remove_node(obj->linear);
162			mutex_unlock(&dev->struct_mutex);
163			kfree(obj->linear);
164			obj->linear = NULL;
165			return -ENOMEM;
166		}
167
168		memset_io(ptr, 0, size);
169		iounmap(ptr);
170
171		obj->phys_addr = obj->linear->start;
172		obj->dev_addr = obj->linear->start;
173	}
174
175	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
176			 (unsigned long long)obj->phys_addr,
177			 (unsigned long long)obj->dev_addr);
178
179	return 0;
180}
181
182void *
183armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
184{
185	/* only linear objects need to be ioremap'd */
186	if (!dobj->addr && dobj->linear)
187		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
188	return dobj->addr;
189}
190
191struct armada_gem_object *
192armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
193{
194	struct armada_gem_object *obj;
195
196	size = roundup_gem_size(size);
197
198	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
199	if (!obj)
200		return NULL;
201
202	drm_gem_private_object_init(dev, &obj->obj, size);
203	obj->dev_addr = DMA_ERROR_CODE;
204
205	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
206
207	return obj;
208}
209
210struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
211	size_t size)
212{
213	struct armada_gem_object *obj;
214	struct address_space *mapping;
215
216	size = roundup_gem_size(size);
217
218	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
219	if (!obj)
220		return NULL;
221
222	if (drm_gem_object_init(dev, &obj->obj, size)) {
223		kfree(obj);
224		return NULL;
225	}
226
227	obj->dev_addr = DMA_ERROR_CODE;
228
229	mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
230	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
231
232	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
233
234	return obj;
235}
236
237/* Dumb alloc support */
238int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
239	struct drm_mode_create_dumb *args)
240{
241	struct armada_gem_object *dobj;
242	u32 handle;
243	size_t size;
244	int ret;
245
246	args->pitch = armada_pitch(args->width, args->bpp);
247	args->size = size = args->pitch * args->height;
248
249	dobj = armada_gem_alloc_private_object(dev, size);
250	if (dobj == NULL)
251		return -ENOMEM;
252
253	ret = armada_gem_linear_back(dev, dobj);
254	if (ret)
255		goto err;
256
257	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
258	if (ret)
259		goto err;
260
261	args->handle = handle;
262
263	/* drop reference from allocate - handle holds it now */
264	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
265 err:
266	drm_gem_object_unreference_unlocked(&dobj->obj);
267	return ret;
268}
269
270int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
271	uint32_t handle, uint64_t *offset)
272{
273	struct armada_gem_object *obj;
274	int ret = 0;
275
276	mutex_lock(&dev->struct_mutex);
277	obj = armada_gem_object_lookup(dev, file, handle);
278	if (!obj) {
279		DRM_ERROR("failed to lookup gem object\n");
280		ret = -EINVAL;
281		goto err_unlock;
282	}
283
284	/* Don't allow imported objects to be mapped */
285	if (obj->obj.import_attach) {
286		ret = -EINVAL;
287		goto err_unlock;
288	}
289
290	ret = drm_gem_create_mmap_offset(&obj->obj);
291	if (ret == 0) {
292		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
293		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
294	}
295
296	drm_gem_object_unreference(&obj->obj);
297 err_unlock:
298	mutex_unlock(&dev->struct_mutex);
299
300	return ret;
301}
302
303int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
304	uint32_t handle)
305{
306	return drm_gem_handle_delete(file, handle);
307}
308
309/* Private driver gem ioctls */
310int armada_gem_create_ioctl(struct drm_device *dev, void *data,
311	struct drm_file *file)
312{
313	struct drm_armada_gem_create *args = data;
314	struct armada_gem_object *dobj;
315	size_t size;
316	u32 handle;
317	int ret;
318
319	if (args->size == 0)
320		return -ENOMEM;
321
322	size = args->size;
323
324	dobj = armada_gem_alloc_object(dev, size);
325	if (dobj == NULL)
326		return -ENOMEM;
327
328	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
329	if (ret)
330		goto err;
331
332	args->handle = handle;
333
334	/* drop reference from allocate - handle holds it now */
335	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
336 err:
337	drm_gem_object_unreference_unlocked(&dobj->obj);
338	return ret;
339}
340
341/* Map a shmem-backed object into process memory space */
342int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
343	struct drm_file *file)
344{
345	struct drm_armada_gem_mmap *args = data;
346	struct armada_gem_object *dobj;
347	unsigned long addr;
348
349	dobj = armada_gem_object_lookup(dev, file, args->handle);
350	if (dobj == NULL)
351		return -ENOENT;
352
353	if (!dobj->obj.filp) {
354		drm_gem_object_unreference(&dobj->obj);
355		return -EINVAL;
356	}
357
358	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
359		       MAP_SHARED, args->offset);
360	drm_gem_object_unreference(&dobj->obj);
361	if (IS_ERR_VALUE(addr))
362		return addr;
363
364	args->addr = addr;
365
366	return 0;
367}
368
369int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
370	struct drm_file *file)
371{
372	struct drm_armada_gem_pwrite *args = data;
373	struct armada_gem_object *dobj;
374	char __user *ptr;
375	int ret;
376
377	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
378		args->handle, args->offset, args->size, args->ptr);
379
380	if (args->size == 0)
381		return 0;
382
383	ptr = (char __user *)(uintptr_t)args->ptr;
384
385	if (!access_ok(VERIFY_READ, ptr, args->size))
386		return -EFAULT;
387
388	ret = fault_in_multipages_readable(ptr, args->size);
389	if (ret)
390		return ret;
391
392	dobj = armada_gem_object_lookup(dev, file, args->handle);
393	if (dobj == NULL)
394		return -ENOENT;
395
396	/* Must be a kernel-mapped object */
397	if (!dobj->addr)
398		return -EINVAL;
399
400	if (args->offset > dobj->obj.size ||
401	    args->size > dobj->obj.size - args->offset) {
402		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
403		ret = -EINVAL;
404		goto unref;
405	}
406
407	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
408		ret = -EFAULT;
409	} else if (dobj->update) {
410		dobj->update(dobj->update_data);
411		ret = 0;
412	}
413
414 unref:
415	drm_gem_object_unreference_unlocked(&dobj->obj);
416	return ret;
417}
418
419/* Prime support */
420struct sg_table *
421armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
422	enum dma_data_direction dir)
423{
424	struct drm_gem_object *obj = attach->dmabuf->priv;
425	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
426	struct scatterlist *sg;
427	struct sg_table *sgt;
428	int i, num;
429
430	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
431	if (!sgt)
432		return NULL;
433
434	if (dobj->obj.filp) {
435		struct address_space *mapping;
436		gfp_t gfp;
437		int count;
438
439		count = dobj->obj.size / PAGE_SIZE;
440		if (sg_alloc_table(sgt, count, GFP_KERNEL))
441			goto free_sgt;
442
443		mapping = file_inode(dobj->obj.filp)->i_mapping;
444		gfp = mapping_gfp_mask(mapping);
445
446		for_each_sg(sgt->sgl, sg, count, i) {
447			struct page *page;
448
449			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
450			if (IS_ERR(page)) {
451				num = i;
452				goto release;
453			}
454
455			sg_set_page(sg, page, PAGE_SIZE, 0);
456		}
457
458		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
459			num = sgt->nents;
460			goto release;
461		}
462	} else if (dobj->page) {
463		/* Single contiguous page */
464		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
465			goto free_sgt;
466
467		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
468
469		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
470			goto free_table;
471	} else if (dobj->linear) {
472		/* Single contiguous physical region - no struct page */
473		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
474			goto free_sgt;
475		sg_dma_address(sgt->sgl) = dobj->dev_addr;
476		sg_dma_len(sgt->sgl) = dobj->obj.size;
477	} else {
478		goto free_sgt;
479	}
480	return sgt;
481
482 release:
483	for_each_sg(sgt->sgl, sg, num, i)
484		page_cache_release(sg_page(sg));
485 free_table:
486	sg_free_table(sgt);
487 free_sgt:
488	kfree(sgt);
489	return NULL;
490}
491
492static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
493	struct sg_table *sgt, enum dma_data_direction dir)
494{
495	struct drm_gem_object *obj = attach->dmabuf->priv;
496	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
497	int i;
498
499	if (!dobj->linear)
500		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
501
502	if (dobj->obj.filp) {
503		struct scatterlist *sg;
504		for_each_sg(sgt->sgl, sg, sgt->nents, i)
505			page_cache_release(sg_page(sg));
506	}
507
508	sg_free_table(sgt);
509	kfree(sgt);
510}
511
512static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
513{
514	return NULL;
515}
516
517static void
518armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
519{
520}
521
522static int
523armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
524{
525	return -EINVAL;
526}
527
528static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
529	.map_dma_buf	= armada_gem_prime_map_dma_buf,
530	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
531	.release	= drm_gem_dmabuf_release,
532	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
533	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
534	.kmap		= armada_gem_dmabuf_no_kmap,
535	.kunmap		= armada_gem_dmabuf_no_kunmap,
536	.mmap		= armada_gem_dmabuf_mmap,
537};
538
539struct dma_buf *
540armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
541	int flags)
542{
543	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
544			      O_RDWR);
545}
546
547struct drm_gem_object *
548armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
549{
550	struct dma_buf_attachment *attach;
551	struct armada_gem_object *dobj;
552
553	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
554		struct drm_gem_object *obj = buf->priv;
555		if (obj->dev == dev) {
556			/*
557			 * Importing our own dmabuf(s) increases the
558			 * refcount on the gem object itself.
559			 */
560			drm_gem_object_reference(obj);
561			return obj;
562		}
563	}
564
565	attach = dma_buf_attach(buf, dev->dev);
566	if (IS_ERR(attach))
567		return ERR_CAST(attach);
568
569	dobj = armada_gem_alloc_private_object(dev, buf->size);
570	if (!dobj) {
571		dma_buf_detach(buf, attach);
572		return ERR_PTR(-ENOMEM);
573	}
574
575	dobj->obj.import_attach = attach;
576	get_dma_buf(buf);
577
578	/*
579	 * Don't call dma_buf_map_attachment() here - it maps the
580	 * scatterlist immediately for DMA, and this is not always
581	 * an appropriate thing to do.
582	 */
583	return &dobj->obj;
584}
585
586int armada_gem_map_import(struct armada_gem_object *dobj)
587{
588	int ret;
589
590	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
591					  DMA_TO_DEVICE);
592	if (!dobj->sgt) {
593		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
594		return -EINVAL;
595	}
596	if (IS_ERR(dobj->sgt)) {
597		ret = PTR_ERR(dobj->sgt);
598		dobj->sgt = NULL;
599		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
600		return ret;
601	}
602	if (dobj->sgt->nents > 1) {
603		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
604		return -EINVAL;
605	}
606	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
607		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
608		return -EINVAL;
609	}
610	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
611	return 0;
612}