v4.6
 
  1/*
  2 * Copyright (C) 2012 Russell King
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8#include <linux/dma-buf.h>
  9#include <linux/dma-mapping.h>
 10#include <linux/shmem_fs.h>
 11#include <drm/drmP.h>
 12#include "armada_drm.h"
 13#include "armada_gem.h"
 14#include <drm/armada_drm.h>
 15#include "armada_ioctlP.h"
 16
 17static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 18{
 19	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
 20	unsigned long addr = (unsigned long)vmf->virtual_address;
 21	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 22	int ret;
 23
 24	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
 25	ret = vm_insert_pfn(vma, addr, pfn);
 26
 27	switch (ret) {
 28	case 0:
 29	case -EBUSY:
 30		return VM_FAULT_NOPAGE;
 31	case -ENOMEM:
 32		return VM_FAULT_OOM;
 33	default:
 34		return VM_FAULT_SIGBUS;
 35	}
 36}
 37
 38const struct vm_operations_struct armada_gem_vm_ops = {
 39	.fault	= armada_gem_vm_fault,
 40	.open	= drm_gem_vm_open,
 41	.close	= drm_gem_vm_close,
 42};
 43
 44static size_t roundup_gem_size(size_t size)
 45{
 46	return roundup(size, PAGE_SIZE);
 47}
 48
 49void armada_gem_free_object(struct drm_gem_object *obj)
 50{
 51	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
 52	struct armada_private *priv = obj->dev->dev_private;
 53
 54	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 55
 56	drm_gem_free_mmap_offset(&dobj->obj);
 57
 58	might_lock(&priv->linear_lock);
 59
 60	if (dobj->page) {
 61		/* page backed memory */
 62		unsigned int order = get_order(dobj->obj.size);
 63		__free_pages(dobj->page, order);
 64	} else if (dobj->linear) {
 65		/* linear backed memory */
 66		mutex_lock(&priv->linear_lock);
 67		drm_mm_remove_node(dobj->linear);
 68		mutex_unlock(&priv->linear_lock);
 69		kfree(dobj->linear);
 70		if (dobj->addr)
 71			iounmap(dobj->addr);
 72	}
 73
 74	if (dobj->obj.import_attach) {
 75		/* We only ever display imported data */
 76		if (dobj->sgt)
 77			dma_buf_unmap_attachment(dobj->obj.import_attach,
 78						 dobj->sgt, DMA_TO_DEVICE);
 79		drm_prime_gem_destroy(&dobj->obj, NULL);
 80	}
 81
 82	drm_gem_object_release(&dobj->obj);
 83
 84	kfree(dobj);
 85}
 86
 87int
 88armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 89{
 90	struct armada_private *priv = dev->dev_private;
 91	size_t size = obj->obj.size;
 92
 93	if (obj->page || obj->linear)
 94		return 0;
 95
 96	/*
 97	 * If it is a small allocation (typically cursor, which will
 98	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
 99	 * Framebuffers will never be this small (our minimum size for
100	 * framebuffers is larger than this anyway.)  Such objects are
101	 * only accessed by the CPU so we don't need any special handling
102	 * here.
103	 */
104	if (size <= 8192) {
105		unsigned int order = get_order(size);
106		struct page *p = alloc_pages(GFP_KERNEL, order);
107
108		if (p) {
109			obj->addr = page_address(p);
110			obj->phys_addr = page_to_phys(p);
111			obj->page = p;
112
113			memset(obj->addr, 0, PAGE_ALIGN(size));
114		}
115	}
116
117	/*
118	 * We could grab something from CMA if it's enabled, but that
119	 * involves building in a problem:
120	 *
121	 * CMA's interface uses dma_alloc_coherent(), which provides us
122	 * with a CPU virtual address and a device address.
123	 *
124	 * The CPU virtual address may be either an address in the kernel
125	 * direct mapped region (for example, as it would be on x86) or
126	 * it may be remapped into another part of kernel memory space
127	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
128	 * returned virtual address is invalid depending on the architecture
129	 * implementation.
130	 *
131	 * The device address may also not be a physical address; it may
132	 * be that there is some kind of remapping between the device and
133	 * system RAM, which makes the use of the device address also
134	 * unsafe to re-use as a physical address.
135	 *
136	 * This makes DRM usage of dma_alloc_coherent() in a generic way
137	 * at best very questionable and unsafe.
138	 */
139
140	/* Otherwise, grab it from our linear allocation */
141	if (!obj->page) {
142		struct drm_mm_node *node;
143		unsigned align = min_t(unsigned, size, SZ_2M);
144		void __iomem *ptr;
145		int ret;
146
147		node = kzalloc(sizeof(*node), GFP_KERNEL);
148		if (!node)
149			return -ENOSPC;
150
151		mutex_lock(&priv->linear_lock);
152		ret = drm_mm_insert_node(&priv->linear, node, size, align,
153					 DRM_MM_SEARCH_DEFAULT);
154		mutex_unlock(&priv->linear_lock);
155		if (ret) {
156			kfree(node);
157			return ret;
158		}
159
160		obj->linear = node;
161
162		/* Ensure that the memory we're returning is cleared. */
163		ptr = ioremap_wc(obj->linear->start, size);
164		if (!ptr) {
165			mutex_lock(&priv->linear_lock);
166			drm_mm_remove_node(obj->linear);
167			mutex_unlock(&priv->linear_lock);
168			kfree(obj->linear);
169			obj->linear = NULL;
170			return -ENOMEM;
171		}
172
173		memset_io(ptr, 0, size);
174		iounmap(ptr);
175
176		obj->phys_addr = obj->linear->start;
177		obj->dev_addr = obj->linear->start;
178	}
179
180	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
181			 (unsigned long long)obj->phys_addr,
182			 (unsigned long long)obj->dev_addr);
183
184	return 0;
185}
186
187void *
188armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
189{
190	/* only linear objects need to be ioremap'd */
191	if (!dobj->addr && dobj->linear)
192		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
193	return dobj->addr;
194}
195
196struct armada_gem_object *
197armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
198{
199	struct armada_gem_object *obj;
200
201	size = roundup_gem_size(size);
202
203	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
204	if (!obj)
205		return NULL;
206
207	drm_gem_private_object_init(dev, &obj->obj, size);
208	obj->dev_addr = DMA_ERROR_CODE;
209
210	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
211
212	return obj;
213}
214
215struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
216	size_t size)
217{
218	struct armada_gem_object *obj;
219	struct address_space *mapping;
220
221	size = roundup_gem_size(size);
222
223	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
224	if (!obj)
225		return NULL;
226
227	if (drm_gem_object_init(dev, &obj->obj, size)) {
228		kfree(obj);
229		return NULL;
230	}
231
232	obj->dev_addr = DMA_ERROR_CODE;
233
234	mapping = file_inode(obj->obj.filp)->i_mapping;
235	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
236
237	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
238
239	return obj;
240}
241
242/* Dumb alloc support */
243int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
244	struct drm_mode_create_dumb *args)
245{
246	struct armada_gem_object *dobj;
247	u32 handle;
248	size_t size;
249	int ret;
250
251	args->pitch = armada_pitch(args->width, args->bpp);
252	args->size = size = args->pitch * args->height;
253
254	dobj = armada_gem_alloc_private_object(dev, size);
255	if (dobj == NULL)
256		return -ENOMEM;
257
258	ret = armada_gem_linear_back(dev, dobj);
259	if (ret)
260		goto err;
261
262	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
263	if (ret)
264		goto err;
265
266	args->handle = handle;
267
268	/* drop reference from allocate - handle holds it now */
269	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
270 err:
271	drm_gem_object_unreference_unlocked(&dobj->obj);
272	return ret;
273}
274
275int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
276	uint32_t handle, uint64_t *offset)
277{
278	struct armada_gem_object *obj;
279	int ret = 0;
280
281	obj = armada_gem_object_lookup(dev, file, handle);
282	if (!obj) {
283		DRM_ERROR("failed to lookup gem object\n");
284		return -EINVAL;
285	}
286
287	/* Don't allow imported objects to be mapped */
288	if (obj->obj.import_attach) {
289		ret = -EINVAL;
290		goto err_unref;
291	}
292
293	ret = drm_gem_create_mmap_offset(&obj->obj);
294	if (ret == 0) {
295		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
296		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
297	}
298
299 err_unref:
300	drm_gem_object_unreference_unlocked(&obj->obj);
301
302	return ret;
303}
304
305int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
306	uint32_t handle)
307{
308	return drm_gem_handle_delete(file, handle);
309}
310
311/* Private driver gem ioctls */
312int armada_gem_create_ioctl(struct drm_device *dev, void *data,
313	struct drm_file *file)
314{
315	struct drm_armada_gem_create *args = data;
316	struct armada_gem_object *dobj;
317	size_t size;
318	u32 handle;
319	int ret;
320
321	if (args->size == 0)
322		return -ENOMEM;
323
324	size = args->size;
325
326	dobj = armada_gem_alloc_object(dev, size);
327	if (dobj == NULL)
328		return -ENOMEM;
329
330	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
331	if (ret)
332		goto err;
333
334	args->handle = handle;
335
336	/* drop reference from allocate - handle holds it now */
337	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
338 err:
339	drm_gem_object_unreference_unlocked(&dobj->obj);
340	return ret;
341}
342
343/* Map a shmem-backed object into process memory space */
344int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
345	struct drm_file *file)
346{
347	struct drm_armada_gem_mmap *args = data;
348	struct armada_gem_object *dobj;
349	unsigned long addr;
350
351	dobj = armada_gem_object_lookup(dev, file, args->handle);
352	if (dobj == NULL)
353		return -ENOENT;
354
355	if (!dobj->obj.filp) {
356		drm_gem_object_unreference_unlocked(&dobj->obj);
357		return -EINVAL;
358	}
359
360	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
361		       MAP_SHARED, args->offset);
362	drm_gem_object_unreference_unlocked(&dobj->obj);
363	if (IS_ERR_VALUE(addr))
364		return addr;
365
366	args->addr = addr;
367
368	return 0;
369}
370
371int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
372	struct drm_file *file)
373{
374	struct drm_armada_gem_pwrite *args = data;
375	struct armada_gem_object *dobj;
376	char __user *ptr;
377	int ret;
378
379	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
380		args->handle, args->offset, args->size, args->ptr);
381
382	if (args->size == 0)
383		return 0;
384
385	ptr = (char __user *)(uintptr_t)args->ptr;
386
387	if (!access_ok(VERIFY_READ, ptr, args->size))
388		return -EFAULT;
389
390	ret = fault_in_multipages_readable(ptr, args->size);
391	if (ret)
392		return ret;
393
394	dobj = armada_gem_object_lookup(dev, file, args->handle);
395	if (dobj == NULL)
396		return -ENOENT;
397
398	/* Must be a kernel-mapped object */
399	if (!dobj->addr)
400		return -EINVAL;
401
402	if (args->offset > dobj->obj.size ||
403	    args->size > dobj->obj.size - args->offset) {
404		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
405		ret = -EINVAL;
406		goto unref;
407	}
408
409	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
410		ret = -EFAULT;
411	} else if (dobj->update) {
412		dobj->update(dobj->update_data);
413		ret = 0;
414	}
415
416 unref:
417	drm_gem_object_unreference_unlocked(&dobj->obj);
418	return ret;
419}
420
421/* Prime support */
422struct sg_table *
423armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
424	enum dma_data_direction dir)
425{
426	struct drm_gem_object *obj = attach->dmabuf->priv;
427	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
428	struct scatterlist *sg;
429	struct sg_table *sgt;
430	int i, num;
431
432	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
433	if (!sgt)
434		return NULL;
435
436	if (dobj->obj.filp) {
437		struct address_space *mapping;
438		int count;
439
440		count = dobj->obj.size / PAGE_SIZE;
441		if (sg_alloc_table(sgt, count, GFP_KERNEL))
442			goto free_sgt;
443
444		mapping = file_inode(dobj->obj.filp)->i_mapping;
445
446		for_each_sg(sgt->sgl, sg, count, i) {
447			struct page *page;
448
449			page = shmem_read_mapping_page(mapping, i);
450			if (IS_ERR(page)) {
451				num = i;
452				goto release;
453			}
454
455			sg_set_page(sg, page, PAGE_SIZE, 0);
456		}
457
458		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
459			num = sgt->nents;
460			goto release;
461		}
462	} else if (dobj->page) {
463		/* Single contiguous page */
464		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
465			goto free_sgt;
466
467		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
468
469		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
470			goto free_table;
471	} else if (dobj->linear) {
472		/* Single contiguous physical region - no struct page */
473		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
474			goto free_sgt;
475		sg_dma_address(sgt->sgl) = dobj->dev_addr;
476		sg_dma_len(sgt->sgl) = dobj->obj.size;
477	} else {
478		goto free_sgt;
479	}
480	return sgt;
481
482 release:
483	for_each_sg(sgt->sgl, sg, num, i)
484		put_page(sg_page(sg));
485 free_table:
486	sg_free_table(sgt);
487 free_sgt:
488	kfree(sgt);
489	return NULL;
490}
491
492static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
493	struct sg_table *sgt, enum dma_data_direction dir)
494{
495	struct drm_gem_object *obj = attach->dmabuf->priv;
496	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
497	int i;
498
499	if (!dobj->linear)
500		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
501
502	if (dobj->obj.filp) {
503		struct scatterlist *sg;
504		for_each_sg(sgt->sgl, sg, sgt->nents, i)
505			put_page(sg_page(sg));
506	}
507
508	sg_free_table(sgt);
509	kfree(sgt);
510}
511
512static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
513{
514	return NULL;
515}
516
517static void
518armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
519{
520}
521
522static int
523armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
524{
525	return -EINVAL;
526}
527
528static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
529	.map_dma_buf	= armada_gem_prime_map_dma_buf,
530	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
531	.release	= drm_gem_dmabuf_release,
532	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
533	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
534	.kmap		= armada_gem_dmabuf_no_kmap,
535	.kunmap		= armada_gem_dmabuf_no_kunmap,
536	.mmap		= armada_gem_dmabuf_mmap,
537};
538
539struct dma_buf *
540armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
541	int flags)
542{
543	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
544
545	exp_info.ops = &armada_gem_prime_dmabuf_ops;
546	exp_info.size = obj->size;
547	exp_info.flags = O_RDWR;
548	exp_info.priv = obj;
549
550	return dma_buf_export(&exp_info);
551}
552
553struct drm_gem_object *
554armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
555{
556	struct dma_buf_attachment *attach;
557	struct armada_gem_object *dobj;
558
559	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
560		struct drm_gem_object *obj = buf->priv;
561		if (obj->dev == dev) {
562			/*
563			 * Importing our own dmabuf(s) increases the
564			 * refcount on the gem object itself.
565			 */
566			drm_gem_object_reference(obj);
567			return obj;
568		}
569	}
570
571	attach = dma_buf_attach(buf, dev->dev);
572	if (IS_ERR(attach))
573		return ERR_CAST(attach);
574
575	dobj = armada_gem_alloc_private_object(dev, buf->size);
576	if (!dobj) {
577		dma_buf_detach(buf, attach);
578		return ERR_PTR(-ENOMEM);
579	}
580
581	dobj->obj.import_attach = attach;
582	get_dma_buf(buf);
583
584	/*
585	 * Don't call dma_buf_map_attachment() here - it maps the
586	 * scatterlist immediately for DMA, and this is not always
587	 * an appropriate thing to do.
588	 */
589	return &dobj->obj;
590}
591
592int armada_gem_map_import(struct armada_gem_object *dobj)
593{
594	int ret;
595
596	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
597					  DMA_TO_DEVICE);
598	if (!dobj->sgt) {
599		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
600		return -EINVAL;
601	}
602	if (IS_ERR(dobj->sgt)) {
603		ret = PTR_ERR(dobj->sgt);
604		dobj->sgt = NULL;
605		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
606		return ret;
607	}
608	if (dobj->sgt->nents > 1) {
609		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
610		return -EINVAL;
611	}
612	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
613		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
614		return -EINVAL;
615	}
616	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
617	return 0;
618}
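
For context, here is a minimal userspace sketch (not part of the kernel file above) of how a client might drive the GEM create and mmap ioctls handled by armada_gem_create_ioctl() and armada_gem_mmap_ioctl(). The struct field names mirror the args-> accesses in those handlers; the DRM_IOCTL_ARMADA_GEM_* request macros and the exact UAPI layout are assumed to come from <drm/armada_drm.h>, and drmIoctl() is libdrm's ioctl wrapper.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>           /* drmIoctl(), from libdrm */
#include <drm/armada_drm.h>    /* assumed UAPI: struct drm_armada_gem_create/_mmap */

/* Allocate a shmem-backed GEM object and copy 'len' bytes into it through
 * the driver's GEM mmap ioctl.  Returns the GEM handle, or 0 on failure. */
static uint32_t armada_create_and_fill(int fd, const void *data, uint32_t len)
{
	struct drm_armada_gem_create create = { .size = len };
	struct drm_armada_gem_mmap map = { 0 };

	/* armada_gem_create_ioctl(): rounds the size up to a page and
	 * returns a handle holding the only reference to the object. */
	if (drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create))
		return 0;

	/* armada_gem_mmap_ioctl(): vm_mmap()s the object's shmem file and
	 * returns the user-space address in map.addr. */
	map.handle = create.handle;
	map.offset = 0;
	map.size = len;
	if (drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map))
		return 0;	/* a real client would also close the handle here */

	memcpy((void *)(uintptr_t)map.addr, data, len);
	return create.handle;
}

A pwrite into the same object (armada_gem_pwrite_ioctl()) only succeeds once the kernel has mapped the object (dobj->addr set), which is why this sketch uses the mmap path for a freshly created shmem-backed object.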
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Russell King
  4 */
  5
  6#include <linux/dma-buf.h>
  7#include <linux/dma-mapping.h>
  8#include <linux/mman.h>
  9#include <linux/shmem_fs.h>
 10
 11#include <drm/armada_drm.h>
 12#include <drm/drm_prime.h>
 13
 14#include "armada_drm.h"
 15#include "armada_gem.h"
 16#include "armada_ioctlP.h"
 17
 18static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
 19{
 20	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
 21	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
 22	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 23
 24	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
 25	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 26}
 27
 28const struct vm_operations_struct armada_gem_vm_ops = {
 29	.fault	= armada_gem_vm_fault,
 30	.open	= drm_gem_vm_open,
 31	.close	= drm_gem_vm_close,
 32};
 33
 34static size_t roundup_gem_size(size_t size)
 35{
 36	return roundup(size, PAGE_SIZE);
 37}
 38
 39void armada_gem_free_object(struct drm_gem_object *obj)
 40{
 41	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
 42	struct armada_private *priv = obj->dev->dev_private;
 43
 44	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 45
 46	drm_gem_free_mmap_offset(&dobj->obj);
 47
 48	might_lock(&priv->linear_lock);
 49
 50	if (dobj->page) {
 51		/* page backed memory */
 52		unsigned int order = get_order(dobj->obj.size);
 53		__free_pages(dobj->page, order);
 54	} else if (dobj->linear) {
 55		/* linear backed memory */
 56		mutex_lock(&priv->linear_lock);
 57		drm_mm_remove_node(dobj->linear);
 58		mutex_unlock(&priv->linear_lock);
 59		kfree(dobj->linear);
 60		if (dobj->addr)
 61			iounmap(dobj->addr);
 62	}
 63
 64	if (dobj->obj.import_attach) {
 65		/* We only ever display imported data */
 66		if (dobj->sgt)
 67			dma_buf_unmap_attachment(dobj->obj.import_attach,
 68						 dobj->sgt, DMA_TO_DEVICE);
 69		drm_prime_gem_destroy(&dobj->obj, NULL);
 70	}
 71
 72	drm_gem_object_release(&dobj->obj);
 73
 74	kfree(dobj);
 75}
 76
 77int
 78armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 79{
 80	struct armada_private *priv = dev->dev_private;
 81	size_t size = obj->obj.size;
 82
 83	if (obj->page || obj->linear)
 84		return 0;
 85
 86	/*
 87	 * If it is a small allocation (typically cursor, which will
 88	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
 89	 * Framebuffers will never be this small (our minimum size for
 90	 * framebuffers is larger than this anyway.)  Such objects are
 91	 * only accessed by the CPU so we don't need any special handling
 92	 * here.
 93	 */
 94	if (size <= 8192) {
 95		unsigned int order = get_order(size);
 96		struct page *p = alloc_pages(GFP_KERNEL, order);
 97
 98		if (p) {
 99			obj->addr = page_address(p);
100			obj->phys_addr = page_to_phys(p);
101			obj->page = p;
102
103			memset(obj->addr, 0, PAGE_ALIGN(size));
104		}
105	}
106
107	/*
108	 * We could grab something from CMA if it's enabled, but that
109	 * involves building in a problem:
110	 *
111	 * CMA's interface uses dma_alloc_coherent(), which provides us
112	 * with a CPU virtual address and a device address.
113	 *
114	 * The CPU virtual address may be either an address in the kernel
115	 * direct mapped region (for example, as it would be on x86) or
116	 * it may be remapped into another part of kernel memory space
117	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
118	 * returned virtual address is invalid depending on the architecture
119	 * implementation.
120	 *
121	 * The device address may also not be a physical address; it may
122	 * be that there is some kind of remapping between the device and
123	 * system RAM, which makes the use of the device address also
124	 * unsafe to re-use as a physical address.
125	 *
126	 * This makes DRM usage of dma_alloc_coherent() in a generic way
127	 * at best very questionable and unsafe.
128	 */
129
130	/* Otherwise, grab it from our linear allocation */
131	if (!obj->page) {
132		struct drm_mm_node *node;
133		unsigned align = min_t(unsigned, size, SZ_2M);
134		void __iomem *ptr;
135		int ret;
136
137		node = kzalloc(sizeof(*node), GFP_KERNEL);
138		if (!node)
139			return -ENOSPC;
140
141		mutex_lock(&priv->linear_lock);
142		ret = drm_mm_insert_node_generic(&priv->linear, node,
143						 size, align, 0, 0);
144		mutex_unlock(&priv->linear_lock);
145		if (ret) {
146			kfree(node);
147			return ret;
148		}
149
150		obj->linear = node;
151
152		/* Ensure that the memory we're returning is cleared. */
153		ptr = ioremap_wc(obj->linear->start, size);
154		if (!ptr) {
155			mutex_lock(&priv->linear_lock);
156			drm_mm_remove_node(obj->linear);
157			mutex_unlock(&priv->linear_lock);
158			kfree(obj->linear);
159			obj->linear = NULL;
160			return -ENOMEM;
161		}
162
163		memset_io(ptr, 0, size);
164		iounmap(ptr);
165
166		obj->phys_addr = obj->linear->start;
167		obj->dev_addr = obj->linear->start;
168		obj->mapped = true;
169	}
170
171	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
172			 (unsigned long long)obj->phys_addr,
173			 (unsigned long long)obj->dev_addr);
174
175	return 0;
176}
177
178void *
179armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
180{
181	/* only linear objects need to be ioremap'd */
182	if (!dobj->addr && dobj->linear)
183		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
184	return dobj->addr;
185}
186
187struct armada_gem_object *
188armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
189{
190	struct armada_gem_object *obj;
191
192	size = roundup_gem_size(size);
193
194	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
195	if (!obj)
196		return NULL;
197
198	drm_gem_private_object_init(dev, &obj->obj, size);
199
200	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
201
202	return obj;
203}
204
205static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
206	size_t size)
207{
208	struct armada_gem_object *obj;
209	struct address_space *mapping;
210
211	size = roundup_gem_size(size);
212
213	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
214	if (!obj)
215		return NULL;
216
217	if (drm_gem_object_init(dev, &obj->obj, size)) {
218		kfree(obj);
219		return NULL;
220	}
221
222	mapping = obj->obj.filp->f_mapping;
223	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
224
225	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
226
227	return obj;
228}
229
230/* Dumb alloc support */
231int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
232	struct drm_mode_create_dumb *args)
233{
234	struct armada_gem_object *dobj;
235	u32 handle;
236	size_t size;
237	int ret;
238
239	args->pitch = armada_pitch(args->width, args->bpp);
240	args->size = size = args->pitch * args->height;
241
242	dobj = armada_gem_alloc_private_object(dev, size);
243	if (dobj == NULL)
244		return -ENOMEM;
245
246	ret = armada_gem_linear_back(dev, dobj);
247	if (ret)
248		goto err;
249
250	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
251	if (ret)
252		goto err;
253
254	args->handle = handle;
255
256	/* drop reference from allocate - handle holds it now */
257	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
258 err:
259	drm_gem_object_put_unlocked(&dobj->obj);
260	return ret;
261}
262
263/* Private driver gem ioctls */
264int armada_gem_create_ioctl(struct drm_device *dev, void *data,
265	struct drm_file *file)
266{
267	struct drm_armada_gem_create *args = data;
268	struct armada_gem_object *dobj;
269	size_t size;
270	u32 handle;
271	int ret;
272
273	if (args->size == 0)
274		return -ENOMEM;
275
276	size = args->size;
277
278	dobj = armada_gem_alloc_object(dev, size);
279	if (dobj == NULL)
280		return -ENOMEM;
281
282	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
283	if (ret)
284		goto err;
285
286	args->handle = handle;
287
288	/* drop reference from allocate - handle holds it now */
289	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
290 err:
291	drm_gem_object_put_unlocked(&dobj->obj);
292	return ret;
293}
294
295/* Map a shmem-backed object into process memory space */
296int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
297	struct drm_file *file)
298{
299	struct drm_armada_gem_mmap *args = data;
300	struct armada_gem_object *dobj;
301	unsigned long addr;
302
303	dobj = armada_gem_object_lookup(file, args->handle);
304	if (dobj == NULL)
305		return -ENOENT;
306
307	if (!dobj->obj.filp) {
308		drm_gem_object_put_unlocked(&dobj->obj);
309		return -EINVAL;
310	}
311
312	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
313		       MAP_SHARED, args->offset);
314	drm_gem_object_put_unlocked(&dobj->obj);
315	if (IS_ERR_VALUE(addr))
316		return addr;
317
318	args->addr = addr;
319
320	return 0;
321}
322
323int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
324	struct drm_file *file)
325{
326	struct drm_armada_gem_pwrite *args = data;
327	struct armada_gem_object *dobj;
328	char __user *ptr;
329	int ret;
330
331	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
332		args->handle, args->offset, args->size, args->ptr);
333
334	if (args->size == 0)
335		return 0;
336
337	ptr = (char __user *)(uintptr_t)args->ptr;
338
339	if (!access_ok(ptr, args->size))
340		return -EFAULT;
341
342	ret = fault_in_pages_readable(ptr, args->size);
343	if (ret)
344		return ret;
345
346	dobj = armada_gem_object_lookup(file, args->handle);
347	if (dobj == NULL)
348		return -ENOENT;
349
350	/* Must be a kernel-mapped object */
351	if (!dobj->addr)
352		return -EINVAL;
353
354	if (args->offset > dobj->obj.size ||
355	    args->size > dobj->obj.size - args->offset) {
356		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
357		ret = -EINVAL;
358		goto unref;
359	}
360
361	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
362		ret = -EFAULT;
363	} else if (dobj->update) {
364		dobj->update(dobj->update_data);
365		ret = 0;
366	}
367
368 unref:
369	drm_gem_object_put_unlocked(&dobj->obj);
370	return ret;
371}
372
373/* Prime support */
374static struct sg_table *
375armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
376	enum dma_data_direction dir)
377{
378	struct drm_gem_object *obj = attach->dmabuf->priv;
379	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
380	struct scatterlist *sg;
381	struct sg_table *sgt;
382	int i, num;
383
384	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
385	if (!sgt)
386		return NULL;
387
388	if (dobj->obj.filp) {
389		struct address_space *mapping;
390		int count;
391
392		count = dobj->obj.size / PAGE_SIZE;
393		if (sg_alloc_table(sgt, count, GFP_KERNEL))
394			goto free_sgt;
395
396		mapping = dobj->obj.filp->f_mapping;
397
398		for_each_sg(sgt->sgl, sg, count, i) {
399			struct page *page;
400
401			page = shmem_read_mapping_page(mapping, i);
402			if (IS_ERR(page)) {
403				num = i;
404				goto release;
405			}
406
407			sg_set_page(sg, page, PAGE_SIZE, 0);
408		}
409
410		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
411			num = sgt->nents;
412			goto release;
413		}
414	} else if (dobj->page) {
415		/* Single contiguous page */
416		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
417			goto free_sgt;
418
419		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
420
421		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
422			goto free_table;
423	} else if (dobj->linear) {
424		/* Single contiguous physical region - no struct page */
425		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
426			goto free_sgt;
427		sg_dma_address(sgt->sgl) = dobj->dev_addr;
428		sg_dma_len(sgt->sgl) = dobj->obj.size;
429	} else {
430		goto free_sgt;
431	}
432	return sgt;
433
434 release:
435	for_each_sg(sgt->sgl, sg, num, i)
436		put_page(sg_page(sg));
437 free_table:
438	sg_free_table(sgt);
439 free_sgt:
440	kfree(sgt);
441	return NULL;
442}
443
444static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
445	struct sg_table *sgt, enum dma_data_direction dir)
446{
447	struct drm_gem_object *obj = attach->dmabuf->priv;
448	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
449	int i;
450
451	if (!dobj->linear)
452		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
453
454	if (dobj->obj.filp) {
455		struct scatterlist *sg;
456		for_each_sg(sgt->sgl, sg, sgt->nents, i)
457			put_page(sg_page(sg));
458	}
459
460	sg_free_table(sgt);
461	kfree(sgt);
462}
463
464static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
465{
466	return NULL;
467}
468
469static void
470armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
471{
472}
473
474static int
475armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
476{
477	return -EINVAL;
478}
479
480static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
481	.map_dma_buf	= armada_gem_prime_map_dma_buf,
482	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
483	.release	= drm_gem_dmabuf_release,
484	.map		= armada_gem_dmabuf_no_kmap,
485	.unmap		= armada_gem_dmabuf_no_kunmap,
486	.mmap		= armada_gem_dmabuf_mmap,
487};
488
489struct dma_buf *
490armada_gem_prime_export(struct drm_gem_object *obj, int flags)
491{
492	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
493
494	exp_info.ops = &armada_gem_prime_dmabuf_ops;
495	exp_info.size = obj->size;
496	exp_info.flags = O_RDWR;
497	exp_info.priv = obj;
498
499	return drm_gem_dmabuf_export(obj->dev, &exp_info);
500}
501
502struct drm_gem_object *
503armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
504{
505	struct dma_buf_attachment *attach;
506	struct armada_gem_object *dobj;
507
508	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
509		struct drm_gem_object *obj = buf->priv;
510		if (obj->dev == dev) {
511			/*
512			 * Importing our own dmabuf(s) increases the
513			 * refcount on the gem object itself.
514			 */
515			drm_gem_object_get(obj);
516			return obj;
517		}
518	}
519
520	attach = dma_buf_attach(buf, dev->dev);
521	if (IS_ERR(attach))
522		return ERR_CAST(attach);
523
524	dobj = armada_gem_alloc_private_object(dev, buf->size);
525	if (!dobj) {
526		dma_buf_detach(buf, attach);
527		return ERR_PTR(-ENOMEM);
528	}
529
530	dobj->obj.import_attach = attach;
531	get_dma_buf(buf);
532
533	/*
534	 * Don't call dma_buf_map_attachment() here - it maps the
535	 * scatterlist immediately for DMA, and this is not always
536	 * an appropriate thing to do.
537	 */
538	return &dobj->obj;
539}
540
541int armada_gem_map_import(struct armada_gem_object *dobj)
542{
543	int ret;
544
545	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
546					   DMA_TO_DEVICE);
547	if (IS_ERR(dobj->sgt)) {
548		ret = PTR_ERR(dobj->sgt);
549		dobj->sgt = NULL;
550		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
551		return ret;
552	}
553	if (dobj->sgt->nents > 1) {
554		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
555		return -EINVAL;
556	}
557	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
558		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
559		return -EINVAL;
560	}
561	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
562	dobj->mapped = true;
563	return 0;
564}
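
To close, a hedged sketch of the PRIME path these export/import hooks implement: libdrm's drmPrimeHandleToFD() turns a GEM handle into a dma-buf file descriptor (ending up in armada_gem_prime_export()), and drmPrimeFDToHandle() re-imports it (armada_gem_prime_import(), which on the same device simply takes a reference instead of attaching). Error handling is reduced to a single return code.

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>	/* drmPrimeHandleToFD()/drmPrimeFDToHandle(), libdrm */

/* Export 'handle' from one DRM device fd as a dma-buf and import it on
 * another.  The imported handle is written to *imported_handle. */
static int armada_share_buffer(int export_fd, uint32_t handle,
			       int import_fd, uint32_t *imported_handle)
{
	int prime_fd, ret;

	if (drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &prime_fd))
		return -1;

	ret = drmPrimeFDToHandle(import_fd, prime_fd, imported_handle);
	close(prime_fd);	/* the GEM object keeps its own reference */
	return ret;
}

Note that armada_gem_prime_import() deliberately defers dma_buf_map_attachment(); the actual mapping, and the single-segment and size checks, happen later in armada_gem_map_import() when the buffer is first used by the display hardware.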