v3.5.6
/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	/* drop the allocation reference; the handle now keeps the object alive */
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

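/*
 * Fault handler for user mappings of a GEM object: translate the faulting
 * address into a page index and insert the matching shmem-backed page into
 * the VMA (which is why the mmap path above sets VM_MIXEDMAP).
 */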
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall through */
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int udl_gem_init_object(struct drm_gem_object *obj)
{
	/* udl never allocates objects through drm_gem_object_alloc(),
	 * so this driver hook must never be reached */
	BUG();

	return 0;
}

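/*
 * Pin the object's backing store: allocate the page-pointer array and fill
 * it from the shmem mapping behind the GEM object, one page at a time.
 */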
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	if (obj->base.import_attach) {
		/* imported pages belong to the exporter; just free the array */
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

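/*
 * Establish a kernel virtual mapping of the whole object. Imported
 * dma-bufs go through the exporter's vmap hook; native objects are pinned
 * with udl_gem_get_pages() and mapped with vmap().
 */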
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
					       0, obj->base.size, DMA_BIDIRECTIONAL);
		if (ret)
			return -EINVAL;

		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
				       obj->base.size, DMA_BIDIRECTIONAL);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

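/*
 * Final unreference: tear down in reverse order of setup - the kernel
 * mapping, the PRIME attachment (if any), the pinned pages, and the fake
 * mmap offset.
 */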
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}

/* The dumb interface doesn't work with the straight GEM mmap interface;
   it expects mmap to be done on the drm fd, as normal. */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

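For context, a minimal userspace sketch of the path above. This is a hedged illustration, not part of this file: the device node and buffer geometry are assumptions, and error handling is reduced to early returns. It creates a dumb buffer (udl_dumb_create), asks for its fake mmap offset via DRM_IOCTL_MODE_MAP_DUMB (which lands in udl_gem_mmap() through the driver's dumb_map_offset hook), then mmaps the drm fd as the comment above describes.

/* Hedged userspace sketch; fd is an open DRM fd, e.g. /dev/dri/card0 (assumed). */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *map_dumb_buffer(int fd, uint64_t *size_out)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *ptr;

	memset(&create, 0, sizeof(create));
	create.width = 1024;			/* assumed geometry */
	create.height = 768;
	create.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;			/* udl_dumb_create() path failed */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;			/* udl_gem_mmap() path failed */

	/* mmap on the drm fd, using the fake offset the kernel returned */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*size_out = create.size;
	return ptr;
}
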
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		/* don't leak the object allocated above */
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

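/*
 * PRIME import: attach to the foreign dma-buf, map the attachment to get
 * an sg_table, and wrap the result in a udl GEM object via
 * udl_prime_create().
 */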
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
v3.15
/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

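/*
 * Compared with the v3.5.6 code above, the open-coded shmem loop is gone:
 * the drm_gem_get_pages()/drm_gem_put_pages() helpers pin and release the
 * whole backing store in one call.
 */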
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base, gfpmask);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/* The dumb interface doesn't work with the straight GEM mmap interface;
   it expects mmap to be done on the drm fd, as normal. */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

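/*
 * Wrap an imported sg_table in a udl GEM object; the page-pointer array
 * is filled from the scatterlist so the fault and vmap paths work the
 * same as for native objects.
 */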
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		/* don't leak the object allocated above */
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

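/*
 * Compared with v3.5.6, the import path now takes explicit references on
 * the underlying device and on the dma-buf itself; both are dropped on
 * the error paths below and, for the success path, when the object is
 * destroyed in udl_gem_free_object().
 */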
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}
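
And the matching userspace view of the import path, again a hedged sketch rather than anything from this file: a dma-buf fd obtained elsewhere (for instance via DRM_IOCTL_PRIME_HANDLE_TO_FD on the exporting device) is turned into a GEM handle on the udl node, which routes through the PRIME core into udl_gem_prime_import().

/* Hedged userspace sketch: import an existing dma-buf fd into udl. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int import_dmabuf(int udl_fd, int dmabuf_fd, uint32_t *handle_out)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.fd = dmabuf_fd;

	/* the PRIME core routes this to udl_gem_prime_import() */
	if (ioctl(udl_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
		return -1;

	*handle_out = args.handle;	/* GEM handle backed by the import */
	return 0;
}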