// SPDX-License-Identifier: GPL-2.0+
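/*
 * GEM object management for the virtual KMS (VKMS) driver: object
 * allocation and freeing, fault handling for mmap'ed buffers,
 * dumb-buffer creation, kernel virtual mappings (vmap/vunmap) and
 * PRIME import.
 */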

#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>

#include "vkms_drv.h"

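/*
 * Allocate a vkms_gem_object, round the requested size up to a whole
 * number of pages and initialize the underlying shmem-backed GEM object.
 */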
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(dev, &obj->gem, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

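/*
 * Release a GEM object once its last reference is dropped.  By this point
 * all page references and kernel mappings must already be gone.
 */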
void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	WARN_ON(gem->pages);
	WARN_ON(gem->vaddr);

	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}

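/*
 * Page-fault handler for user mmap()s of a vkms GEM object.  If the page
 * array is already populated, hand back the cached page; otherwise pull
 * the page in from the shmem backing store and translate any error into
 * the matching VM_FAULT_* code.
 */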
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vkms_gem_object *obj = vma->vm_private_data;
	unsigned long vaddr = vmf->address;
	pgoff_t page_offset;
	loff_t num_pages;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

	/* page_offset is zero-based, so an offset at or past num_pages is out of range */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;
		struct address_space *mapping;

		mapping = file_inode(obj->gem.filp)->i_mapping;
		page = shmem_read_mapping_page(mapping, page_offset);

		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else {
			switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
			}
		}
	}
	return ret;
}

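/*
 * Create a GEM object of the given size and a userspace handle for it on
 * behalf of @file.
 */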
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      u32 *handle,
					      u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	if (ret)
		return ERR_PTR(ret);

	return &obj->gem;
}

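/*
 * Dumb-buffer creation: compute the pitch and total size from the
 * requested width, height and bits per pixel, create the backing GEM
 * object and report the handle, pitch and size back to userspace.  The
 * local reference is dropped; the handle keeps the object alive.
 */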
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	drm_gem_object_put(gem_obj);

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

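/*
 * Return the object's page array, populating it from the shmem backing
 * store on first use.  cmpxchg() resolves a concurrent-allocation race:
 * the loser releases its duplicate array and uses the winner's.
 */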
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}

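/*
 * Drop one kernel-mapping reference.  When the count reaches zero the
 * vmap()ed address is torn down and the backing pages are released.
 */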
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_obj->pages_lock);
	if (vkms_obj->vmap_count < 1) {
		WARN_ON(vkms_obj->vaddr);
		WARN_ON(vkms_obj->pages);
		mutex_unlock(&vkms_obj->pages_lock);
		return;
	}

	vkms_obj->vmap_count--;

	if (vkms_obj->vmap_count == 0) {
		vunmap(vkms_obj->vaddr);
		vkms_obj->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
		vkms_obj->pages = NULL;
	}

	mutex_unlock(&vkms_obj->pages_lock);
}

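/*
 * Take a kernel-mapping reference.  The first caller pins the backing
 * pages and maps them contiguously into the kernel address space with
 * vmap(); later callers only bump the reference count.
 */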
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;
	}

	vkms_obj->vmap_count++;
	goto out;

err_vmap:
	ret = -ENOMEM;
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}

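/*
 * PRIME import: wrap an imported dma-buf's scatter-gather table in a new
 * vkms GEM object by filling the object's page array from the sg table.
 */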
struct drm_gem_object *
vkms_prime_import_sg_table(struct drm_device *dev,
			   struct dma_buf_attachment *attach,
			   struct sg_table *sg)
{
	struct vkms_gem_object *obj;
	int npages;

	obj = __vkms_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
	DRM_DEBUG_PRIME("Importing %d pages\n", npages);

	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		vkms_gem_free_object(&obj->gem);
		return ERR_PTR(-ENOMEM);
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
	return &obj->gem;
}