drivers/gpu/drm/virtio/virtgpu_object.c (v6.13.7)
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}
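
/*
 * Editor's note, not part of the kernel source: resource ID 0 is treated
 * by the virtio-gpu protocol as "no resource" (for example, setting a
 * scanout to resource 0 disables it), so both branches above publish
 * handle + 1, and virtio_gpu_resource_id_put() below undoes the offset.
 * A hypothetical caller looks like:
 *
 *	uint32_t resid;
 *	int ret = virtio_gpu_resource_id_get(vgdev, &resid);
 *
 *	if (ret)
 *		return ret;
 *	// resid is now a 1-based ID, ready to become hw_res_handle
 */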

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}
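
/*
 * Editor's note, not part of the kernel source: teardown is dispatched on
 * the object type.  shmem objects are handed entirely to the
 * drm_gem_shmem helpers, while host-visible VRAM objects must also give
 * their drm_mm_node back to the host-visible region (under
 * host_visible_lock) before the wrapper struct is freed.
 */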

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
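
/*
 * Editor's note, not part of the kernel source: the funcs pointer doubles
 * as a cheap runtime type tag; virtio_gpu_is_vram(), used in
 * virtio_gpu_cleanup_object() above, presumably does the same comparison
 * against a vram funcs table in virtgpu_vram.c.
 */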

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct scatterlist *sg;
	struct sg_table *pages;
	int si;

	pages = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (use_dma_api)
		*nents = pages->nents;
	else
		*nents = pages->orig_nents;

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}
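
/*
 * Editor's note, not part of the kernel source: DMA mapping may coalesce
 * adjacent pages into fewer segments, so the mapped path sizes the entry
 * array from pages->nents (DMA segments) while the unmapped path uses
 * pages->orig_nents (CPU pages); the two iterator macros above match
 * that split, reading sg_dma_address()/sg_dma_len() in the first case
 * and sg_phys()/sg->length in the second.
 */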

int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0)
		goto err_put_id;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_free_entry;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_free_entry:
	kvfree(ents);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}
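
/*
 * Editor's note, not part of the kernel source: a minimal, hypothetical
 * call sequence for a dumb buffer, assuming params was zeroed and sized
 * by the caller:
 *
 *	struct virtio_gpu_object *bo;
 *	int ret;
 *
 *	params.dumb = true;
 *	ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
 *	if (ret)
 *		return ret;
 *
 * Passing a fence is optional; when one is supplied, the BO is placed in
 * an object array and its reservation is locked so the create command
 * can be fenced against the new object.
 */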
drivers/gpu/drm/virtio/virtgpu_object.c (v5.9)
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				       uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}
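
/*
 * Editor's note, not part of the kernel source: unlike the v6.13.7 code
 * above, which delegates shmem teardown to drm_gem_shmem_free(), this
 * version undoes its own setup by hand: it DMA-unmaps the scatterlist if
 * it was mapped, frees the sg_table, and drops the pin taken in
 * virtio_gpu_object_shmem_init().
 */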

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}

static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = shmem->mapped;
	} else {
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}
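
/*
 * Editor's note, not part of the kernel source: this version pins the
 * pages and keeps the sg_table on the shmem object itself so that
 * virtio_gpu_cleanup_object() can unmap and free it later.  On the DMA
 * path, *nents is the segment count actually returned by dma_map_sg(),
 * which may be smaller than pages->nents if segments were merged.
 */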

int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}
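
/*
 * Editor's note, not part of the kernel source: the ordering differs from
 * v6.13.7 above.  Here the create command is queued first and the backing
 * pages are attached afterwards, while the newer code builds the
 * mem-entry list up front and passes it to the create commands directly
 * (including the blob-resource path, which does not exist in v5.9).
 */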