v5.4
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

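/*
 * Resource IDs name guest objects to the host.  In this version the
 * ida-based allocator is compiled out (#if 0) in favour of a counter
 * that only ever grows, so IDs are never reused; see the FIXME below.
 * Note the counter is a plain static int with no locking; the v5.14
 * version further down switches to an atomic_t.
 */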
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				       uint32_t *resid)
{
#if 0
	int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
#else
	static int handle;

	/*
	 * FIXME: dirty hack to avoid re-using IDs, virglrenderer
	 * can't deal with that.  Needs fixing in virglrenderer, also
	 * should figure a better way to handle that in the guest.
	 */
	handle++;
#endif

	*resid = handle + 1;
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
#if 0
	ida_free(&vgdev->resource_ida, id - 1);
#endif
}

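/*
 * TTM destroy callback, invoked once the last reference to the buffer
 * object is gone: tell the host to drop the resource, free the sg table
 * and any kernel mapping, then release the GEM object and the ID.
 */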
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->created)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	if (bo->vmap)
		virtio_gpu_object_kunmap(bo);
	drm_gem_object_release(&bo->gem_base);
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	kfree(bo);
}

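/*
 * All buffer objects in this driver are placed in guest system memory
 * (TTM_PL_FLAG_TT) and pinned there (TTM_PL_FLAG_NO_EVICT), so a single
 * placement entry doubles as both the normal and the busy list.
 */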
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
	u32 c = 1;

	vgbo->placement.placement = &vgbo->placement_code;
	vgbo->placement.busy_placement = &vgbo->placement_code;
	vgbo->placement_code.fpfn = 0;
	vgbo->placement_code.lpfn = 0;
	vgbo->placement_code.flags =
		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
		TTM_PL_FLAG_NO_EVICT;
	vgbo->placement.num_placement = c;
	vgbo->placement.num_busy_placement = c;
}

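/*
 * Create a guest buffer object together with its host resource: allocate
 * an ID, initialize the GEM object, queue the resource-create command,
 * then back the object with TTM.  When a fence is passed in, the buffer
 * stays reserved until the create command has completed on the host, so
 * later commands cannot race with the creation.
 */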
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo;
	size_t acc_size;
	int ret;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0) {
		kfree(bo);
		return ret;
	}
	params->size = roundup(params->size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
	if (ret != 0) {
		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
		kfree(bo);
		return ret;
	}
	bo->dumb = params->dumb;

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
	}

	virtio_gpu_init_ttm_placement(bo);
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
			  ttm_bo_type_device, &bo->placement, 0,
			  true, acc_size, NULL, NULL,
			  &virtio_gpu_ttm_bo_destroy);
	/* ttm_bo_init failure will call the destroy */
	if (ret != 0)
		return ret;

	if (fence) {
		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
		struct list_head validate_list;
		struct ttm_validate_buffer mainbuf;
		struct ww_acquire_ctx ticket;
		unsigned long irq_flags;
		bool signaled;

		INIT_LIST_HEAD(&validate_list);
		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&bo->gem_base);
		mainbuf.bo = &bo->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret == 0) {
			spin_lock_irqsave(&drv->lock, irq_flags);
			signaled = virtio_fence_signaled(&fence->f);
			if (!signaled)
				/* virtio create command still in flight */
				ttm_eu_fence_buffer_objects(&ticket, &validate_list,
							    &fence->f);
			spin_unlock_irqrestore(&drv->lock, irq_flags);
			if (signaled)
				/* virtio create command finished */
				ttm_eu_backoff_reservation(&ticket, &validate_list);
		}
		virtio_gpu_unref_list(&validate_list);
	}

	*bo_ptr = bo;
	return 0;
}

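/*
 * Kernel mapping helpers: kmap() maps the whole buffer through TTM and
 * caches the virtual address in bo->vmap, kunmap() drops the mapping.
 */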
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
{
	bo->vmap = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
{
	bool is_iomem;
	int r;

	WARN_ON(bo->vmap);

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	return 0;
}

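/*
 * Build an sg table covering the buffer's backing pages so they can be
 * handed to the virtio transport.  Segment size is capped at the
 * device's maximum DMA size, so no single entry exceeds what the
 * transport can handle.
 */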
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	size_t max_segment;

	/* wtf swapping */
	if (bo->pages)
		return 0;

	if (bo->tbo.ttm->state == tt_unpopulated)
		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		goto out;

	max_segment = virtio_max_dma_size(qdev->vdev);
	max_segment &= PAGE_MASK;
	if (max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret)
		goto out;
	return 0;
out:
	kfree(bo->pages);
	bo->pages = NULL;
	return -ENOMEM;
}

void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
{
	sg_free_table(bo->pages);
	kfree(bo->pages);
	bo->pages = NULL;
}

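/*
 * Reserve the buffer and wait (or just poll, with no_wait) until all
 * pending work on it has finished.
 */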
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

v5.14.15
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

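/*
 * The #if 0 hack from v5.4 has become a runtime switch: with
 * virtio_gpu.virglhack=1 (the default) resource IDs come from an atomic
 * counter and are never reused; with 0 the ida allocator recycles them.
 */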
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

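/*
 * Final teardown, shared by the shmem and vram object types: give back
 * the resource ID, then release whichever backing store the object
 * carries (sg table and shmem pages, or the host-visible vram node).
 */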
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
					     shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

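/*
 * GEM free callback.  If the host still knows about the resource, an
 * unref command is queued first and the actual cleanup is deferred to
 * the command's completion handler; otherwise the object can be torn
 * down immediately.
 */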
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

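/*
 * GEM function table for shmem-backed objects.  Everything except
 * free/open/close/export is delegated to the generic drm_gem_shmem
 * helpers; virtio_gpu_is_shmem() identifies an object by checking for
 * exactly this table.
 */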
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

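/*
 * Object allocation hook: only allocates the wrapper struct and wires up
 * the shmem function table; host-side setup happens later in
 * virtio_gpu_object_create().
 */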
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

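/*
 * Pin the shmem pages and translate them into an array of
 * virtio_gpu_mem_entry structs, the format the host expects for backing
 * storage.  With the DMA API in use the table is dma-mapped first and
 * the entries carry bus addresses; otherwise they carry physical
 * addresses.
 */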
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

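/*
 * v5.14 counterpart of the v5.4 virtio_gpu_object_create() above: TTM is
 * gone, the object is a drm_gem_shmem_object, and fencing goes through a
 * virtio_gpu_object_array instead of a TTM validate list.  Blob
 * resources (params->blob) are a third flavour next to dumb/2D and
 * virgl 3D resources.
 */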
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}