v5.4
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

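/*
 * The virtio-gpu wire format uses little-endian fields, so guest-native
 * box coordinates are converted with cpu_to_le32() before being placed
 * into a command.
 */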
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

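/*
 * Reserve every buffer object on @head via TTM's execbuf utilities and
 * validate each one against its placement.  On failure the whole
 * reservation is backed off, so the caller only has to drop its own
 * references.
 */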
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
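/*
 * Fence file-descriptor handling: with VIRTGPU_EXECBUF_FENCE_FD_IN the
 * submission first waits on the passed-in sync_file fence (unless it
 * already belongs to this device's fence context); with
 * VIRTGPU_EXECBUF_FENCE_FD_OUT a new sync_file wrapping the submission
 * fence is installed and its fd is returned in exbuf->fence_fd.
 */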
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					   sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer),
					   GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				ret = -ENOENT;
				goto out_unused_fd;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, out_fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return 0;

out_memdup:
	kfree(buf);
out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
out_unused_fd:
	kvfree(bo_handles);
	kvfree(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

249
250static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
251				     struct drm_file *file_priv)
252{
253	struct virtio_gpu_device *vgdev = dev->dev_private;
254	struct drm_virtgpu_getparam *param = data;
255	int value;
256
257	switch (param->param) {
258	case VIRTGPU_PARAM_3D_FEATURES:
259		value = vgdev->has_virgl_3d == true ? 1 : 0;
260		break;
261	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
262		value = 1;
263		break;
264	default:
265		return -EINVAL;
266	}
267	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
268		return -EFAULT;
269
270	return 0;
271}
272
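/*
 * Without virgl 3D support only plain 2D resources can be created: the
 * depth/sample/mip/array parameters must describe a single-layer,
 * single-sample surface, and the target must be 2 (a 2D texture in
 * Gallium's pipe_texture_target numbering).
 */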
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	qobj = virtio_gpu_alloc_object(dev, &params, fence);
	dma_fence_put(&fence->f);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

336
337static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
338					  struct drm_file *file_priv)
339{
340	struct drm_virtgpu_resource_info *ri = data;
341	struct drm_gem_object *gobj = NULL;
342	struct virtio_gpu_object *qobj = NULL;
343
344	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
345	if (gobj == NULL)
346		return -ENOENT;
347
348	qobj = gem_to_virtio_gpu_obj(gobj);
349
350	ri->size = qobj->gem_base.size;
351	ri->res_handle = qobj->hw_res_handle;
352	drm_gem_object_put_unlocked(gobj);
353	return 0;
354}
355
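/*
 * Transfers from the host are only meaningful with virgl 3D.  The BO is
 * reserved and validated, the transfer command is queued with a fence,
 * and the fence is attached to the BO's reservation object as an
 * exclusive fence so that later users wait for the transfer to finish.
 */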
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unres;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, fence);
	dma_resv_add_excl_fence(qobj->tbo.base.resv,
				&fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

409
410static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
411					     struct drm_file *file)
412{
413	struct virtio_gpu_device *vgdev = dev->dev_private;
414	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
415	struct drm_virtgpu_3d_transfer_to_host *args = data;
416	struct ttm_operation_ctx ctx = { true, false };
417	struct drm_gem_object *gobj = NULL;
418	struct virtio_gpu_object *qobj = NULL;
419	struct virtio_gpu_fence *fence;
420	struct virtio_gpu_box box;
421	int ret;
422	u32 offset = args->offset;
423
424	gobj = drm_gem_object_lookup(file, args->bo_handle);
425	if (gobj == NULL)
426		return -ENOENT;
427
428	qobj = gem_to_virtio_gpu_obj(gobj);
429
430	ret = virtio_gpu_object_reserve(qobj, false);
431	if (ret)
432		goto out;
433
434	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
435	if (unlikely(ret))
436		goto out_unres;
437
438	convert_to_hw_box(&box, &args->box);
439	if (!vgdev->has_virgl_3d) {
440		virtio_gpu_cmd_transfer_to_host_2d
441			(vgdev, qobj, offset,
442			 box.w, box.h, box.x, box.y, NULL);
 
443	} else {
 
 
 
 
 
 
444		fence = virtio_gpu_fence_alloc(vgdev);
445		if (!fence) {
446			ret = -ENOMEM;
447			goto out_unres;
448		}
449		virtio_gpu_cmd_transfer_to_host_3d
450			(vgdev, qobj,
451			 vfpriv ? vfpriv->ctx_id : 0, offset,
452			 args->level, &box, fence);
453		dma_resv_add_excl_fence(qobj->tbo.base.resv,
454						  &fence->f);
455		dma_fence_put(&fence->f);
456	}
 
 
457
458out_unres:
459	virtio_gpu_object_unreserve(qobj);
460out:
461	drm_gem_object_put_unlocked(gobj);
462	return ret;
463}
464
465static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
466			    struct drm_file *file)
467{
468	struct drm_virtgpu_3d_wait *args = data;
469	struct drm_gem_object *gobj = NULL;
470	struct virtio_gpu_object *qobj = NULL;
471	int ret;
472	bool nowait = false;
473
474	gobj = drm_gem_object_lookup(file, args->handle);
475	if (gobj == NULL)
476		return -ENOENT;
477
478	qobj = gem_to_virtio_gpu_obj(gobj);
479
480	if (args->flags & VIRTGPU_WAIT_NOWAIT)
481		nowait = true;
482	ret = virtio_gpu_object_wait(qobj, nowait);
 
 
 
 
 
483
484	drm_gem_object_put_unlocked(gobj);
485	return ret;
486}
487
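/*
 * Capability sets are cached per (id, version).  A cache miss sends a
 * VIRTIO_GPU_CMD_GET_CAPSET to the host; either way the caller then
 * waits (up to five seconds) for the entry to become valid before
 * copying it out.  The smp_rmb() pairs with the write barrier on the
 * response side, which marks the entry valid only after the payload has
 * been written.
 */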
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};
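
Taken together, the ioctls above form the guest-side UAPI of the driver. As a rough illustration (not part of the kernel file), here is a minimal userspace sketch of the getparam path, assuming the virtio-gpu card is the first render node at /dev/dri/renderD128 and that the virtgpu UAPI header is on the include path:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/virtgpu_drm.h>	/* UAPI header; exact include path may vary */

int main(void)
{
	/* Assumption: the virtio-gpu device is the first render node. */
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	int value = 0;	/* the kernel copies back sizeof(int) */
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,
		.value = (uint64_t)(uintptr_t)&value,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
		printf("virgl 3D support: %s\n", value ? "yes" : "no");
	close(fd);
	return 0;
}
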
v5.9
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

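/*
 * Contexts are created lazily, on the first ioctl that needs one.
 * context_lock guards against two threads racing to create the context
 * for the same file; the context is named after the calling task to
 * ease debugging on the host side.
 */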
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

201
202static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
203				     struct drm_file *file)
204{
205	struct virtio_gpu_device *vgdev = dev->dev_private;
206	struct drm_virtgpu_getparam *param = data;
207	int value;
208
209	switch (param->param) {
210	case VIRTGPU_PARAM_3D_FEATURES:
211		value = vgdev->has_virgl_3d == true ? 1 : 0;
212		break;
213	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
214		value = 1;
215		break;
216	default:
217		return -EINVAL;
218	}
219	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
220		return -EFAULT;
221
222	return 0;
223}
224
225static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
226					    struct drm_file *file)
227{
228	struct virtio_gpu_device *vgdev = dev->dev_private;
229	struct drm_virtgpu_resource_create *rc = data;
230	struct virtio_gpu_fence *fence;
231	int ret;
232	struct virtio_gpu_object *qobj;
233	struct drm_gem_object *obj;
234	uint32_t handle = 0;
235	struct virtio_gpu_object_params params = { 0 };
236
237	if (vgdev->has_virgl_3d) {
238		virtio_gpu_create_context(dev, file);
239		params.virgl = true;
240		params.target = rc->target;
241		params.bind = rc->bind;
242		params.depth = rc->depth;
243		params.array_size = rc->array_size;
244		params.last_level = rc->last_level;
245		params.nr_samples = rc->nr_samples;
246		params.flags = rc->flags;
247	} else {
248		if (rc->depth > 1)
249			return -EINVAL;
250		if (rc->nr_samples > 1)
251			return -EINVAL;
252		if (rc->last_level > 1)
253			return -EINVAL;
254		if (rc->target != 2)
255			return -EINVAL;
256		if (rc->array_size > 1)
257			return -EINVAL;
258	}
259
260	params.format = rc->format;
261	params.width = rc->width;
262	params.height = rc->height;
263	params.size = rc->size;
 
 
 
 
 
 
 
 
 
 
264	/* allocate a single page size object */
265	if (params.size == 0)
266		params.size = PAGE_SIZE;
267
268	fence = virtio_gpu_fence_alloc(vgdev);
269	if (!fence)
270		return -ENOMEM;
271	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
272	dma_fence_put(&fence->f);
273	if (ret < 0)
274		return ret;
275	obj = &qobj->base.base;
276
277	ret = drm_gem_handle_create(file, obj, &handle);
278	if (ret) {
279		drm_gem_object_release(obj);
280		return ret;
281	}
282	drm_gem_object_put(obj);
283
284	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
285	rc->bo_handle = handle;
286	return 0;
287}
288
289static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
290					  struct drm_file *file)
291{
292	struct drm_virtgpu_resource_info *ri = data;
293	struct drm_gem_object *gobj = NULL;
294	struct virtio_gpu_object *qobj = NULL;
295
296	gobj = drm_gem_object_lookup(file, ri->bo_handle);
297	if (gobj == NULL)
298		return -ENOENT;
299
300	qobj = gem_to_virtio_gpu_obj(gobj);
301
302	ri->size = qobj->base.base.size;
303	ri->res_handle = qobj->hw_res_handle;
304	drm_gem_object_put(gobj);
305	return 0;
306}
307
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

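/*
 * For both paths below, the reference on the object array is handed
 * over to the submitted command and released once the host has
 * processed it.  Only the virgl path locks the reservation and attaches
 * a fence, because only there does completion need to be ordered
 * against other users of the BO.
 */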
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

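/*
 * The wait ioctl now operates directly on the GEM object's reservation
 * object: VIRTGPU_WAIT_NOWAIT merely polls whether all fences have
 * signalled, otherwise we block for up to 15 seconds.  -EBUSY is
 * returned while the object is still busy.
 */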
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};
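
To close, a hedged userspace sketch of the v5.9 wait path above: poll with VIRTGPU_WAIT_NOWAIT first, then block (the kernel caps the wait at 15 seconds). The fd and BO handle are placeholders for values obtained earlier, e.g. from VIRTGPU_RESOURCE_CREATE:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/virtgpu_drm.h>	/* UAPI header; exact include path may vary */

/* Returns 0 once the BO is idle, a negative errno otherwise. */
static int wait_for_bo(int fd, uint32_t bo_handle)
{
	struct drm_virtgpu_3d_wait wait = {
		.handle = bo_handle,
		.flags = VIRTGPU_WAIT_NOWAIT,	/* first, just poll */
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) == 0)
		return 0;			/* already idle */
	if (errno != EBUSY)
		return -errno;

	wait.flags = 0;				/* now block (up to 15s in-kernel) */
	return ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) ? -errno : 0;
}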