v6.2
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

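/*
 * Reserve a DRM event so that fence signaling can later deliver a
 * VIRTGPU_EVENT_FENCE_SIGNALED event to this file.  Only rings selected in
 * the file's poll mask (VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK) get an
 * event; for all other rings this is a no-op that returns 0.
 */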
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 uint32_t ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		return 0;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret)
		goto free;

	fence->e = e;
	return 0;
free:
	kfree(e);
	return ret;
}

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}

void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

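/*
 * Look up the dumb buffer behind the given GEM handle and return the fake
 * mmap offset that userspace should pass to mmap() on the DRM fd.
 */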
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int out_fence_fd = -1;
	void *buf;
	uint64_t fence_ctx;
	uint32_t ring_idx;

	fence_ctx = vgdev->fence_drv.context;
	ring_idx = 0;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return -EINVAL;

		if (!vfpriv->base_fence_ctx)
			return -EINVAL;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(exbuf->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
	if (ret)
		goto out_unresv;

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

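/*
 * Report a single device capability to userspace.  param->value is a
 * user-space pointer; the result is written through it as an int.
 */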
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

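/*
 * Create a host resource plus its backing GEM object and return both the
 * host resource id and the GEM handle.  Without virgl 3D support only
 * simple 2D resources (target 2, single sample/level/layer) are accepted.
 */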
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

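/*
 * Queue a transfer of resource contents from the host into guest-visible
 * storage.  Only meaningful with virgl 3D; the stride arguments are
 * accepted for host3d blob resources only.
 */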
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

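/*
 * Wait for all fences on the BO's reservation object to signal (up to
 * 15 seconds), or just poll them when VIRTGPU_WAIT_NOWAIT is set.
 * Returns -EBUSY while the BO is still busy.
 */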
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

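/*
 * Copy a capability set to userspace, fetching it from the host (and
 * caching it) on first use.  At most min(args->size, host capset size)
 * bytes are copied.
 */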
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
					&cache_ent);
	if (ret)
		return ret;
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* The is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

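/*
 * Validate the blob resource arguments against the device's feature set
 * and translate them into virtio_gpu_object_params.  HOST3D blobs carry a
 * context-provided blob_id; all other blob types must pass blob_id == 0
 * and no command buffer.
 */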
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

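/*
 * Initialize the per-file context with an explicit capset id, ring count
 * and/or poll-ring mask.  May only succeed once per file: a context that
 * was already created (explicitly or implicitly) yields -EEXIST.
 */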
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param, value;
	uint64_t valid_ring_mask;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};
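
For reference, a minimal userspace sketch of how one of the ioctls above might be driven, here VIRTGPU_GETPARAM, matching the v6.2 getparam handler. The struct and ioctl number come from <drm/virtgpu_drm.h>; the render-node path and the use of libdrm's drmIoctl() wrapper are assumptions for illustration, not part of the listing:

	/* getparam_example.c - hypothetical usage sketch, not from the kernel tree */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <xf86drm.h>            /* drmIoctl(), from libdrm */
	#include <drm/virtgpu_drm.h>    /* struct drm_virtgpu_getparam */

	int main(void)
	{
		/* Path is an assumption; the render node index varies per system. */
		int fd = open("/dev/dri/renderD128", O_RDWR);
		int has_3d = 0;
		struct drm_virtgpu_getparam gp = {
			.param = VIRTGPU_PARAM_3D_FEATURES,
			/* The handler copy_to_user()s an int through this pointer. */
			.value = (uintptr_t)&has_3d,
		};

		if (fd < 0)
			return 1;
		if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
			printf("virgl 3D: %s\n", has_3d ? "yes" : "no");
		return 0;
	}

Because every entry in the table uses DRM_RENDER_ALLOW, this works on a render node without DRM master privileges.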
v4.10.11
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

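/* Convert a CPU-endian box from userspace into the little-endian wire format. */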
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

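/*
 * Reserve every buffer on the validate list, then place each one via
 * ttm_bo_validate(); on failure the whole reservation is backed off.
 */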
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	dma_fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {

		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;
	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	   thread these in the underlying GL */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};