v4.6: drivers/gpu/drm/virtio/virtgpu_ioctl.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

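/*
 * Reserve every buffer on @head under a single ww_acquire_ctx via
 * ttm_eu_reserve_buffers(), then have TTM validate each object against
 * its placement; on any validation failure the whole reservation is
 * backed off again as a unit.
 */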
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

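/*
 * Command submission: copy the BO handle array in from userspace, look
 * each handle up onto a validate list, reserve and validate that list,
 * copy the command stream in, submit it to the host, and fence the
 * buffers so they stay busy until the host has consumed the commands.
 */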
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;
	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}


static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {

		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;
	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	   thread these in the underlying GL */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
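
Before moving to the newer version, a short userspace sketch may help show how these ioctls are driven. The following is a minimal, hedged example (not part of the kernel file) of the VIRTGPU_GETPARAM path handled by virtio_gpu_getparam_ioctl() above. The render-node path /dev/dri/renderD128 is an assumption; DRM_IOCTL_VIRTGPU_GETPARAM, struct drm_virtgpu_getparam and VIRTGPU_PARAM_3D_FEATURES come from the uapi header <drm/virtgpu_drm.h> (build with something like cc -I/usr/include/libdrm).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/virtgpu_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	int value = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,
		/* the kernel copy_to_user()s an int through this pointer */
		.value = (uint64_t)(uintptr_t)&value,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp))
		perror("DRM_IOCTL_VIRTGPU_GETPARAM");
	else
		printf("virgl 3D features: %s\n", value ? "yes" : "no");

	close(fd);
	return 0;
}
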
v5.9: drivers/gpu/drm/virtio/virtgpu_ioctl.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

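/*
 * Host contexts are created lazily, at most once per DRM file: creation is
 * guarded by vfpriv->context_lock, and the context is named after the
 * current task for debugging.
 */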
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put(gobj);
	return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

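/*
 * Note the permission flags: where the v4.6 table above used
 * DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW, this table keeps only
 * DRM_RENDER_ALLOW.
 */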
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};
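
To close, a similar hedged sketch (again not from the kernel tree) exercises the v5.9 VIRTGPU_RESOURCE_CREATE and VIRTGPU_WAIT paths above: it creates a small resource, then polls it with VIRTGPU_WAIT_NOWAIT, which virtio_gpu_wait_ioctl() maps to dma_resv_test_signaled_rcu() and -EBUSY while the object is still fenced. The node path and the numeric format id are assumptions; the structs and ioctl numbers come from <drm/virtgpu_drm.h>.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/virtgpu_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct drm_virtgpu_resource_create rc;
	memset(&rc, 0, sizeof(rc));
	rc.target = 2;	/* the only target the non-virgl path accepts */
	rc.format = 2;	/* assumed: a plain 32bpp format id on the host */
	rc.width = 64;
	rc.height = 64;
	rc.size = 64 * 64 * 4;

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc)) {
		perror("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE");
		close(fd);
		return 1;
	}

	struct drm_virtgpu_3d_wait wait;
	memset(&wait, 0, sizeof(wait));
	wait.handle = rc.bo_handle;
	wait.flags = VIRTGPU_WAIT_NOWAIT; /* returns -EBUSY if still fenced */

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait))
		perror("DRM_IOCTL_VIRTGPU_WAIT");
	else
		printf("bo %u is idle\n", rc.bo_handle);

	close(fd);
	return 0;
}
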