v4.6 (drivers/gpu/drm/virtio/virtgpu_ioctl.c)
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie
  7 *    Alon Levy
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice shall be included in
 17 * all copies or substantial portions of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 25 * OTHER DEALINGS IN THE SOFTWARE.
 26 */
 27
 28#include <drm/drmP.h>
 29#include "virtgpu_drv.h"
 30#include <drm/virtgpu_drm.h>
 31#include "ttm/ttm_execbuf_util.h"
 32
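   /* The virtio-gpu wire format is little-endian, hence the cpu_to_le32
    * conversion of every box coordinate below. */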
 33static void convert_to_hw_box(struct virtio_gpu_box *dst,
 34			      const struct drm_virtgpu_3d_box *src)
 35{
 36	dst->x = cpu_to_le32(src->x);
 37	dst->y = cpu_to_le32(src->y);
 38	dst->z = cpu_to_le32(src->z);
 39	dst->w = cpu_to_le32(src->w);
 40	dst->h = cpu_to_le32(src->h);
 41	dst->d = cpu_to_le32(src->d);
 42}
 43
 44static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 45				struct drm_file *file_priv)
 46{
 47	struct virtio_gpu_device *vgdev = dev->dev_private;
 48	struct drm_virtgpu_map *virtio_gpu_map = data;
 49
 50	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
 51					 virtio_gpu_map->handle,
 52					 &virtio_gpu_map->offset);
 53}
 54
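   /*
    * Reserve every buffer on @head under a single ww_acquire ticket, then
    * validate each against its placement; any failure backs off the whole
    * reservation so the caller never sees a partially locked list.
    */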
 55static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 56					   struct list_head *head)
 57{
 58	struct ttm_validate_buffer *buf;
 59	struct ttm_buffer_object *bo;
 60	struct virtio_gpu_object *qobj;
 61	int ret;
 62
 63	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
 64	if (ret != 0)
 65		return ret;
 66
 67	list_for_each_entry(buf, head, head) {
 68		bo = buf->bo;
 69		qobj = container_of(bo, struct virtio_gpu_object, tbo);
 70		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
 71		if (ret) {
 72			ttm_eu_backoff_reservation(ticket, head);
 73			return ret;
 74		}
 75	}
 76	return 0;
 77}
 78
 79static void virtio_gpu_unref_list(struct list_head *head)
 80{
 81	struct ttm_validate_buffer *buf;
 82	struct ttm_buffer_object *bo;
 83	struct virtio_gpu_object *qobj;
 84	list_for_each_entry(buf, head, head) {
 85		bo = buf->bo;
 86		qobj = container_of(bo, struct virtio_gpu_object, tbo);
 87
 88		drm_gem_object_unreference_unlocked(&qobj->gem_base);
 89	}
 90}
 91
 92static int virtio_gpu_execbuffer(struct drm_device *dev,
 93				 struct drm_virtgpu_execbuffer *exbuf,
 94				 struct drm_file *drm_file)
 95{
 96	struct virtio_gpu_device *vgdev = dev->dev_private;
 97	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
 98	struct drm_gem_object *gobj;
 99	struct virtio_gpu_fence *fence;
100	struct virtio_gpu_object *qobj;
101	int ret;
102	uint32_t *bo_handles = NULL;
103	void __user *user_bo_handles = NULL;
104	struct list_head validate_list;
105	struct ttm_validate_buffer *buflist = NULL;
106	int i;
107	struct ww_acquire_ctx ticket;
108	void *buf;
109
110	if (vgdev->has_virgl_3d == false)
111		return -ENOSYS;
112
113	INIT_LIST_HEAD(&validate_list);
114	if (exbuf->num_bo_handles) {
115
116		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
117					   sizeof(uint32_t));
118		buflist = drm_calloc_large(exbuf->num_bo_handles,
119					   sizeof(struct ttm_validate_buffer));
120		if (!bo_handles || !buflist) {
121			drm_free_large(bo_handles);
122			drm_free_large(buflist);
123			return -ENOMEM;
124		}
125
126		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
127		if (copy_from_user(bo_handles, user_bo_handles,
128				   exbuf->num_bo_handles * sizeof(uint32_t))) {
129			ret = -EFAULT;
130			drm_free_large(bo_handles);
131			drm_free_large(buflist);
132			return ret;
133		}
134
135		for (i = 0; i < exbuf->num_bo_handles; i++) {
136			gobj = drm_gem_object_lookup(dev,
137						     drm_file, bo_handles[i]);
138			if (!gobj) {
139				drm_free_large(bo_handles);
140				drm_free_large(buflist);
141				return -ENOENT;
142			}
143
144			qobj = gem_to_virtio_gpu_obj(gobj);
145			buflist[i].bo = &qobj->tbo;
146
147			list_add(&buflist[i].head, &validate_list);
148		}
149		drm_free_large(bo_handles);
150	}
151
152	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
153	if (ret)
154		goto out_free;
155
156	buf = kmalloc(exbuf->size, GFP_KERNEL);
157	if (!buf) {
158		ret = -ENOMEM;
159		goto out_unresv;
160	}
161	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
162			   exbuf->size)) {
163		kfree(buf);
164		ret = -EFAULT;
165		goto out_unresv;
166	}
167	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
168			      vfpriv->ctx_id, &fence);
169
170	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
171
172	/* fence the command bo */
173	virtio_gpu_unref_list(&validate_list);
174	drm_free_large(buflist);
175	fence_put(&fence->f);
176	return 0;
177
178out_unresv:
179	ttm_eu_backoff_reservation(&ticket, &validate_list);
180out_free:
181	virtio_gpu_unref_list(&validate_list);
182	drm_free_large(buflist);
183	return ret;
184}
185
186/*
187 * Usage of execbuffer:
188 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
189 * However, the command as passed from user space must *not* contain the initial
190 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
191 */
192static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
193				       struct drm_file *file_priv)
194{
195	struct drm_virtgpu_execbuffer *execbuffer = data;
196	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
197}
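   /*
    * Editor's sketch (not part of the original source): userspace drives
    * this ioctl roughly as below, assuming an open virtio-gpu render node
    * in `fd` and a prepared command stream in `cmd`:
    *
    *	struct drm_virtgpu_execbuffer eb = {
    *		.command        = (uintptr_t)cmd,     // stream without the release-info header
    *		.size           = cmd_size,
    *		.bo_handles     = (uintptr_t)handles, // array of uint32_t GEM handles
    *		.num_bo_handles = nr_handles,
    *	};
    *	ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
    */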
198
199
200static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
201				     struct drm_file *file_priv)
202{
203	struct virtio_gpu_device *vgdev = dev->dev_private;
204	struct drm_virtgpu_getparam *param = data;
205	int value;
206
207	switch (param->param) {
208	case VIRTGPU_PARAM_3D_FEATURES:
209		value = vgdev->has_virgl_3d == true ? 1 : 0;
210		break;
211	default:
212		return -EINVAL;
213	}
214	if (copy_to_user((void __user *)(unsigned long)param->value,
215			 &value, sizeof(int))) {
216		return -EFAULT;
217	}
218	return 0;
219}
220
221static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
222					    struct drm_file *file_priv)
223{
224	struct virtio_gpu_device *vgdev = dev->dev_private;
225	struct drm_virtgpu_resource_create *rc = data;
226	int ret;
227	uint32_t res_id;
228	struct virtio_gpu_object *qobj;
229	struct drm_gem_object *obj;
230	uint32_t handle = 0;
231	uint32_t size;
232	struct list_head validate_list;
233	struct ttm_validate_buffer mainbuf;
234	struct virtio_gpu_fence *fence = NULL;
235	struct ww_acquire_ctx ticket;
236	struct virtio_gpu_resource_create_3d rc_3d;
237
238	if (vgdev->has_virgl_3d == false) {
239		if (rc->depth > 1)
240			return -EINVAL;
241		if (rc->nr_samples > 1)
242			return -EINVAL;
243		if (rc->last_level > 1)
244			return -EINVAL;
245		if (rc->target != 2)
246			return -EINVAL;
247		if (rc->array_size > 1)
248			return -EINVAL;
249	}
250
251	INIT_LIST_HEAD(&validate_list);
252	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
253
254	virtio_gpu_resource_id_get(vgdev, &res_id);
255
256	size = rc->size;
257
258	/* allocate a single page size object */
259	if (size == 0)
260		size = PAGE_SIZE;
261
262	qobj = virtio_gpu_alloc_object(dev, size, false, false);
263	if (IS_ERR(qobj)) {
264		ret = PTR_ERR(qobj);
265		goto fail_id;
266	}
267	obj = &qobj->gem_base;
268
269	if (!vgdev->has_virgl_3d) {
270		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
271					       rc->width, rc->height);
272
273		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
274	} else {
275		/* use a gem reference since unref list undoes them */
276		drm_gem_object_reference(&qobj->gem_base);
277		mainbuf.bo = &qobj->tbo;
278		list_add(&mainbuf.head, &validate_list);
279
280		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
281		if (ret) {
282			DRM_DEBUG("failed to validate\n");
283			goto fail_unref;
284		}
285
286		rc_3d.resource_id = cpu_to_le32(res_id);
287		rc_3d.target = cpu_to_le32(rc->target);
288		rc_3d.format = cpu_to_le32(rc->format);
289		rc_3d.bind = cpu_to_le32(rc->bind);
290		rc_3d.width = cpu_to_le32(rc->width);
291		rc_3d.height = cpu_to_le32(rc->height);
292		rc_3d.depth = cpu_to_le32(rc->depth);
293		rc_3d.array_size = cpu_to_le32(rc->array_size);
294		rc_3d.last_level = cpu_to_le32(rc->last_level);
295		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
296		rc_3d.flags = cpu_to_le32(rc->flags);
297
298		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
299		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
300		if (ret) {
301			ttm_eu_backoff_reservation(&ticket, &validate_list);
302			goto fail_unref;
303		}
304		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
305	}
306
307	qobj->hw_res_handle = res_id;
308
309	ret = drm_gem_handle_create(file_priv, obj, &handle);
310	if (ret) {
311
312		drm_gem_object_release(obj);
313		if (vgdev->has_virgl_3d) {
314			virtio_gpu_unref_list(&validate_list);
315			fence_put(&fence->f);
316		}
317		return ret;
318	}
319	drm_gem_object_unreference_unlocked(obj);
320
321	rc->res_handle = res_id; /* similar to a VM address */
322	rc->bo_handle = handle;
323
324	if (vgdev->has_virgl_3d) {
325		virtio_gpu_unref_list(&validate_list);
326		fence_put(&fence->f);
327	}
328	return 0;
329fail_unref:
330	if (vgdev->has_virgl_3d) {
331		virtio_gpu_unref_list(&validate_list);
332		fence_put(&fence->f);
333	}
334//fail_obj:
335//	drm_gem_object_handle_unreference_unlocked(obj);
336fail_id:
337	virtio_gpu_resource_id_put(vgdev, res_id);
338	return ret;
339}
340
341static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
342					  struct drm_file *file_priv)
343{
344	struct drm_virtgpu_resource_info *ri = data;
345	struct drm_gem_object *gobj = NULL;
346	struct virtio_gpu_object *qobj = NULL;
347
348	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
349	if (gobj == NULL)
350		return -ENOENT;
351
352	qobj = gem_to_virtio_gpu_obj(gobj);
353
354	ri->size = qobj->gem_base.size;
355	ri->res_handle = qobj->hw_res_handle;
356	drm_gem_object_unreference_unlocked(gobj);
357	return 0;
358}
359
360static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
361					       void *data,
362					       struct drm_file *file)
363{
364	struct virtio_gpu_device *vgdev = dev->dev_private;
365	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
366	struct drm_virtgpu_3d_transfer_from_host *args = data;
367	struct drm_gem_object *gobj = NULL;
368	struct virtio_gpu_object *qobj = NULL;
369	struct virtio_gpu_fence *fence;
370	int ret;
371	u32 offset = args->offset;
372	struct virtio_gpu_box box;
373
374	if (vgdev->has_virgl_3d == false)
375		return -ENOSYS;
376
377	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
378	if (gobj == NULL)
379		return -ENOENT;
380
381	qobj = gem_to_virtio_gpu_obj(gobj);
382
383	ret = virtio_gpu_object_reserve(qobj, false);
384	if (ret)
385		goto out;
386
387	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
388			      true, false);
389	if (unlikely(ret))
390		goto out_unres;
391
392	convert_to_hw_box(&box, &args->box);
393	virtio_gpu_cmd_transfer_from_host_3d
394		(vgdev, qobj->hw_res_handle,
395		 vfpriv->ctx_id, offset, args->level,
396		 &box, &fence);
397	reservation_object_add_excl_fence(qobj->tbo.resv,
398					  &fence->f);
399
400	fence_put(&fence->f);
401out_unres:
402	virtio_gpu_object_unreserve(qobj);
403out:
404	drm_gem_object_unreference_unlocked(gobj);
405	return ret;
406}
407
408static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
409					     struct drm_file *file)
410{
411	struct virtio_gpu_device *vgdev = dev->dev_private;
412	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
413	struct drm_virtgpu_3d_transfer_to_host *args = data;
414	struct drm_gem_object *gobj = NULL;
415	struct virtio_gpu_object *qobj = NULL;
416	struct virtio_gpu_fence *fence;
417	struct virtio_gpu_box box;
418	int ret;
419	u32 offset = args->offset;
420
421	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
422	if (gobj == NULL)
423		return -ENOENT;
424
425	qobj = gem_to_virtio_gpu_obj(gobj);
426
427	ret = virtio_gpu_object_reserve(qobj, false);
428	if (ret)
429		goto out;
430
431	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
432			      true, false);
433	if (unlikely(ret))
434		goto out_unres;
435
436	convert_to_hw_box(&box, &args->box);
437	if (!vgdev->has_virgl_3d) {
438		virtio_gpu_cmd_transfer_to_host_2d
439			(vgdev, qobj->hw_res_handle, offset,
440			 box.w, box.h, box.x, box.y, NULL);
441	} else {
442		virtio_gpu_cmd_transfer_to_host_3d
443			(vgdev, qobj->hw_res_handle,
444			 vfpriv ? vfpriv->ctx_id : 0, offset,
445			 args->level, &box, &fence);
446		reservation_object_add_excl_fence(qobj->tbo.resv,
447						  &fence->f);
448		fence_put(&fence->f);
449	}
450
451out_unres:
452	virtio_gpu_object_unreserve(qobj);
453out:
454	drm_gem_object_unreference_unlocked(gobj);
455	return ret;
456}
457
458static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
459			    struct drm_file *file)
460{
461	struct drm_virtgpu_3d_wait *args = data;
462	struct drm_gem_object *gobj = NULL;
463	struct virtio_gpu_object *qobj = NULL;
464	int ret;
465	bool nowait = false;
466
467	gobj = drm_gem_object_lookup(dev, file, args->handle);
468	if (gobj == NULL)
469		return -ENOENT;
470
471	qobj = gem_to_virtio_gpu_obj(gobj);
472
473	if (args->flags & VIRTGPU_WAIT_NOWAIT)
474		nowait = true;
475	ret = virtio_gpu_object_wait(qobj, nowait);
476
477	drm_gem_object_unreference_unlocked(gobj);
478	return ret;
479}
480
481static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
482				void *data, struct drm_file *file)
483{
484	struct virtio_gpu_device *vgdev = dev->dev_private;
485	struct drm_virtgpu_get_caps *args = data;
486	int size;
487	int i;
488	int found_valid = -1;
489	int ret;
490	struct virtio_gpu_drv_cap_cache *cache_ent;
491	void *ptr;
492	if (vgdev->num_capsets == 0)
493		return -ENOSYS;
494
495	spin_lock(&vgdev->display_info_lock);
496	for (i = 0; i < vgdev->num_capsets; i++) {
497		if (vgdev->capsets[i].id == args->cap_set_id) {
498			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
499				found_valid = i;
500				break;
501			}
502		}
503	}
504
505	if (found_valid == -1) {
506		spin_unlock(&vgdev->display_info_lock);
507		return -EINVAL;
508	}
509
510	size = vgdev->capsets[found_valid].max_size;
511	if (args->size > size) {
512		spin_unlock(&vgdev->display_info_lock);
513		return -EINVAL;
514	}
515
516	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
517		if (cache_ent->id == args->cap_set_id &&
518		    cache_ent->version == args->cap_set_ver) {
519			ptr = cache_ent->caps_cache;
520			spin_unlock(&vgdev->display_info_lock);
521			goto copy_exit;
522		}
523	}
524	spin_unlock(&vgdev->display_info_lock);
525
526	/* not in cache - need to talk to hw */
527	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
528				  &cache_ent);
529
530	ret = wait_event_timeout(vgdev->resp_wq,
531				 atomic_read(&cache_ent->is_valid), 5 * HZ);
532
533	ptr = cache_ent->caps_cache;
534
535copy_exit:
536	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
537		return -EFAULT;
538
539	return 0;
540}
541
542struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
543	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
544			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
545
546	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
547			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
548
549	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
550			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
551
552	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
553			  virtio_gpu_resource_create_ioctl,
554			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
555
556	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
557			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
558
559	/* make transfer async to the main ring? - not sure, can we
560	   thread these in the underlying GL */
561	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
562			  virtio_gpu_transfer_from_host_ioctl,
563			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
564	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
565			  virtio_gpu_transfer_to_host_ioctl,
566			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
567
568	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
569			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
570
571	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
572			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
573};
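
The userspace side of the ioctls above is worth seeing once. A minimal sketch (not from the kernel tree; the device node, the headers <fcntl.h>, <stdio.h>, <stdint.h>, <sys/ioctl.h>, <drm/virtgpu_drm.h>, and the error handling are assumptions) probing the 3D-features param handled by virtio_gpu_getparam_ioctl():

	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	int value = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,
		.value = (uintptr_t)&value,	/* the kernel copy_to_user()s an int here */
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
		printf("virgl 3D: %d\n", value);
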
v6.9.4 (drivers/gpu/drm/virtio/virtgpu_ioctl.c)
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie
  7 *    Alon Levy
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice shall be included in
 17 * all copies or substantial portions of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 25 * OTHER DEALINGS IN THE SOFTWARE.
 26 */
 27
 28#include <linux/file.h>
 29#include <linux/sync_file.h>
 30#include <linux/uaccess.h>
 31
 32#include <drm/drm_file.h>
 33#include <drm/virtgpu_drm.h>
 34
 35#include "virtgpu_drv.h"
 36
 37#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
 38				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
 39				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
 40
 41/* Must be called with &virtio_gpu_fpriv.context_lock held. */
 42static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
 43					     struct virtio_gpu_fpriv *vfpriv)
 44{
 45	if (vfpriv->explicit_debug_name) {
 46		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
 47					      vfpriv->context_init,
 48					      strlen(vfpriv->debug_name),
 49					      vfpriv->debug_name);
 50	} else {
 51		char dbgname[TASK_COMM_LEN];
 52
 53		get_task_comm(dbgname, current);
 54		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
 55					      vfpriv->context_init, strlen(dbgname),
 56					      dbgname);
 57	}
 58
 59	vfpriv->context_created = true;
 60}
 61
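   /*
    * Host contexts are created lazily: ioctls that need one call this
    * helper first, and the context_created flag (tested under
    * context_lock) turns repeat calls into no-ops.
    */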
 62void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 63{
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 65	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 66
 67	mutex_lock(&vfpriv->context_lock);
 68	if (vfpriv->context_created)
 69		goto out_unlock;
 70
 71	virtio_gpu_create_context_locked(vgdev, vfpriv);
 72
 73out_unlock:
 74	mutex_unlock(&vfpriv->context_lock);
 75}
 76
 77static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 78				struct drm_file *file)
 79{
 80	struct virtio_gpu_device *vgdev = dev->dev_private;
 81	struct drm_virtgpu_map *virtio_gpu_map = data;
 82
 83	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
 84					 virtio_gpu_map->handle,
 85					 &virtio_gpu_map->offset);
 86}
 87
 88static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 89				     struct drm_file *file)
 90{
 91	struct virtio_gpu_device *vgdev = dev->dev_private;
 92	struct drm_virtgpu_getparam *param = data;
 93	int value;
 94
 95	switch (param->param) {
 96	case VIRTGPU_PARAM_3D_FEATURES:
 97		value = vgdev->has_virgl_3d ? 1 : 0;
 98		break;
 99	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
100		value = 1;
101		break;
102	case VIRTGPU_PARAM_RESOURCE_BLOB:
103		value = vgdev->has_resource_blob ? 1 : 0;
104		break;
105	case VIRTGPU_PARAM_HOST_VISIBLE:
106		value = vgdev->has_host_visible ? 1 : 0;
107		break;
108	case VIRTGPU_PARAM_CROSS_DEVICE:
109		value = vgdev->has_resource_assign_uuid ? 1 : 0;
110		break;
111	case VIRTGPU_PARAM_CONTEXT_INIT:
112		value = vgdev->has_context_init ? 1 : 0;
113		break;
114	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
115		value = vgdev->capset_id_mask;
116		break;
117	case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
118		value = vgdev->has_context_init ? 1 : 0;
119		break;
120	default:
121		return -EINVAL;
122	}
123	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
124		return -EFAULT;
125
126	return 0;
127}
128
129static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
130					    struct drm_file *file)
131{
132	struct virtio_gpu_device *vgdev = dev->dev_private;
133	struct drm_virtgpu_resource_create *rc = data;
134	struct virtio_gpu_fence *fence;
135	int ret;
136	struct virtio_gpu_object *qobj;
137	struct drm_gem_object *obj;
138	uint32_t handle = 0;
139	struct virtio_gpu_object_params params = { 0 };
140
141	if (vgdev->has_virgl_3d) {
142		virtio_gpu_create_context(dev, file);
143		params.virgl = true;
144		params.target = rc->target;
145		params.bind = rc->bind;
146		params.depth = rc->depth;
147		params.array_size = rc->array_size;
148		params.last_level = rc->last_level;
149		params.nr_samples = rc->nr_samples;
150		params.flags = rc->flags;
151	} else {
152		if (rc->depth > 1)
153			return -EINVAL;
154		if (rc->nr_samples > 1)
155			return -EINVAL;
156		if (rc->last_level > 1)
157			return -EINVAL;
158		if (rc->target != 2)
159			return -EINVAL;
160		if (rc->array_size > 1)
161			return -EINVAL;
162	}
163
164	params.format = rc->format;
165	params.width = rc->width;
166	params.height = rc->height;
167	params.size = rc->size;
168	/* allocate a single page size object */
169	if (params.size == 0)
170		params.size = PAGE_SIZE;
171
172	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
173	if (!fence)
174		return -ENOMEM;
175	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
176	dma_fence_put(&fence->f);
177	if (ret < 0)
178		return ret;
179	obj = &qobj->base.base;
180
181	ret = drm_gem_handle_create(file, obj, &handle);
182	if (ret) {
183		drm_gem_object_release(obj);
184		return ret;
185	}
186
187	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
188	rc->bo_handle = handle;
189
190	/*
191	 * The handle owns the reference now.  But we must drop our
192	 * remaining reference *after* we no longer need to dereference
193	 * the obj.  Otherwise userspace could guess the handle and
194	 * race closing it from another thread.
195	 */
196	drm_gem_object_put(obj);
197
198	return 0;
199}
200
201static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
202					  struct drm_file *file)
203{
204	struct drm_virtgpu_resource_info *ri = data;
205	struct drm_gem_object *gobj = NULL;
206	struct virtio_gpu_object *qobj = NULL;
207
208	gobj = drm_gem_object_lookup(file, ri->bo_handle);
209	if (gobj == NULL)
210		return -ENOENT;
211
212	qobj = gem_to_virtio_gpu_obj(gobj);
213
214	ri->size = qobj->base.base.size;
215	ri->res_handle = qobj->hw_res_handle;
216	if (qobj->host3d_blob || qobj->guest_blob)
217		ri->blob_mem = qobj->blob_mem;
218
219	drm_gem_object_put(gobj);
220	return 0;
221}
222
223static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
224					       void *data,
225					       struct drm_file *file)
226{
227	struct virtio_gpu_device *vgdev = dev->dev_private;
228	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
229	struct drm_virtgpu_3d_transfer_from_host *args = data;
230	struct virtio_gpu_object *bo;
231	struct virtio_gpu_object_array *objs;
232	struct virtio_gpu_fence *fence;
233	int ret;
234	u32 offset = args->offset;
235
236	if (vgdev->has_virgl_3d == false)
237		return -ENOSYS;
238
239	virtio_gpu_create_context(dev, file);
240	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
241	if (objs == NULL)
242		return -ENOENT;
243
244	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
245	if (bo->guest_blob && !bo->host3d_blob) {
246		ret = -EINVAL;
247		goto err_put_free;
248	}
249
250	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
251		ret = -EINVAL;
252		goto err_put_free;
253	}
254
255	ret = virtio_gpu_array_lock_resv(objs);
256	if (ret != 0)
257		goto err_put_free;
258
259	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
260	if (!fence) {
261		ret = -ENOMEM;
262		goto err_unlock;
263	}
264
265	virtio_gpu_cmd_transfer_from_host_3d
266		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
267		 args->layer_stride, &args->box, objs, fence);
268	dma_fence_put(&fence->f);
269	virtio_gpu_notify(vgdev);
270	return 0;
271
272err_unlock:
273	virtio_gpu_array_unlock_resv(objs);
274err_put_free:
275	virtio_gpu_array_put_free(objs);
276	return ret;
277}
278
279static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
280					     struct drm_file *file)
281{
282	struct virtio_gpu_device *vgdev = dev->dev_private;
283	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
284	struct drm_virtgpu_3d_transfer_to_host *args = data;
285	struct virtio_gpu_object *bo;
286	struct virtio_gpu_object_array *objs;
287	struct virtio_gpu_fence *fence;
288	int ret;
289	u32 offset = args->offset;
290
291	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
292	if (objs == NULL)
293		return -ENOENT;
294
295	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
296	if (bo->guest_blob && !bo->host3d_blob) {
297		ret = -EINVAL;
298		goto err_put_free;
299	}
300
301	if (!vgdev->has_virgl_3d) {
302		virtio_gpu_cmd_transfer_to_host_2d
303			(vgdev, offset,
304			 args->box.w, args->box.h, args->box.x, args->box.y,
305			 objs, NULL);
306	} else {
307		virtio_gpu_create_context(dev, file);
308
309		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
310			ret = -EINVAL;
311			goto err_put_free;
312		}
313
314		ret = virtio_gpu_array_lock_resv(objs);
315		if (ret != 0)
316			goto err_put_free;
317
318		ret = -ENOMEM;
319		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
320					       0);
321		if (!fence)
322			goto err_unlock;
323
324		virtio_gpu_cmd_transfer_to_host_3d
325			(vgdev,
326			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
327			 args->stride, args->layer_stride, &args->box, objs,
328			 fence);
329		dma_fence_put(&fence->f);
330	}
331	virtio_gpu_notify(vgdev);
332	return 0;
333
334err_unlock:
335	virtio_gpu_array_unlock_resv(objs);
336err_put_free:
337	virtio_gpu_array_put_free(objs);
338	return ret;
339}
340
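   /*
    * VIRTGPU_WAIT is implemented directly on the GEM object's dma_resv:
    * NOWAIT merely polls the signaled state, otherwise we block
    * interruptibly for up to 15 seconds on all fences with read usage or
    * stronger.
    */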
341static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
342				 struct drm_file *file)
343{
344	struct drm_virtgpu_3d_wait *args = data;
345	struct drm_gem_object *obj;
346	long timeout = 15 * HZ;
347	int ret;
348
349	obj = drm_gem_object_lookup(file, args->handle);
350	if (obj == NULL)
351		return -ENOENT;
352
353	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
354		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
355	} else {
356		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
357					    true, timeout);
358	}
359	if (ret == 0)
360		ret = -EBUSY;
361	else if (ret > 0)
362		ret = 0;
363
364	drm_gem_object_put(obj);
365	return ret;
366}
367
368static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
369				void *data, struct drm_file *file)
370{
371	struct virtio_gpu_device *vgdev = dev->dev_private;
372	struct drm_virtgpu_get_caps *args = data;
373	unsigned size, host_caps_size;
374	int i;
375	int found_valid = -1;
376	int ret;
377	struct virtio_gpu_drv_cap_cache *cache_ent;
378	void *ptr;
379
380	if (vgdev->num_capsets == 0)
381		return -ENOSYS;
382
383	/* don't allow userspace to pass 0 */
384	if (args->size == 0)
385		return -EINVAL;
386
387	spin_lock(&vgdev->display_info_lock);
388	for (i = 0; i < vgdev->num_capsets; i++) {
389		if (vgdev->capsets[i].id == args->cap_set_id) {
390			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
391				found_valid = i;
392				break;
393			}
394		}
395	}
396
397	if (found_valid == -1) {
398		spin_unlock(&vgdev->display_info_lock);
399		return -EINVAL;
400	}
401
402	host_caps_size = vgdev->capsets[found_valid].max_size;
403	/* only copy to user the smaller of the host caps size and the guest caps size */
404	size = min(args->size, host_caps_size);
405
406	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
407		if (cache_ent->id == args->cap_set_id &&
408		    cache_ent->version == args->cap_set_ver) {
409			spin_unlock(&vgdev->display_info_lock);
410			goto copy_exit;
411		}
412	}
413	spin_unlock(&vgdev->display_info_lock);
414
415	/* not in cache - need to talk to hw */
416	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
417					&cache_ent);
418	if (ret)
419		return ret;
420	virtio_gpu_notify(vgdev);
421
422copy_exit:
423	ret = wait_event_timeout(vgdev->resp_wq,
424				 atomic_read(&cache_ent->is_valid), 5 * HZ);
425	if (!ret)
426		return -EBUSY;
427
428	/* is_valid check must precede the copy of the cache entry. */
429	smp_rmb();
430
431	ptr = cache_ent->caps_cache;
432
433	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
434		return -EFAULT;
435
436	return 0;
437}
438
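   /*
    * Check the userspace blob arguments against the device's advertised
    * features and fold them into &virtio_gpu_object_params; blob_mem
    * decides whether the object needs guest pages, host3d backing, or
    * both.
    */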
439static int verify_blob(struct virtio_gpu_device *vgdev,
440		       struct virtio_gpu_fpriv *vfpriv,
441		       struct virtio_gpu_object_params *params,
442		       struct drm_virtgpu_resource_create_blob *rc_blob,
443		       bool *guest_blob, bool *host3d_blob)
444{
445	if (!vgdev->has_resource_blob)
446		return -EINVAL;
447
448	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
449		return -EINVAL;
450
451	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
452		if (!vgdev->has_resource_assign_uuid)
453			return -EINVAL;
454	}
455
456	switch (rc_blob->blob_mem) {
457	case VIRTGPU_BLOB_MEM_GUEST:
458		*guest_blob = true;
459		break;
460	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
461		*guest_blob = true;
462		fallthrough;
463	case VIRTGPU_BLOB_MEM_HOST3D:
464		*host3d_blob = true;
465		break;
466	default:
467		return -EINVAL;
468	}
469
470	if (*host3d_blob) {
471		if (!vgdev->has_virgl_3d)
472			return -EINVAL;
473
474		/* Must be dword aligned. */
475		if (rc_blob->cmd_size % 4 != 0)
476			return -EINVAL;
477
478		params->ctx_id = vfpriv->ctx_id;
479		params->blob_id = rc_blob->blob_id;
480	} else {
481		if (rc_blob->blob_id != 0)
482			return -EINVAL;
483
484		if (rc_blob->cmd_size != 0)
485			return -EINVAL;
486	}
487
488	params->blob_mem = rc_blob->blob_mem;
489	params->size = rc_blob->size;
490	params->blob = true;
491	params->blob_flags = rc_blob->blob_flags;
492	return 0;
493}
494
495static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
496						 void *data,
497						 struct drm_file *file)
498{
499	int ret = 0;
500	uint32_t handle = 0;
501	bool guest_blob = false;
502	bool host3d_blob = false;
503	struct drm_gem_object *obj;
504	struct virtio_gpu_object *bo;
505	struct virtio_gpu_object_params params = { 0 };
506	struct virtio_gpu_device *vgdev = dev->dev_private;
507	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
508	struct drm_virtgpu_resource_create_blob *rc_blob = data;
509
510	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
511			&guest_blob, &host3d_blob))
512		return -EINVAL;
513
514	if (vgdev->has_virgl_3d)
515		virtio_gpu_create_context(dev, file);
516
517	if (rc_blob->cmd_size) {
518		void *buf;
519
520		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
521				  rc_blob->cmd_size);
522
523		if (IS_ERR(buf))
524			return PTR_ERR(buf);
525
526		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
527				      vfpriv->ctx_id, NULL, NULL);
528	}
529
530	if (guest_blob)
531		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
532	else if (!guest_blob && host3d_blob)
533		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
534	else
535		return -EINVAL;
536
537	if (ret < 0)
538		return ret;
539
540	bo->guest_blob = guest_blob;
541	bo->host3d_blob = host3d_blob;
542	bo->blob_mem = rc_blob->blob_mem;
543	bo->blob_flags = rc_blob->blob_flags;
544
545	obj = &bo->base.base;
546	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
547		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
548		if (ret) {
549			drm_gem_object_release(obj);
550			return ret;
551		}
552	}
553
554	ret = drm_gem_handle_create(file, obj, &handle);
555	if (ret) {
556		drm_gem_object_release(obj);
557		return ret;
558	}
559
560	rc_blob->res_handle = bo->hw_res_handle;
561	rc_blob->bo_handle = handle;
562
563	/*
564	 * The handle owns the reference now.  But we must drop our
565	 * remaining reference *after* we no longer need to dereference
566	 * the obj.  Otherwise userspace could guess the handle and
567	 * race closing it from another thread.
568	 */
569	drm_gem_object_put(obj);
570
571	return 0;
572}
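   /*
    * Editor's sketch (not part of the original source): creating a
    * mappable guest blob from userspace, assuming an open render node
    * in `fd`:
    *
    *	struct drm_virtgpu_resource_create_blob cb = {
    *		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
    *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
    *		.size       = 4096,
    *	};
    *	ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &cb);
    *	// on success, cb.bo_handle is the GEM handle and cb.res_handle
    *	// the host resource id
    */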
573
574static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
575					 void *data, struct drm_file *file)
576{
577	int ret = 0;
578	uint32_t num_params, i;
579	uint64_t valid_ring_mask, param, value;
580	size_t len;
581	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
582	struct virtio_gpu_device *vgdev = dev->dev_private;
583	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
584	struct drm_virtgpu_context_init *args = data;
585
586	num_params = args->num_params;
587	len = num_params * sizeof(struct drm_virtgpu_context_set_param);
588
589	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
590		return -EINVAL;
591
592	/* Number of unique parameters supported at this time. */
593	if (num_params > 4)
594		return -EINVAL;
595
596	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
597				     len);
598
599	if (IS_ERR(ctx_set_params))
600		return PTR_ERR(ctx_set_params);
601
602	mutex_lock(&vfpriv->context_lock);
603	if (vfpriv->context_created) {
604		ret = -EEXIST;
605		goto out_unlock;
606	}
607
608	for (i = 0; i < num_params; i++) {
609		param = ctx_set_params[i].param;
610		value = ctx_set_params[i].value;
611
612		switch (param) {
613		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
614			if (value > MAX_CAPSET_ID) {
615				ret = -EINVAL;
616				goto out_unlock;
617			}
618
619			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
620				ret = -EINVAL;
621				goto out_unlock;
622			}
623
624			/* Context capset ID already set */
625			if (vfpriv->context_init &
626			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
627				ret = -EINVAL;
628				goto out_unlock;
629			}
630
631			vfpriv->context_init |= value;
632			break;
633		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
634			if (vfpriv->base_fence_ctx) {
635				ret = -EINVAL;
636				goto out_unlock;
637			}
638
639			if (value > MAX_RINGS) {
640				ret = -EINVAL;
641				goto out_unlock;
642			}
643
644			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
645			vfpriv->num_rings = value;
646			break;
647		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
648			if (vfpriv->ring_idx_mask) {
649				ret = -EINVAL;
650				goto out_unlock;
651			}
652
653			vfpriv->ring_idx_mask = value;
654			break;
655		case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
656			if (vfpriv->explicit_debug_name) {
657				ret = -EINVAL;
658				goto out_unlock;
659			}
660
661			ret = strncpy_from_user(vfpriv->debug_name,
662						u64_to_user_ptr(value),
663						DEBUG_NAME_MAX_LEN - 1);
664			if (ret < 0)
665				goto out_unlock;
666
667			vfpriv->explicit_debug_name = true;
668			ret = 0;
669			break;
670		default:
671			ret = -EINVAL;
672			goto out_unlock;
673		}
674	}
675
676	if (vfpriv->ring_idx_mask) {
677		valid_ring_mask = 0;
678		for (i = 0; i < vfpriv->num_rings; i++)
679			valid_ring_mask |= 1ULL << i;
680
681		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
682			ret = -EINVAL;
683			goto out_unlock;
684		}
685	}
686
687	virtio_gpu_create_context_locked(vgdev, vfpriv);
688	virtio_gpu_notify(vgdev);
689
690out_unlock:
691	mutex_unlock(&vfpriv->context_lock);
692	kfree(ctx_set_params);
693	return ret;
694}
695
696struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
697	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
698			  DRM_RENDER_ALLOW),
699
700	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
701			  DRM_RENDER_ALLOW),
702
703	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
704			  DRM_RENDER_ALLOW),
705
706	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
707			  virtio_gpu_resource_create_ioctl,
708			  DRM_RENDER_ALLOW),
709
710	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
711			  DRM_RENDER_ALLOW),
712
713	/* make transfer async to the main ring? - not sure, can we
714	 * thread these in the underlying GL
715	 */
716	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
717			  virtio_gpu_transfer_from_host_ioctl,
718			  DRM_RENDER_ALLOW),
719	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
720			  virtio_gpu_transfer_to_host_ioctl,
721			  DRM_RENDER_ALLOW),
722
723	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
724			  DRM_RENDER_ALLOW),
725
726	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
727			  DRM_RENDER_ALLOW),
728
729	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
730			  virtio_gpu_resource_create_blob_ioctl,
731			  DRM_RENDER_ALLOW),
732
733	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
734			  DRM_RENDER_ALLOW),
735};
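
To close, a minimal sketch of the VIRTGPU_CONTEXT_INIT path added in the newer tree (not from the kernel tree; the open render node `fd`, the headers <sys/ioctl.h> and <drm/virtgpu_drm.h>, and the capset id value are assumptions), binding a context to a single capset before first use:

	struct drm_virtgpu_context_set_param p = {
		.param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,
		.value = 1,	/* must be set in the device's capset_id_mask */
	};
	struct drm_virtgpu_context_init init = {
		.num_params     = 1,
		.ctx_set_params = (uintptr_t)&p,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init))
		/* -EEXIST means a context was already created implicitly */
		perror("DRM_IOCTL_VIRTGPU_CONTEXT_INIT");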