/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

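/* Boxes travel to the host in virtio (little endian) byte order. */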
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

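/*
 * Reserve and validate every buffer object on @head; on any validation
 * failure the whole reservation is backed off so the caller never sees
 * a partially reserved list.
 */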
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

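/* Drop the GEM references taken while building a validate list. */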
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

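/*
 * Look up and validate the optional buffer list, copy the command
 * stream in from userspace and submit it to the host, then fence the
 * buffers with the submission fence and drop the local references.
 */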
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				/* drop the references taken so far */
				virtio_gpu_unref_list(&validate_list);
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	/* fence the command bo */
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;

	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}

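/* Report a single device parameter; only the 3D feature flag so far. */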
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

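/*
 * Allocate a host resource id and a backing GEM/TTM object, create the
 * resource on the host (2D, or full 3D when virgl is available) and
 * hand both handles back to userspace.
 */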
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

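/* Resolve a GEM handle to its size and host resource id. */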
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

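/*
 * Transfer a region of a 3D resource from the host into guest memory;
 * an exclusive fence is attached so later users wait for the copy.
 */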
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

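/*
 * Copy a capability set to userspace, fetching it from the host first
 * when it is not yet in the local cache.
 */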
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

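/* Create the host rendering context for this DRM file, at most once. */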
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

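/*
 * Create a host resource together with its backing GEM object; with
 * virgl the full 3D parameter set is forwarded, otherwise only plain
 * 2D textures are accepted.
 */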
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

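/*
 * Transfer a region of a host 3D resource back into guest memory.
 * Guest-only blobs have no host side to copy from, and the stride
 * arguments are only accepted for host3d blobs.
 */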
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

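/*
 * Wait for all fences on the object's reservation, or with
 * VIRTGPU_WAIT_NOWAIT just poll and report -EBUSY while busy.
 */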
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		ret = dma_resv_test_signaled(obj->resv, true);
	else
		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);

	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

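/*
 * Validate a blob resource request against the negotiated device
 * features and translate it into object-creation parameters.
 */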
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

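/*
 * Create a blob resource: guest-backed blobs become shmem-backed GEM
 * objects, host-only blobs are allocated from the host-visible region.
 */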
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),
};