/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
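
/*
 * Each vbuffer is followed in memory by a small inline command area
 * (MAX_INLINE_CMD_SIZE bytes) and an inline response area
 * (MAX_INLINE_RESP_SIZE bytes), so the common case needs no extra
 * allocations.  Responses larger than the inline area use a
 * caller-supplied resp_buf instead (see virtio_gpu_get_vbuf() below).
 */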

void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}

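/*
 * The two virtqueue callbacks below are called from the virtqueue
 * notification path and only kick the matching dequeue work; completed
 * buffers are reclaimed later by virtio_gpu_dequeue_ctrl_func() and
 * virtio_gpu_dequeue_cursor_func().
 */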
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

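/*
 * Dequeue work for the control queue: drain completed buffers under the
 * queue lock, then run the response callbacks and free the vbuffers
 * outside of it.  The highest fence id seen among the completed
 * responses is handed to virtio_gpu_fence_event_process() at the end.
 */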
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

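/*
 * Control queue buffer layout: sgs[0] is always the command
 * (driver->device), an optional data buffer follows it, and an optional
 * response buffer (device->driver) goes last.  On -ENOSPC the queue
 * lock is dropped while waiting for the host to complete requests,
 * hence the __releases/__acquires annotations below.
 */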
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
        __releases(&vgdev->ctrlq.qlock)
        __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

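/*
 * The command helpers below all follow the same pattern: grab a vbuf
 * with an inline command of the right size, zero it, fill in the
 * little-endian fields and queue it.  Commands that carry a fence go
 * through virtio_gpu_queue_fenced_ctrl_buffer() so the fence id is
 * emitted in submission order.
 */
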
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

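/*
 * Note: unlike the commands above, the rectangle parameters here are
 * already __le32 and are copied into the command without byte-swapping;
 * callers are expected to pass little-endian values.
 */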
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

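/*
 * Capability sets are cached: a new cache entry is linked into
 * vgdev->cap_cache before the command is queued, and
 * virtio_gpu_cmd_capset_cb() fills caps_cache and marks the entry valid
 * once the response arrives.  Callers can wait on vgdev->resp_wq for
 * is_valid to flip.
 */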
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        int max_size;
        void *resp_buf;

        /* bounds-check idx before indexing vgdev->capsets[] */
        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        max_size = vgdev->capsets[idx].max_size;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

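/*
 * Build the backing-store entry list from the object's sg table and
 * hand it to the host.  The array is passed via vbuf->data_buf and is
 * kfree'd by free_vbuf() once the ring has consumed the command; the
 * addresses come from sg_phys(), i.e. guest-physical addresses.
 */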
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}