1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Authors:
6 * Dave Airlie <airlied@redhat.com>
7 * Gerd Hoffmann <kraxel@redhat.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <drm/drmP.h>
30#include "virtgpu_drv.h"
31#include <linux/virtio.h>
32#include <linux/virtio_config.h>
33#include <linux/virtio_ring.h>
34
35#define MAX_INLINE_CMD_SIZE 96
36#define MAX_INLINE_RESP_SIZE 24
37#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
38 + MAX_INLINE_CMD_SIZE \
39 + MAX_INLINE_RESP_SIZE)
40
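/*
 * Resource ids are handed out by an IDR so every host resource gets a
 * unique, non-zero handle; no pointer is stored, the IDR only reserves
 * the number until virtio_gpu_resource_id_put() releases it.
 */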
41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
42 uint32_t *resid)
43{
44 int handle;
45
46 idr_preload(GFP_KERNEL);
47 spin_lock(&vgdev->resource_idr_lock);
48 handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
49 spin_unlock(&vgdev->resource_idr_lock);
50 idr_preload_end();
51 *resid = handle;
52}
53
54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
55{
56 spin_lock(&vgdev->resource_idr_lock);
57 idr_remove(&vgdev->resource_idr, id);
58 spin_unlock(&vgdev->resource_idr_lock);
59}
60
61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
62{
63 struct drm_device *dev = vq->vdev->priv;
64 struct virtio_gpu_device *vgdev = dev->dev_private;
65
66 schedule_work(&vgdev->ctrlq.dequeue_work);
67}
68
69void virtio_gpu_cursor_ack(struct virtqueue *vq)
70{
71 struct drm_device *dev = vq->vdev->priv;
72 struct virtio_gpu_device *vgdev = dev->dev_private;
73
74 schedule_work(&vgdev->cursorq.dequeue_work);
75}
76
77int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
78{
79 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
80 VBUFFER_SIZE,
81 __alignof__(struct virtio_gpu_vbuffer),
82 0, NULL);
83 if (!vgdev->vbufs)
84 return -ENOMEM;
85 return 0;
86}
87
88void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
89{
90 kmem_cache_destroy(vgdev->vbufs);
91 vgdev->vbufs = NULL;
92}
93
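/*
 * Allocate a vbuffer from the slab cache.  The command buffer and small
 * responses live inline, directly behind the struct (see VBUFFER_SIZE),
 * so most commands need no extra allocation; responses larger than
 * MAX_INLINE_RESP_SIZE use the caller-provided resp_buf instead.
 */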
94static struct virtio_gpu_vbuffer*
95virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
96 int size, int resp_size, void *resp_buf,
97 virtio_gpu_resp_cb resp_cb)
98{
99 struct virtio_gpu_vbuffer *vbuf;
100
101 vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
102 if (!vbuf)
103 return ERR_PTR(-ENOMEM);
104 memset(vbuf, 0, VBUFFER_SIZE);
105
106 BUG_ON(size > MAX_INLINE_CMD_SIZE);
107 vbuf->buf = (void *)vbuf + sizeof(*vbuf);
108 vbuf->size = size;
109
110 vbuf->resp_cb = resp_cb;
111 vbuf->resp_size = resp_size;
112 if (resp_size <= MAX_INLINE_RESP_SIZE)
113 vbuf->resp_buf = (void *)vbuf->buf + size;
114 else
115 vbuf->resp_buf = resp_buf;
116 BUG_ON(!vbuf->resp_buf);
117 return vbuf;
118}
119
120static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
121 struct virtio_gpu_vbuffer **vbuffer_p,
122 int size)
123{
124 struct virtio_gpu_vbuffer *vbuf;
125
126 vbuf = virtio_gpu_get_vbuf(vgdev, size,
127 sizeof(struct virtio_gpu_ctrl_hdr),
128 NULL, NULL);
129 if (IS_ERR(vbuf)) {
130 *vbuffer_p = NULL;
131 return ERR_CAST(vbuf);
132 }
133 *vbuffer_p = vbuf;
134 return vbuf->buf;
135}
136
137static struct virtio_gpu_update_cursor*
138virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
139 struct virtio_gpu_vbuffer **vbuffer_p)
140{
141 struct virtio_gpu_vbuffer *vbuf;
142
143 vbuf = virtio_gpu_get_vbuf
144 (vgdev, sizeof(struct virtio_gpu_update_cursor),
145 0, NULL, NULL);
146 if (IS_ERR(vbuf)) {
147 *vbuffer_p = NULL;
148 return ERR_CAST(vbuf);
149 }
150 *vbuffer_p = vbuf;
151 return (struct virtio_gpu_update_cursor *)vbuf->buf;
152}
153
154static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
155 virtio_gpu_resp_cb cb,
156 struct virtio_gpu_vbuffer **vbuffer_p,
157 int cmd_size, int resp_size,
158 void *resp_buf)
159{
160 struct virtio_gpu_vbuffer *vbuf;
161
162 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
163 resp_size, resp_buf, cb);
164 if (IS_ERR(vbuf)) {
165 *vbuffer_p = NULL;
166 return ERR_CAST(vbuf);
167 }
168 *vbuffer_p = vbuf;
169 return (struct virtio_gpu_command *)vbuf->buf;
170}
171
172static void free_vbuf(struct virtio_gpu_device *vgdev,
173 struct virtio_gpu_vbuffer *vbuf)
174{
175 if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
176 kfree(vbuf->resp_buf);
177 kfree(vbuf->data_buf);
178 kmem_cache_free(vgdev->vbufs, vbuf);
179}
180
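/* Pop all completed buffers off the virtqueue and collect them for freeing. */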
181static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
182{
183 struct virtio_gpu_vbuffer *vbuf;
184 unsigned int len;
185 int freed = 0;
186
187 while ((vbuf = virtqueue_get_buf(vq, &len))) {
188 list_add_tail(&vbuf->list, reclaim_list);
189 freed++;
190 }
191 if (freed == 0)
192 DRM_DEBUG("Huh? zero vbufs reclaimed");
193}
194
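/*
 * Work handler for the control queue: drain completed buffers while
 * holding the queue lock, then, outside the lock, log unexpected
 * responses, track the highest completed fence id, run per-command
 * response callbacks, free the buffers and finally signal fences.
 */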
195void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
196{
197 struct virtio_gpu_device *vgdev =
198 container_of(work, struct virtio_gpu_device,
199 ctrlq.dequeue_work);
200 struct list_head reclaim_list;
201 struct virtio_gpu_vbuffer *entry, *tmp;
202 struct virtio_gpu_ctrl_hdr *resp;
203 u64 fence_id = 0;
204
205 INIT_LIST_HEAD(&reclaim_list);
206 spin_lock(&vgdev->ctrlq.qlock);
207 do {
208 virtqueue_disable_cb(vgdev->ctrlq.vq);
209 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
210
211 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
212 spin_unlock(&vgdev->ctrlq.qlock);
213
214 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
215 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
216 if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
217 DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
218 if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
219 u64 f = le64_to_cpu(resp->fence_id);
220
221 if (fence_id > f) {
222 DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
223 __func__, fence_id, f);
224 } else {
225 fence_id = f;
226 }
227 }
228 if (entry->resp_cb)
229 entry->resp_cb(vgdev, entry);
230
231 list_del(&entry->list);
232 free_vbuf(vgdev, entry);
233 }
234 wake_up(&vgdev->ctrlq.ack_queue);
235
236 if (fence_id)
237 virtio_gpu_fence_event_process(vgdev, fence_id);
238}
239
240void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
241{
242 struct virtio_gpu_device *vgdev =
243 container_of(work, struct virtio_gpu_device,
244 cursorq.dequeue_work);
245 struct list_head reclaim_list;
246 struct virtio_gpu_vbuffer *entry, *tmp;
247
248 INIT_LIST_HEAD(&reclaim_list);
249 spin_lock(&vgdev->cursorq.qlock);
250 do {
251 virtqueue_disable_cb(vgdev->cursorq.vq);
252 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
253 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
254 spin_unlock(&vgdev->cursorq.qlock);
255
256 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
257 list_del(&entry->list);
258 free_vbuf(vgdev, entry);
259 }
260 wake_up(&vgdev->cursorq.ack_queue);
261}
262
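/*
 * Queue one command with the ctrl queue lock held: build the scatterlist
 * (command, optional data-out buffer, optional response-in buffer), add
 * it to the virtqueue and kick the queue.  On -ENOSPC the lock is dropped
 * while waiting for the host to free up descriptors, hence the
 * __releases/__acquires annotations below.
 */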
263static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
264 struct virtio_gpu_vbuffer *vbuf)
265 __releases(&vgdev->ctrlq.qlock)
266 __acquires(&vgdev->ctrlq.qlock)
267{
268 struct virtqueue *vq = vgdev->ctrlq.vq;
269 struct scatterlist *sgs[3], vcmd, vout, vresp;
270 int outcnt = 0, incnt = 0;
271 int ret;
272
273 if (!vgdev->vqs_ready)
274 return -ENODEV;
275
276 sg_init_one(&vcmd, vbuf->buf, vbuf->size);
277 sgs[outcnt + incnt] = &vcmd;
278 outcnt++;
279
280 if (vbuf->data_size) {
281 sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
282 sgs[outcnt + incnt] = &vout;
283 outcnt++;
284 }
285
286 if (vbuf->resp_size) {
287 sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
288 sgs[outcnt + incnt] = &vresp;
289 incnt++;
290 }
291
292retry:
293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 if (ret == -ENOSPC) {
295 spin_unlock(&vgdev->ctrlq.qlock);
296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 spin_lock(&vgdev->ctrlq.qlock);
298 goto retry;
299 } else {
300 virtqueue_kick(vq);
301 }
302
303 if (!ret)
304 ret = vq->num_free;
305 return ret;
306}
307
308static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
309 struct virtio_gpu_vbuffer *vbuf)
310{
311 int rc;
312
313 spin_lock(&vgdev->ctrlq.qlock);
314 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
315 spin_unlock(&vgdev->ctrlq.qlock);
316 return rc;
317}
318
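/*
 * For fenced commands, make sure there is enough virtqueue headroom
 * before emitting the fence, so the fence id can be assigned and the
 * command queued without dropping the lock in between (see the comment
 * inside the function).
 */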
319static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
320 struct virtio_gpu_vbuffer *vbuf,
321 struct virtio_gpu_ctrl_hdr *hdr,
322 struct virtio_gpu_fence **fence)
323{
324 struct virtqueue *vq = vgdev->ctrlq.vq;
325 int rc;
326
327again:
328 spin_lock(&vgdev->ctrlq.qlock);
329
330 /*
331	 * Make sure we have enough space in the virtqueue. If not
332 * wait here until we have.
333 *
334	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
335 * to wait for free space, which can result in fence ids being
336 * submitted out-of-order.
337 */
338 if (vq->num_free < 3) {
339 spin_unlock(&vgdev->ctrlq.qlock);
340 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
341 goto again;
342 }
343
344 if (fence)
345 virtio_gpu_fence_emit(vgdev, hdr, fence);
346 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
347 spin_unlock(&vgdev->ctrlq.qlock);
348 return rc;
349}
350
351static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
352 struct virtio_gpu_vbuffer *vbuf)
353{
354 struct virtqueue *vq = vgdev->cursorq.vq;
355 struct scatterlist *sgs[1], ccmd;
356 int ret;
357 int outcnt;
358
359 if (!vgdev->vqs_ready)
360 return -ENODEV;
361
362 sg_init_one(&ccmd, vbuf->buf, vbuf->size);
363 sgs[0] = &ccmd;
364 outcnt = 1;
365
366 spin_lock(&vgdev->cursorq.qlock);
367retry:
368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 if (ret == -ENOSPC) {
370 spin_unlock(&vgdev->cursorq.qlock);
371 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 spin_lock(&vgdev->cursorq.qlock);
373 goto retry;
374 } else {
375 virtqueue_kick(vq);
376 }
377
378 spin_unlock(&vgdev->cursorq.qlock);
379
380 if (!ret)
381 ret = vq->num_free;
382 return ret;
383}
384
385/* just create gem objects for userspace and long lived objects,
386 * just use dma_alloced pages for the queue objects?
387 */
388
389/* create a basic resource */
390void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
391 uint32_t resource_id,
392 uint32_t format,
393 uint32_t width,
394 uint32_t height)
395{
396 struct virtio_gpu_resource_create_2d *cmd_p;
397 struct virtio_gpu_vbuffer *vbuf;
398
399 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
400 memset(cmd_p, 0, sizeof(*cmd_p));
401
402 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
403 cmd_p->resource_id = cpu_to_le32(resource_id);
404 cmd_p->format = cpu_to_le32(format);
405 cmd_p->width = cpu_to_le32(width);
406 cmd_p->height = cpu_to_le32(height);
407
408 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
409}
410
411void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
412 uint32_t resource_id)
413{
414 struct virtio_gpu_resource_unref *cmd_p;
415 struct virtio_gpu_vbuffer *vbuf;
416
417 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
418 memset(cmd_p, 0, sizeof(*cmd_p));
419
420 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
421 cmd_p->resource_id = cpu_to_le32(resource_id);
422
423 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
424}
425
426void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
427 uint32_t resource_id)
428{
429 struct virtio_gpu_resource_detach_backing *cmd_p;
430 struct virtio_gpu_vbuffer *vbuf;
431
432 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
433 memset(cmd_p, 0, sizeof(*cmd_p));
434
435 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
436 cmd_p->resource_id = cpu_to_le32(resource_id);
437
438 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
439}
440
441void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
442 uint32_t scanout_id, uint32_t resource_id,
443 uint32_t width, uint32_t height,
444 uint32_t x, uint32_t y)
445{
446 struct virtio_gpu_set_scanout *cmd_p;
447 struct virtio_gpu_vbuffer *vbuf;
448
449 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
450 memset(cmd_p, 0, sizeof(*cmd_p));
451
452 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
453 cmd_p->resource_id = cpu_to_le32(resource_id);
454 cmd_p->scanout_id = cpu_to_le32(scanout_id);
455 cmd_p->r.width = cpu_to_le32(width);
456 cmd_p->r.height = cpu_to_le32(height);
457 cmd_p->r.x = cpu_to_le32(x);
458 cmd_p->r.y = cpu_to_le32(y);
459
460 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
461}
462
463void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
464 uint32_t resource_id,
465 uint32_t x, uint32_t y,
466 uint32_t width, uint32_t height)
467{
468 struct virtio_gpu_resource_flush *cmd_p;
469 struct virtio_gpu_vbuffer *vbuf;
470
471 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
472 memset(cmd_p, 0, sizeof(*cmd_p));
473
474 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
475 cmd_p->resource_id = cpu_to_le32(resource_id);
476 cmd_p->r.width = cpu_to_le32(width);
477 cmd_p->r.height = cpu_to_le32(height);
478 cmd_p->r.x = cpu_to_le32(x);
479 cmd_p->r.y = cpu_to_le32(y);
480
481 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
482}
483
484void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
485 uint32_t resource_id, uint64_t offset,
486 __le32 width, __le32 height,
487 __le32 x, __le32 y,
488 struct virtio_gpu_fence **fence)
489{
490 struct virtio_gpu_transfer_to_host_2d *cmd_p;
491 struct virtio_gpu_vbuffer *vbuf;
492
493 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
494 memset(cmd_p, 0, sizeof(*cmd_p));
495
496 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
497 cmd_p->resource_id = cpu_to_le32(resource_id);
498 cmd_p->offset = cpu_to_le64(offset);
499 cmd_p->r.width = width;
500 cmd_p->r.height = height;
501 cmd_p->r.x = x;
502 cmd_p->r.y = y;
503
504 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
505}
506
507static void
508virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
509 uint32_t resource_id,
510 struct virtio_gpu_mem_entry *ents,
511 uint32_t nents,
512 struct virtio_gpu_fence **fence)
513{
514 struct virtio_gpu_resource_attach_backing *cmd_p;
515 struct virtio_gpu_vbuffer *vbuf;
516
517 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
518 memset(cmd_p, 0, sizeof(*cmd_p));
519
520 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
521 cmd_p->resource_id = cpu_to_le32(resource_id);
522 cmd_p->nr_entries = cpu_to_le32(nents);
523
524 vbuf->data_buf = ents;
525 vbuf->data_size = sizeof(*ents) * nents;
526
527 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
528}
529
530static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
531 struct virtio_gpu_vbuffer *vbuf)
532{
533 struct virtio_gpu_resp_display_info *resp =
534 (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
535 int i;
536
537 spin_lock(&vgdev->display_info_lock);
538 for (i = 0; i < vgdev->num_scanouts; i++) {
539 vgdev->outputs[i].info = resp->pmodes[i];
540 if (resp->pmodes[i].enabled) {
541 DRM_DEBUG("output %d: %dx%d+%d+%d", i,
542 le32_to_cpu(resp->pmodes[i].r.width),
543 le32_to_cpu(resp->pmodes[i].r.height),
544 le32_to_cpu(resp->pmodes[i].r.x),
545 le32_to_cpu(resp->pmodes[i].r.y));
546 } else {
547 DRM_DEBUG("output %d: disabled", i);
548 }
549 }
550
551 vgdev->display_info_pending = false;
552 spin_unlock(&vgdev->display_info_lock);
553 wake_up(&vgdev->resp_wq);
554
555 if (!drm_helper_hpd_irq_event(vgdev->ddev))
556 drm_kms_helper_hotplug_event(vgdev->ddev);
557}
558
559static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
560 struct virtio_gpu_vbuffer *vbuf)
561{
562 struct virtio_gpu_get_capset_info *cmd =
563 (struct virtio_gpu_get_capset_info *)vbuf->buf;
564 struct virtio_gpu_resp_capset_info *resp =
565 (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
566 int i = le32_to_cpu(cmd->capset_index);
567
568 spin_lock(&vgdev->display_info_lock);
569 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
570 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
571 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
572 spin_unlock(&vgdev->display_info_lock);
573 wake_up(&vgdev->resp_wq);
574}
575
576static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
577 struct virtio_gpu_vbuffer *vbuf)
578{
579 struct virtio_gpu_get_capset *cmd =
580 (struct virtio_gpu_get_capset *)vbuf->buf;
581 struct virtio_gpu_resp_capset *resp =
582 (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
583 struct virtio_gpu_drv_cap_cache *cache_ent;
584
585 spin_lock(&vgdev->display_info_lock);
586 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
587 if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
588 cache_ent->id == le32_to_cpu(cmd->capset_id)) {
589 memcpy(cache_ent->caps_cache, resp->capset_data,
590 cache_ent->size);
591 atomic_set(&cache_ent->is_valid, 1);
592 break;
593 }
594 }
595 spin_unlock(&vgdev->display_info_lock);
596 wake_up(&vgdev->resp_wq);
597}
598
599int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
600{
601 struct virtio_gpu_ctrl_hdr *cmd_p;
602 struct virtio_gpu_vbuffer *vbuf;
603 void *resp_buf;
604
605 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
606 GFP_KERNEL);
607 if (!resp_buf)
608 return -ENOMEM;
609
610 cmd_p = virtio_gpu_alloc_cmd_resp
611 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
612 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
613 resp_buf);
614 memset(cmd_p, 0, sizeof(*cmd_p));
615
616 vgdev->display_info_pending = true;
617 cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
618 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
619 return 0;
620}
621
622int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
623{
624 struct virtio_gpu_get_capset_info *cmd_p;
625 struct virtio_gpu_vbuffer *vbuf;
626 void *resp_buf;
627
628 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
629 GFP_KERNEL);
630 if (!resp_buf)
631 return -ENOMEM;
632
633 cmd_p = virtio_gpu_alloc_cmd_resp
634 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
635 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
636 resp_buf);
637 memset(cmd_p, 0, sizeof(*cmd_p));
638
639 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
640 cmd_p->capset_index = cpu_to_le32(idx);
641 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
642 return 0;
643}
644
645int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
646 int idx, int version,
647 struct virtio_gpu_drv_cap_cache **cache_p)
648{
649 struct virtio_gpu_get_capset *cmd_p;
650 struct virtio_gpu_vbuffer *vbuf;
651 int max_size = vgdev->capsets[idx].max_size;
652 struct virtio_gpu_drv_cap_cache *cache_ent;
653 void *resp_buf;
654
655 if (idx > vgdev->num_capsets)
656 return -EINVAL;
657
658 if (version > vgdev->capsets[idx].max_version)
659 return -EINVAL;
660
661 cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
662 if (!cache_ent)
663 return -ENOMEM;
664
665 cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
666 if (!cache_ent->caps_cache) {
667 kfree(cache_ent);
668 return -ENOMEM;
669 }
670
671 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
672 GFP_KERNEL);
673 if (!resp_buf) {
674 kfree(cache_ent->caps_cache);
675 kfree(cache_ent);
676 return -ENOMEM;
677 }
678
679 cache_ent->version = version;
680 cache_ent->id = vgdev->capsets[idx].id;
681 atomic_set(&cache_ent->is_valid, 0);
682 cache_ent->size = max_size;
683 spin_lock(&vgdev->display_info_lock);
684 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
685 spin_unlock(&vgdev->display_info_lock);
686
687 cmd_p = virtio_gpu_alloc_cmd_resp
688 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
689 sizeof(struct virtio_gpu_resp_capset) + max_size,
690 resp_buf);
691 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
692 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
693 cmd_p->capset_version = cpu_to_le32(version);
694 *cache_p = cache_ent;
695 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
696
697 return 0;
698}
699
700void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
701 uint32_t nlen, const char *name)
702{
703 struct virtio_gpu_ctx_create *cmd_p;
704 struct virtio_gpu_vbuffer *vbuf;
705
706 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
707 memset(cmd_p, 0, sizeof(*cmd_p));
708
709 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
710 cmd_p->hdr.ctx_id = cpu_to_le32(id);
711 cmd_p->nlen = cpu_to_le32(nlen);
712 strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
713 cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
714 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
715}
716
717void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
718 uint32_t id)
719{
720 struct virtio_gpu_ctx_destroy *cmd_p;
721 struct virtio_gpu_vbuffer *vbuf;
722
723 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
724 memset(cmd_p, 0, sizeof(*cmd_p));
725
726 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
727 cmd_p->hdr.ctx_id = cpu_to_le32(id);
728 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
729}
730
731void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
732 uint32_t ctx_id,
733 uint32_t resource_id)
734{
735 struct virtio_gpu_ctx_resource *cmd_p;
736 struct virtio_gpu_vbuffer *vbuf;
737
738 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
739 memset(cmd_p, 0, sizeof(*cmd_p));
740
741 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
742 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
743 cmd_p->resource_id = cpu_to_le32(resource_id);
744 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
745
746}
747
748void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
749 uint32_t ctx_id,
750 uint32_t resource_id)
751{
752 struct virtio_gpu_ctx_resource *cmd_p;
753 struct virtio_gpu_vbuffer *vbuf;
754
755 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
756 memset(cmd_p, 0, sizeof(*cmd_p));
757
758 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
759 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
760 cmd_p->resource_id = cpu_to_le32(resource_id);
761 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
762}
763
764void
765virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
766 struct virtio_gpu_resource_create_3d *rc_3d,
767 struct virtio_gpu_fence **fence)
768{
769 struct virtio_gpu_resource_create_3d *cmd_p;
770 struct virtio_gpu_vbuffer *vbuf;
771
772 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
773 memset(cmd_p, 0, sizeof(*cmd_p));
774
775 *cmd_p = *rc_3d;
776 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
777 cmd_p->hdr.flags = 0;
778
779 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
780}
781
782void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
783 uint32_t resource_id, uint32_t ctx_id,
784 uint64_t offset, uint32_t level,
785 struct virtio_gpu_box *box,
786 struct virtio_gpu_fence **fence)
787{
788 struct virtio_gpu_transfer_host_3d *cmd_p;
789 struct virtio_gpu_vbuffer *vbuf;
790
791 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
792 memset(cmd_p, 0, sizeof(*cmd_p));
793
794 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
795 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
796 cmd_p->resource_id = cpu_to_le32(resource_id);
797 cmd_p->box = *box;
798 cmd_p->offset = cpu_to_le64(offset);
799 cmd_p->level = cpu_to_le32(level);
800
801 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
802}
803
804void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
805 uint32_t resource_id, uint32_t ctx_id,
806 uint64_t offset, uint32_t level,
807 struct virtio_gpu_box *box,
808 struct virtio_gpu_fence **fence)
809{
810 struct virtio_gpu_transfer_host_3d *cmd_p;
811 struct virtio_gpu_vbuffer *vbuf;
812
813 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
814 memset(cmd_p, 0, sizeof(*cmd_p));
815
816 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
817 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
818 cmd_p->resource_id = cpu_to_le32(resource_id);
819 cmd_p->box = *box;
820 cmd_p->offset = cpu_to_le64(offset);
821 cmd_p->level = cpu_to_le32(level);
822
823 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
824}
825
826void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
827 void *data, uint32_t data_size,
828 uint32_t ctx_id, struct virtio_gpu_fence **fence)
829{
830 struct virtio_gpu_cmd_submit *cmd_p;
831 struct virtio_gpu_vbuffer *vbuf;
832
833 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
834 memset(cmd_p, 0, sizeof(*cmd_p));
835
836 vbuf->data_buf = data;
837 vbuf->data_size = data_size;
838
839 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
840 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
841 cmd_p->size = cpu_to_le32(data_size);
842
843 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
844}
845
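/*
 * Attach guest pages as backing store for a host resource: build a
 * virtio_gpu_mem_entry array from the object's sg table (it is freed once
 * the ring has consumed the command) and send RESOURCE_ATTACH_BACKING.
 */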
846int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
847 struct virtio_gpu_object *obj,
848 uint32_t resource_id,
849 struct virtio_gpu_fence **fence)
850{
851 struct virtio_gpu_mem_entry *ents;
852 struct scatterlist *sg;
853 int si;
854
855 if (!obj->pages) {
856 int ret;
857
858 ret = virtio_gpu_object_get_sg_table(vgdev, obj);
859 if (ret)
860 return ret;
861 }
862
863 /* gets freed when the ring has consumed it */
864 ents = kmalloc_array(obj->pages->nents,
865 sizeof(struct virtio_gpu_mem_entry),
866 GFP_KERNEL);
867 if (!ents) {
868 DRM_ERROR("failed to allocate ent list\n");
869 return -ENOMEM;
870 }
871
872 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
873 ents[si].addr = cpu_to_le64(sg_phys(sg));
874 ents[si].length = cpu_to_le32(sg->length);
875 ents[si].padding = 0;
876 }
877
878 virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
879 ents, obj->pages->nents,
880 fence);
881 obj->hw_res_handle = resource_id;
882 return 0;
883}
884
885void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
886 struct virtio_gpu_output *output)
887{
888 struct virtio_gpu_vbuffer *vbuf;
889 struct virtio_gpu_update_cursor *cur_p;
890
891 output->cursor.pos.scanout_id = cpu_to_le32(output->index);
892 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
893 memcpy(cur_p, &output->cursor, sizeof(output->cursor));
894 virtio_gpu_queue_cursor(vgdev, vbuf);
895}
1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Authors:
6 * Dave Airlie <airlied@redhat.com>
7 * Gerd Hoffmann <kraxel@redhat.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <linux/dma-mapping.h>
30#include <linux/virtio.h>
31#include <linux/virtio_config.h>
32#include <linux/virtio_ring.h>
33
34#include <drm/drm_edid.h>
35
36#include "virtgpu_drv.h"
37#include "virtgpu_trace.h"
38
39#define MAX_INLINE_CMD_SIZE 96
40#define MAX_INLINE_RESP_SIZE 24
41#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
42 + MAX_INLINE_CMD_SIZE \
43 + MAX_INLINE_RESP_SIZE)
44
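/* Convert a box from the ioctl layout to the little-endian wire format. */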
45static void convert_to_hw_box(struct virtio_gpu_box *dst,
46 const struct drm_virtgpu_3d_box *src)
47{
48 dst->x = cpu_to_le32(src->x);
49 dst->y = cpu_to_le32(src->y);
50 dst->z = cpu_to_le32(src->z);
51 dst->w = cpu_to_le32(src->w);
52 dst->h = cpu_to_le32(src->h);
53 dst->d = cpu_to_le32(src->d);
54}
55
56void virtio_gpu_ctrl_ack(struct virtqueue *vq)
57{
58 struct drm_device *dev = vq->vdev->priv;
59 struct virtio_gpu_device *vgdev = dev->dev_private;
60
61 schedule_work(&vgdev->ctrlq.dequeue_work);
62}
63
64void virtio_gpu_cursor_ack(struct virtqueue *vq)
65{
66 struct drm_device *dev = vq->vdev->priv;
67 struct virtio_gpu_device *vgdev = dev->dev_private;
68
69 schedule_work(&vgdev->cursorq.dequeue_work);
70}
71
72int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
73{
74 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
75 VBUFFER_SIZE,
76 __alignof__(struct virtio_gpu_vbuffer),
77 0, NULL);
78 if (!vgdev->vbufs)
79 return -ENOMEM;
80 return 0;
81}
82
83void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
84{
85 kmem_cache_destroy(vgdev->vbufs);
86 vgdev->vbufs = NULL;
87}
88
89static struct virtio_gpu_vbuffer*
90virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
91 int size, int resp_size, void *resp_buf,
92 virtio_gpu_resp_cb resp_cb)
93{
94 struct virtio_gpu_vbuffer *vbuf;
95
96 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
97
98 BUG_ON(size > MAX_INLINE_CMD_SIZE ||
99 size < sizeof(struct virtio_gpu_ctrl_hdr));
100 vbuf->buf = (void *)vbuf + sizeof(*vbuf);
101 vbuf->size = size;
102
103 vbuf->resp_cb = resp_cb;
104 vbuf->resp_size = resp_size;
105 if (resp_size <= MAX_INLINE_RESP_SIZE)
106 vbuf->resp_buf = (void *)vbuf->buf + size;
107 else
108 vbuf->resp_buf = resp_buf;
109 BUG_ON(!vbuf->resp_buf);
110 return vbuf;
111}
112
113static struct virtio_gpu_ctrl_hdr *
114virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
115{
116 /* this assumes a vbuf contains a command that starts with a
117 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
118 * virtqueues.
119 */
120 return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
121}
122
123static struct virtio_gpu_update_cursor*
124virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
125 struct virtio_gpu_vbuffer **vbuffer_p)
126{
127 struct virtio_gpu_vbuffer *vbuf;
128
129 vbuf = virtio_gpu_get_vbuf
130 (vgdev, sizeof(struct virtio_gpu_update_cursor),
131 0, NULL, NULL);
132 if (IS_ERR(vbuf)) {
133 *vbuffer_p = NULL;
134 return ERR_CAST(vbuf);
135 }
136 *vbuffer_p = vbuf;
137 return (struct virtio_gpu_update_cursor *)vbuf->buf;
138}
139
140static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
141 virtio_gpu_resp_cb cb,
142 struct virtio_gpu_vbuffer **vbuffer_p,
143 int cmd_size, int resp_size,
144 void *resp_buf)
145{
146 struct virtio_gpu_vbuffer *vbuf;
147
148 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
149 resp_size, resp_buf, cb);
150 *vbuffer_p = vbuf;
151 return (struct virtio_gpu_command *)vbuf->buf;
152}
153
154static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
155 struct virtio_gpu_vbuffer **vbuffer_p,
156 int size)
157{
158 return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
159 sizeof(struct virtio_gpu_ctrl_hdr),
160 NULL);
161}
162
163static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
164 struct virtio_gpu_vbuffer **vbuffer_p,
165 int size,
166 virtio_gpu_resp_cb cb)
167{
168 return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
169 sizeof(struct virtio_gpu_ctrl_hdr),
170 NULL);
171}
172
173static void free_vbuf(struct virtio_gpu_device *vgdev,
174 struct virtio_gpu_vbuffer *vbuf)
175{
176 if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
177 kfree(vbuf->resp_buf);
178 kvfree(vbuf->data_buf);
179 kmem_cache_free(vgdev->vbufs, vbuf);
180}
181
182static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
183{
184 struct virtio_gpu_vbuffer *vbuf;
185 unsigned int len;
186 int freed = 0;
187
188 while ((vbuf = virtqueue_get_buf(vq, &len))) {
189 list_add_tail(&vbuf->list, reclaim_list);
190 freed++;
191 }
192 if (freed == 0)
193 DRM_DEBUG("Huh? zero vbufs reclaimed");
194}
195
196void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
197{
198 struct virtio_gpu_device *vgdev =
199 container_of(work, struct virtio_gpu_device,
200 ctrlq.dequeue_work);
201 struct list_head reclaim_list;
202 struct virtio_gpu_vbuffer *entry, *tmp;
203 struct virtio_gpu_ctrl_hdr *resp;
204 u64 fence_id;
205
206 INIT_LIST_HEAD(&reclaim_list);
207 spin_lock(&vgdev->ctrlq.qlock);
208 do {
209 virtqueue_disable_cb(vgdev->ctrlq.vq);
210 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
211
212 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
213 spin_unlock(&vgdev->ctrlq.qlock);
214
215 list_for_each_entry(entry, &reclaim_list, list) {
216 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
217
218 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
219
220 if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
221 if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
222 struct virtio_gpu_ctrl_hdr *cmd;
223 cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
224 DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
225 le32_to_cpu(resp->type),
226 le32_to_cpu(cmd->type));
227 } else
228 DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
229 }
230 if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
231 fence_id = le64_to_cpu(resp->fence_id);
232 virtio_gpu_fence_event_process(vgdev, fence_id);
233 }
234 if (entry->resp_cb)
235 entry->resp_cb(vgdev, entry);
236 }
237 wake_up(&vgdev->ctrlq.ack_queue);
238
239 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
240 if (entry->objs)
241 virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
242 list_del(&entry->list);
243 free_vbuf(vgdev, entry);
244 }
245}
246
247void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
248{
249 struct virtio_gpu_device *vgdev =
250 container_of(work, struct virtio_gpu_device,
251 cursorq.dequeue_work);
252 struct list_head reclaim_list;
253 struct virtio_gpu_vbuffer *entry, *tmp;
254
255 INIT_LIST_HEAD(&reclaim_list);
256 spin_lock(&vgdev->cursorq.qlock);
257 do {
258 virtqueue_disable_cb(vgdev->cursorq.vq);
259 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
260 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
261 spin_unlock(&vgdev->cursorq.qlock);
262
263 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
264 list_del(&entry->list);
265 free_vbuf(vgdev, entry);
266 }
267 wake_up(&vgdev->cursorq.ack_queue);
268}
269
270/* Create sg_table from a vmalloc'd buffer. */
271static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
272{
273 int ret, s, i;
274 struct sg_table *sgt;
275 struct scatterlist *sg;
276 struct page *pg;
277
278 if (WARN_ON(!PAGE_ALIGNED(data)))
279 return NULL;
280
281 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
282 if (!sgt)
283 return NULL;
284
285 *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
286 ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
287 if (ret) {
288 kfree(sgt);
289 return NULL;
290 }
291
292 for_each_sgtable_sg(sgt, sg, i) {
293 pg = vmalloc_to_page(data);
294 if (!pg) {
295 sg_free_table(sgt);
296 kfree(sgt);
297 return NULL;
298 }
299
300 s = min_t(int, PAGE_SIZE, size);
301 sg_set_page(sg, pg, s, 0);
302
303 size -= s;
304 data += s;
305 }
306
307 return sgt;
308}
309
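/*
 * Add a prepared scatterlist to the control virtqueue.  Waits, with the
 * queue lock dropped, until enough descriptors are free (just one when
 * indirect descriptors are supported), emits the fence once the buffer's
 * place in the queue is fixed, and only marks the command as pending
 * instead of kicking the queue; virtio_gpu_notify() does the actual kick.
 */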
310static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
311 struct virtio_gpu_vbuffer *vbuf,
312 struct virtio_gpu_fence *fence,
313 int elemcnt,
314 struct scatterlist **sgs,
315 int outcnt,
316 int incnt)
317{
318 struct virtqueue *vq = vgdev->ctrlq.vq;
319 int ret, idx;
320
321 if (!drm_dev_enter(vgdev->ddev, &idx)) {
322 if (fence && vbuf->objs)
323 virtio_gpu_array_unlock_resv(vbuf->objs);
324 free_vbuf(vgdev, vbuf);
325 return -ENODEV;
326 }
327
328 if (vgdev->has_indirect)
329 elemcnt = 1;
330
331again:
332 spin_lock(&vgdev->ctrlq.qlock);
333
334 if (vq->num_free < elemcnt) {
335 spin_unlock(&vgdev->ctrlq.qlock);
336 virtio_gpu_notify(vgdev);
337 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
338 goto again;
339 }
340
341 /* now that the position of the vbuf in the virtqueue is known, we can
342 * finally set the fence id
343 */
344 if (fence) {
345 virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
346 fence);
347 if (vbuf->objs) {
348 virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
349 virtio_gpu_array_unlock_resv(vbuf->objs);
350 }
351 }
352
353 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
354 WARN_ON(ret);
355
356 trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
357
358 atomic_inc(&vgdev->pending_commands);
359
360 spin_unlock(&vgdev->ctrlq.qlock);
361
362 drm_dev_exit(idx);
363 return 0;
364}
365
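/*
 * Build the scatterlists for a command: the command itself, an optional
 * data-out buffer (converted to an sg_table when it is vmalloc memory),
 * and an optional response-in buffer, then hand everything to
 * virtio_gpu_queue_ctrl_sgs().
 */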
366static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
367 struct virtio_gpu_vbuffer *vbuf,
368 struct virtio_gpu_fence *fence)
369{
370 struct scatterlist *sgs[3], vcmd, vout, vresp;
371 struct sg_table *sgt = NULL;
372 int elemcnt = 0, outcnt = 0, incnt = 0, ret;
373
374 /* set up vcmd */
375 sg_init_one(&vcmd, vbuf->buf, vbuf->size);
376 elemcnt++;
377 sgs[outcnt] = &vcmd;
378 outcnt++;
379
380 /* set up vout */
381 if (vbuf->data_size) {
382 if (is_vmalloc_addr(vbuf->data_buf)) {
383 int sg_ents;
384 sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
385 &sg_ents);
386 if (!sgt) {
387 if (fence && vbuf->objs)
388 virtio_gpu_array_unlock_resv(vbuf->objs);
389 return -ENOMEM;
390 }
391
392 elemcnt += sg_ents;
393 sgs[outcnt] = sgt->sgl;
394 } else {
395 sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
396 elemcnt++;
397 sgs[outcnt] = &vout;
398 }
399 outcnt++;
400 }
401
402 /* set up vresp */
403 if (vbuf->resp_size) {
404 sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
405 elemcnt++;
406 sgs[outcnt + incnt] = &vresp;
407 incnt++;
408 }
409
410 ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
411 incnt);
412
413 if (sgt) {
414 sg_free_table(sgt);
415 kfree(sgt);
416 }
417 return ret;
418}
419
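/*
 * Flush pending control commands to the host with a single virtqueue
 * kick, so that a burst of queued commands results in only one host
 * notification.
 */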
420void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
421{
422 bool notify;
423
424 if (!atomic_read(&vgdev->pending_commands))
425 return;
426
427 spin_lock(&vgdev->ctrlq.qlock);
428 atomic_set(&vgdev->pending_commands, 0);
429 notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
430 spin_unlock(&vgdev->ctrlq.qlock);
431
432 if (notify)
433 virtqueue_notify(vgdev->ctrlq.vq);
434}
435
436static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
437 struct virtio_gpu_vbuffer *vbuf)
438{
439 return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
440}
441
442static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
443 struct virtio_gpu_vbuffer *vbuf)
444{
445 struct virtqueue *vq = vgdev->cursorq.vq;
446 struct scatterlist *sgs[1], ccmd;
447 int idx, ret, outcnt;
448 bool notify;
449
450 if (!drm_dev_enter(vgdev->ddev, &idx)) {
451 free_vbuf(vgdev, vbuf);
452 return;
453 }
454
455 sg_init_one(&ccmd, vbuf->buf, vbuf->size);
456 sgs[0] = &ccmd;
457 outcnt = 1;
458
459 spin_lock(&vgdev->cursorq.qlock);
460retry:
461 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
462 if (ret == -ENOSPC) {
463 spin_unlock(&vgdev->cursorq.qlock);
464 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
465 spin_lock(&vgdev->cursorq.qlock);
466 goto retry;
467 } else {
468 trace_virtio_gpu_cmd_queue(vq,
469 virtio_gpu_vbuf_ctrl_hdr(vbuf));
470
471 notify = virtqueue_kick_prepare(vq);
472 }
473
474 spin_unlock(&vgdev->cursorq.qlock);
475
476 if (notify)
477 virtqueue_notify(vq);
478
479 drm_dev_exit(idx);
480}
481
482/* just create gem objects for userspace and long lived objects,
483 * just use dma_alloced pages for the queue objects?
484 */
485
486/* create a basic resource */
487void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
488 struct virtio_gpu_object *bo,
489 struct virtio_gpu_object_params *params,
490 struct virtio_gpu_object_array *objs,
491 struct virtio_gpu_fence *fence)
492{
493 struct virtio_gpu_resource_create_2d *cmd_p;
494 struct virtio_gpu_vbuffer *vbuf;
495
496 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
497 memset(cmd_p, 0, sizeof(*cmd_p));
498 vbuf->objs = objs;
499
500 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
501 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
502 cmd_p->format = cpu_to_le32(params->format);
503 cmd_p->width = cpu_to_le32(params->width);
504 cmd_p->height = cpu_to_le32(params->height);
505
506 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
507 bo->created = true;
508}
509
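/*
 * Resource teardown is asynchronous: the object is cleaned up from the
 * response callback once the host has processed RESOURCE_UNREF, or
 * immediately if queueing the command failed.
 */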
510static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
511 struct virtio_gpu_vbuffer *vbuf)
512{
513 struct virtio_gpu_object *bo;
514
515 bo = vbuf->resp_cb_data;
516 vbuf->resp_cb_data = NULL;
517
518 virtio_gpu_cleanup_object(bo);
519}
520
521void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
522 struct virtio_gpu_object *bo)
523{
524 struct virtio_gpu_resource_unref *cmd_p;
525 struct virtio_gpu_vbuffer *vbuf;
526 int ret;
527
528 cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
529 virtio_gpu_cmd_unref_cb);
530 memset(cmd_p, 0, sizeof(*cmd_p));
531
532 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
533 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
534
535 vbuf->resp_cb_data = bo;
536 ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
537 if (ret < 0)
538 virtio_gpu_cleanup_object(bo);
539}
540
541void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
542 uint32_t scanout_id, uint32_t resource_id,
543 uint32_t width, uint32_t height,
544 uint32_t x, uint32_t y)
545{
546 struct virtio_gpu_set_scanout *cmd_p;
547 struct virtio_gpu_vbuffer *vbuf;
548
549 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
550 memset(cmd_p, 0, sizeof(*cmd_p));
551
552 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
553 cmd_p->resource_id = cpu_to_le32(resource_id);
554 cmd_p->scanout_id = cpu_to_le32(scanout_id);
555 cmd_p->r.width = cpu_to_le32(width);
556 cmd_p->r.height = cpu_to_le32(height);
557 cmd_p->r.x = cpu_to_le32(x);
558 cmd_p->r.y = cpu_to_le32(y);
559
560 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
561}
562
563void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
564 uint32_t resource_id,
565 uint32_t x, uint32_t y,
566 uint32_t width, uint32_t height,
567 struct virtio_gpu_object_array *objs,
568 struct virtio_gpu_fence *fence)
569{
570 struct virtio_gpu_resource_flush *cmd_p;
571 struct virtio_gpu_vbuffer *vbuf;
572
573 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
574 memset(cmd_p, 0, sizeof(*cmd_p));
575 vbuf->objs = objs;
576
577 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
578 cmd_p->resource_id = cpu_to_le32(resource_id);
579 cmd_p->r.width = cpu_to_le32(width);
580 cmd_p->r.height = cpu_to_le32(height);
581 cmd_p->r.x = cpu_to_le32(x);
582 cmd_p->r.y = cpu_to_le32(y);
583
584 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
585}
586
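/*
 * For shmem objects the guest pages may need to be synced for device
 * access before the host reads them; this is skipped when the virtio
 * transport bypasses the DMA API (the "DMA quirk").
 */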
587void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
588 uint64_t offset,
589 uint32_t width, uint32_t height,
590 uint32_t x, uint32_t y,
591 struct virtio_gpu_object_array *objs,
592 struct virtio_gpu_fence *fence)
593{
594 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
595 struct virtio_gpu_transfer_to_host_2d *cmd_p;
596 struct virtio_gpu_vbuffer *vbuf;
597 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
598
599 if (virtio_gpu_is_shmem(bo) && use_dma_api)
600 dma_sync_sgtable_for_device(&vgdev->vdev->dev,
601 bo->base.sgt, DMA_TO_DEVICE);
602
603 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
604 memset(cmd_p, 0, sizeof(*cmd_p));
605 vbuf->objs = objs;
606
607 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
608 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
609 cmd_p->offset = cpu_to_le64(offset);
610 cmd_p->r.width = cpu_to_le32(width);
611 cmd_p->r.height = cpu_to_le32(height);
612 cmd_p->r.x = cpu_to_le32(x);
613 cmd_p->r.y = cpu_to_le32(y);
614
615 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
616}
617
618static void
619virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
620 uint32_t resource_id,
621 struct virtio_gpu_mem_entry *ents,
622 uint32_t nents,
623 struct virtio_gpu_fence *fence)
624{
625 struct virtio_gpu_resource_attach_backing *cmd_p;
626 struct virtio_gpu_vbuffer *vbuf;
627
628 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
629 memset(cmd_p, 0, sizeof(*cmd_p));
630
631 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
632 cmd_p->resource_id = cpu_to_le32(resource_id);
633 cmd_p->nr_entries = cpu_to_le32(nents);
634
635 vbuf->data_buf = ents;
636 vbuf->data_size = sizeof(*ents) * nents;
637
638 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
639}
640
641static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
642 struct virtio_gpu_vbuffer *vbuf)
643{
644 struct virtio_gpu_resp_display_info *resp =
645 (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
646 int i;
647
648 spin_lock(&vgdev->display_info_lock);
649 for (i = 0; i < vgdev->num_scanouts; i++) {
650 vgdev->outputs[i].info = resp->pmodes[i];
651 if (resp->pmodes[i].enabled) {
652 DRM_DEBUG("output %d: %dx%d+%d+%d", i,
653 le32_to_cpu(resp->pmodes[i].r.width),
654 le32_to_cpu(resp->pmodes[i].r.height),
655 le32_to_cpu(resp->pmodes[i].r.x),
656 le32_to_cpu(resp->pmodes[i].r.y));
657 } else {
658 DRM_DEBUG("output %d: disabled", i);
659 }
660 }
661
662 vgdev->display_info_pending = false;
663 spin_unlock(&vgdev->display_info_lock);
664 wake_up(&vgdev->resp_wq);
665
666 if (!drm_helper_hpd_irq_event(vgdev->ddev))
667 drm_kms_helper_hotplug_event(vgdev->ddev);
668}
669
670static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
671 struct virtio_gpu_vbuffer *vbuf)
672{
673 struct virtio_gpu_get_capset_info *cmd =
674 (struct virtio_gpu_get_capset_info *)vbuf->buf;
675 struct virtio_gpu_resp_capset_info *resp =
676 (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
677 int i = le32_to_cpu(cmd->capset_index);
678
679 spin_lock(&vgdev->display_info_lock);
680 if (vgdev->capsets) {
681 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
682 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
683 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
684 } else {
685 DRM_ERROR("invalid capset memory.");
686 }
687 spin_unlock(&vgdev->display_info_lock);
688 wake_up(&vgdev->resp_wq);
689}
690
691static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
692 struct virtio_gpu_vbuffer *vbuf)
693{
694 struct virtio_gpu_get_capset *cmd =
695 (struct virtio_gpu_get_capset *)vbuf->buf;
696 struct virtio_gpu_resp_capset *resp =
697 (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
698 struct virtio_gpu_drv_cap_cache *cache_ent;
699
700 spin_lock(&vgdev->display_info_lock);
701 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
702 if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
703 cache_ent->id == le32_to_cpu(cmd->capset_id)) {
704 memcpy(cache_ent->caps_cache, resp->capset_data,
705 cache_ent->size);
706 /* Copy must occur before is_valid is signalled. */
707 smp_wmb();
708 atomic_set(&cache_ent->is_valid, 1);
709 break;
710 }
711 }
712 spin_unlock(&vgdev->display_info_lock);
713 wake_up_all(&vgdev->resp_wq);
714}
715
716static int virtio_get_edid_block(void *data, u8 *buf,
717 unsigned int block, size_t len)
718{
719 struct virtio_gpu_resp_edid *resp = data;
720 size_t start = block * EDID_LENGTH;
721
722 if (start + len > le32_to_cpu(resp->size))
723 return -EINVAL;
724 memcpy(buf, resp->edid + start, len);
725 return 0;
726}
727
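/*
 * Parse the EDID blob returned by the host, update the connector's EDID
 * property and swap the cached EDID under display_info_lock, freeing the
 * old one afterwards.
 */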
728static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
729 struct virtio_gpu_vbuffer *vbuf)
730{
731 struct virtio_gpu_cmd_get_edid *cmd =
732 (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
733 struct virtio_gpu_resp_edid *resp =
734 (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
735 uint32_t scanout = le32_to_cpu(cmd->scanout);
736 struct virtio_gpu_output *output;
737 struct edid *new_edid, *old_edid;
738
739 if (scanout >= vgdev->num_scanouts)
740 return;
741 output = vgdev->outputs + scanout;
742
743 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
744 drm_connector_update_edid_property(&output->conn, new_edid);
745
746 spin_lock(&vgdev->display_info_lock);
747 old_edid = output->edid;
748 output->edid = new_edid;
749 spin_unlock(&vgdev->display_info_lock);
750
751 kfree(old_edid);
752 wake_up(&vgdev->resp_wq);
753}
754
755int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
756{
757 struct virtio_gpu_ctrl_hdr *cmd_p;
758 struct virtio_gpu_vbuffer *vbuf;
759 void *resp_buf;
760
761 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
762 GFP_KERNEL);
763 if (!resp_buf)
764 return -ENOMEM;
765
766 cmd_p = virtio_gpu_alloc_cmd_resp
767 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
768 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
769 resp_buf);
770 memset(cmd_p, 0, sizeof(*cmd_p));
771
772 vgdev->display_info_pending = true;
773 cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
774 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
775 return 0;
776}
777
778int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
779{
780 struct virtio_gpu_get_capset_info *cmd_p;
781 struct virtio_gpu_vbuffer *vbuf;
782 void *resp_buf;
783
784 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
785 GFP_KERNEL);
786 if (!resp_buf)
787 return -ENOMEM;
788
789 cmd_p = virtio_gpu_alloc_cmd_resp
790 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
791 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
792 resp_buf);
793 memset(cmd_p, 0, sizeof(*cmd_p));
794
795 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
796 cmd_p->capset_index = cpu_to_le32(idx);
797 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
798 return 0;
799}
800
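/*
 * Fetch a capset from the host.  A cache entry is allocated up front and
 * only added to the list, under display_info_lock, if no other task has
 * already added the same id/version; otherwise the new entry is freed and
 * the existing one is returned.
 */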
801int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
802 int idx, int version,
803 struct virtio_gpu_drv_cap_cache **cache_p)
804{
805 struct virtio_gpu_get_capset *cmd_p;
806 struct virtio_gpu_vbuffer *vbuf;
807 int max_size;
808 struct virtio_gpu_drv_cap_cache *cache_ent;
809 struct virtio_gpu_drv_cap_cache *search_ent;
810 void *resp_buf;
811
812 *cache_p = NULL;
813
814 if (idx >= vgdev->num_capsets)
815 return -EINVAL;
816
817 if (version > vgdev->capsets[idx].max_version)
818 return -EINVAL;
819
820 cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
821 if (!cache_ent)
822 return -ENOMEM;
823
824 max_size = vgdev->capsets[idx].max_size;
825 cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
826 if (!cache_ent->caps_cache) {
827 kfree(cache_ent);
828 return -ENOMEM;
829 }
830
831 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
832 GFP_KERNEL);
833 if (!resp_buf) {
834 kfree(cache_ent->caps_cache);
835 kfree(cache_ent);
836 return -ENOMEM;
837 }
838
839 cache_ent->version = version;
840 cache_ent->id = vgdev->capsets[idx].id;
841 atomic_set(&cache_ent->is_valid, 0);
842 cache_ent->size = max_size;
843 spin_lock(&vgdev->display_info_lock);
844 /* Search while under lock in case it was added by another task. */
845 list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
846 if (search_ent->id == vgdev->capsets[idx].id &&
847 search_ent->version == version) {
848 *cache_p = search_ent;
849 break;
850 }
851 }
852 if (!*cache_p)
853 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
854 spin_unlock(&vgdev->display_info_lock);
855
856 if (*cache_p) {
857 /* Entry was found, so free everything that was just created. */
858 kfree(resp_buf);
859 kfree(cache_ent->caps_cache);
860 kfree(cache_ent);
861 return 0;
862 }
863
864 cmd_p = virtio_gpu_alloc_cmd_resp
865 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
866 sizeof(struct virtio_gpu_resp_capset) + max_size,
867 resp_buf);
868 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
869 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
870 cmd_p->capset_version = cpu_to_le32(version);
871 *cache_p = cache_ent;
872 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
873
874 return 0;
875}
876
877int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
878{
879 struct virtio_gpu_cmd_get_edid *cmd_p;
880 struct virtio_gpu_vbuffer *vbuf;
881 void *resp_buf;
882 int scanout;
883
884 if (WARN_ON(!vgdev->has_edid))
885 return -EINVAL;
886
887 for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
888 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
889 GFP_KERNEL);
890 if (!resp_buf)
891 return -ENOMEM;
892
893 cmd_p = virtio_gpu_alloc_cmd_resp
894 (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
895 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
896 resp_buf);
897 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
898 cmd_p->scanout = cpu_to_le32(scanout);
899 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
900 }
901
902 return 0;
903}
904
905void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
906 uint32_t context_init, uint32_t nlen,
907 const char *name)
908{
909 struct virtio_gpu_ctx_create *cmd_p;
910 struct virtio_gpu_vbuffer *vbuf;
911
912 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
913 memset(cmd_p, 0, sizeof(*cmd_p));
914
915 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
916 cmd_p->hdr.ctx_id = cpu_to_le32(id);
917 cmd_p->nlen = cpu_to_le32(nlen);
918 cmd_p->context_init = cpu_to_le32(context_init);
919 strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
920 cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
921 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
922}
923
924void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
925 uint32_t id)
926{
927 struct virtio_gpu_ctx_destroy *cmd_p;
928 struct virtio_gpu_vbuffer *vbuf;
929
930 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
931 memset(cmd_p, 0, sizeof(*cmd_p));
932
933 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
934 cmd_p->hdr.ctx_id = cpu_to_le32(id);
935 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
936}
937
938void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
939 uint32_t ctx_id,
940 struct virtio_gpu_object_array *objs)
941{
942 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
943 struct virtio_gpu_ctx_resource *cmd_p;
944 struct virtio_gpu_vbuffer *vbuf;
945
946 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
947 memset(cmd_p, 0, sizeof(*cmd_p));
948 vbuf->objs = objs;
949
950 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
951 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
952 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
953 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
954}
955
956void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
957 uint32_t ctx_id,
958 struct virtio_gpu_object_array *objs)
959{
960 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
961 struct virtio_gpu_ctx_resource *cmd_p;
962 struct virtio_gpu_vbuffer *vbuf;
963
964 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
965 memset(cmd_p, 0, sizeof(*cmd_p));
966 vbuf->objs = objs;
967
968 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
969 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
970 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
971 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
972}
973
974void
975virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
976 struct virtio_gpu_object *bo,
977 struct virtio_gpu_object_params *params,
978 struct virtio_gpu_object_array *objs,
979 struct virtio_gpu_fence *fence)
980{
981 struct virtio_gpu_resource_create_3d *cmd_p;
982 struct virtio_gpu_vbuffer *vbuf;
983
984 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
985 memset(cmd_p, 0, sizeof(*cmd_p));
986 vbuf->objs = objs;
987
988 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
989 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
990 cmd_p->format = cpu_to_le32(params->format);
991 cmd_p->width = cpu_to_le32(params->width);
992 cmd_p->height = cpu_to_le32(params->height);
993
994 cmd_p->target = cpu_to_le32(params->target);
995 cmd_p->bind = cpu_to_le32(params->bind);
996 cmd_p->depth = cpu_to_le32(params->depth);
997 cmd_p->array_size = cpu_to_le32(params->array_size);
998 cmd_p->last_level = cpu_to_le32(params->last_level);
999 cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
1000 cmd_p->flags = cpu_to_le32(params->flags);
1001
1002 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1003
1004 bo->created = true;
1005}
1006
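/*
 * Copy guest data into the host-side resource.  @offset, @level, @stride,
 * @layer_stride and @box describe the region being transferred; completion
 * is signalled through the optional @fence.
 */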
1007void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1008 uint32_t ctx_id,
1009 uint64_t offset, uint32_t level,
1010 uint32_t stride,
1011 uint32_t layer_stride,
1012 struct drm_virtgpu_3d_box *box,
1013 struct virtio_gpu_object_array *objs,
1014 struct virtio_gpu_fence *fence)
1015{
1016 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1017 struct virtio_gpu_transfer_host_3d *cmd_p;
1018 struct virtio_gpu_vbuffer *vbuf;
1019 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1020
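	/*
	 * For shmem-backed objects the host reads the guest pages directly,
	 * so flush them to the device before queuing the transfer when the
	 * DMA API is in use.
	 */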
1021 if (virtio_gpu_is_shmem(bo) && use_dma_api)
1022 dma_sync_sgtable_for_device(&vgdev->vdev->dev,
1023 bo->base.sgt, DMA_TO_DEVICE);
1024
1025 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1026 memset(cmd_p, 0, sizeof(*cmd_p));
1027
1028 vbuf->objs = objs;
1029
1030 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
1031 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1032 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1033 convert_to_hw_box(&cmd_p->box, box);
1034 cmd_p->offset = cpu_to_le64(offset);
1035 cmd_p->level = cpu_to_le32(level);
1036 cmd_p->stride = cpu_to_le32(stride);
1037 cmd_p->layer_stride = cpu_to_le32(layer_stride);
1038
1039 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1040}
1041
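/*
 * The inverse of the transfer above: ask the host to copy resource contents
 * back into the guest-visible backing store.  Unlike the to-host path, no
 * CPU cache maintenance is done here before queuing.
 */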
1042void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1043 uint32_t ctx_id,
1044 uint64_t offset, uint32_t level,
1045 uint32_t stride,
1046 uint32_t layer_stride,
1047 struct drm_virtgpu_3d_box *box,
1048 struct virtio_gpu_object_array *objs,
1049 struct virtio_gpu_fence *fence)
1050{
1051 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1052 struct virtio_gpu_transfer_host_3d *cmd_p;
1053 struct virtio_gpu_vbuffer *vbuf;
1054
1055 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1056 memset(cmd_p, 0, sizeof(*cmd_p));
1057
1058 vbuf->objs = objs;
1059
1060 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
1061 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1062 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1063 convert_to_hw_box(&cmd_p->box, box);
1064 cmd_p->offset = cpu_to_le64(offset);
1065 cmd_p->level = cpu_to_le32(level);
1066 cmd_p->stride = cpu_to_le32(stride);
1067 cmd_p->layer_stride = cpu_to_le32(layer_stride);
1068
1069 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1070}
1071
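/*
 * Submit a 3D command stream (@data, @data_size bytes) for execution in
 * context @ctx_id.  The buffer is attached to the vbuffer as its data
 * payload and is released together with the vbuffer once the command has
 * been processed.
 */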
1072void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1073 void *data, uint32_t data_size,
1074 uint32_t ctx_id,
1075 struct virtio_gpu_object_array *objs,
1076 struct virtio_gpu_fence *fence)
1077{
1078 struct virtio_gpu_cmd_submit *cmd_p;
1079 struct virtio_gpu_vbuffer *vbuf;
1080
1081 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1082 memset(cmd_p, 0, sizeof(*cmd_p));
1083
1084 vbuf->data_buf = data;
1085 vbuf->data_size = data_size;
1086 vbuf->objs = objs;
1087
1088 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
1089 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1090 cmd_p->size = cpu_to_le32(data_size);
1091
1092 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1093}
1094
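/*
 * Convenience wrapper: attach the already prepared memory entries of @obj as
 * backing store for its host resource, without a fence.
 */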
1095void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1096 struct virtio_gpu_object *obj,
1097 struct virtio_gpu_mem_entry *ents,
1098 unsigned int nents)
1099{
1100 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1101 ents, nents, NULL);
1102}
1103
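/*
 * Push the current cursor state of @output to the host via the dedicated
 * cursor queue.  The scanout id is filled in from the output index before
 * the cursor command is copied into the vbuffer.
 */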
1104void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1105 struct virtio_gpu_output *output)
1106{
1107 struct virtio_gpu_vbuffer *vbuf;
1108 struct virtio_gpu_update_cursor *cur_p;
1109
1110 output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1111 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1112 memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1113 virtio_gpu_queue_cursor(vgdev, vbuf);
1114}
1115
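/*
 * Completion handler for RESOURCE_ASSIGN_UUID.  On success the UUID from the
 * response is imported into the object and uuid_state moves from
 * STATE_INITIALIZING to STATE_OK, otherwise to STATE_ERR; waiters on
 * resp_wq are then woken.
 */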
1116static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1117 struct virtio_gpu_vbuffer *vbuf)
1118{
1119 struct virtio_gpu_object *obj =
1120 gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1121 struct virtio_gpu_resp_resource_uuid *resp =
1122 (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
1123 uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1124
1125 spin_lock(&vgdev->resource_export_lock);
1126 WARN_ON(obj->uuid_state != STATE_INITIALIZING);
1127
1128 if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
1129 obj->uuid_state == STATE_INITIALIZING) {
1130 import_uuid(&obj->uuid, resp->uuid);
1131 obj->uuid_state = STATE_OK;
1132 } else {
1133 obj->uuid_state = STATE_ERR;
1134 }
1135 spin_unlock(&vgdev->resource_export_lock);
1136
1137 wake_up_all(&vgdev->resp_wq);
1138}
1139
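/*
 * Ask the host to assign a UUID to the first object in @objs (presumably for
 * sharing exported resources between virtio devices).  On allocation failure
 * the object is marked STATE_ERR and the array reference is dropped.
 */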
1140int
1141virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1142 struct virtio_gpu_object_array *objs)
1143{
1144 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1145 struct virtio_gpu_resource_assign_uuid *cmd_p;
1146 struct virtio_gpu_vbuffer *vbuf;
1147 struct virtio_gpu_resp_resource_uuid *resp_buf;
1148
1149 resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1150 if (!resp_buf) {
1151 spin_lock(&vgdev->resource_export_lock);
1152 bo->uuid_state = STATE_ERR;
1153 spin_unlock(&vgdev->resource_export_lock);
1154 virtio_gpu_array_put_free(objs);
1155 return -ENOMEM;
1156 }
1157
1158 cmd_p = virtio_gpu_alloc_cmd_resp
1159 (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1160 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
1161 memset(cmd_p, 0, sizeof(*cmd_p));
1162
1163 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
1164 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1165
1166 vbuf->objs = objs;
1167 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1168 return 0;
1169}
1170
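/*
 * Completion handler for RESOURCE_MAP_BLOB: record the host-provided map
 * info on the VRAM object and flip its map_state, then wake any waiters on
 * resp_wq.
 */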
1171static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
1172 struct virtio_gpu_vbuffer *vbuf)
1173{
1174 struct virtio_gpu_object *bo =
1175 gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1176 struct virtio_gpu_resp_map_info *resp =
1177 (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
1178 struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
1179 uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1180
1181 spin_lock(&vgdev->host_visible_lock);
1182
1183 if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
1184 vram->map_info = resp->map_info;
1185 vram->map_state = STATE_OK;
1186 } else {
1187 vram->map_state = STATE_ERR;
1188 }
1189
1190 spin_unlock(&vgdev->host_visible_lock);
1191 wake_up_all(&vgdev->resp_wq);
1192}
1193
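/*
 * Map a host-visible blob resource at @offset inside the host-visible memory
 * region.  The response carries the map info (cache attributes, per the
 * virtio-gpu protocol) and is picked up by virtio_gpu_cmd_resource_map_cb
 * above.
 */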
1194int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
1195 struct virtio_gpu_object_array *objs, uint64_t offset)
1196{
1197 struct virtio_gpu_resource_map_blob *cmd_p;
1198 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1199 struct virtio_gpu_vbuffer *vbuf;
1200 struct virtio_gpu_resp_map_info *resp_buf;
1201
1202 resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1203 if (!resp_buf)
1204 return -ENOMEM;
1205
1206 cmd_p = virtio_gpu_alloc_cmd_resp
1207 (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
1208 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
1209 memset(cmd_p, 0, sizeof(*cmd_p));
1210
1211 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
1212 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1213 cmd_p->offset = cpu_to_le64(offset);
1214 vbuf->objs = objs;
1215
1216 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1217 return 0;
1218}
1219
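/* Queue the matching RESOURCE_UNMAP_BLOB for a previously mapped blob. */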
1220void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
1221 struct virtio_gpu_object *bo)
1222{
1223 struct virtio_gpu_resource_unmap_blob *cmd_p;
1224 struct virtio_gpu_vbuffer *vbuf;
1225
1226 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1227 memset(cmd_p, 0, sizeof(*cmd_p));
1228
1229 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
1230 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1231
1232 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1233}
1234
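/*
 * Create a blob resource from @params and attach @nents memory entries in
 * the same command; the entry array becomes the vbuffer's data payload and
 * is released with it.  The BO is marked created afterwards.
 */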
1235void
1236virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
1237 struct virtio_gpu_object *bo,
1238 struct virtio_gpu_object_params *params,
1239 struct virtio_gpu_mem_entry *ents,
1240 uint32_t nents)
1241{
1242 struct virtio_gpu_resource_create_blob *cmd_p;
1243 struct virtio_gpu_vbuffer *vbuf;
1244
1245 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1246 memset(cmd_p, 0, sizeof(*cmd_p));
1247
1248 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
1249 cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
1250 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1251 cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
1252 cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
1253 cmd_p->blob_id = cpu_to_le64(params->blob_id);
1254 cmd_p->size = cpu_to_le64(params->size);
1255 cmd_p->nr_entries = cpu_to_le32(nents);
1256
1257 vbuf->data_buf = ents;
1258 vbuf->data_size = sizeof(*ents) * nents;
1259
1260 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1261 bo->created = true;
1262}
1263
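/*
 * Point scanout @scanout_id at a blob resource.  Format, per-plane strides
 * and offsets are taken from the framebuffer, while @width/@height/@x/@y
 * describe the scanout rectangle within it.
 */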
1264void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
1265 uint32_t scanout_id,
1266 struct virtio_gpu_object *bo,
1267 struct drm_framebuffer *fb,
1268 uint32_t width, uint32_t height,
1269 uint32_t x, uint32_t y)
1270{
1271 uint32_t i;
1272 struct virtio_gpu_set_scanout_blob *cmd_p;
1273 struct virtio_gpu_vbuffer *vbuf;
1274 uint32_t format = virtio_gpu_translate_format(fb->format->format);
1275
1276 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1277 memset(cmd_p, 0, sizeof(*cmd_p));
1278
1279 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
1280 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1281 cmd_p->scanout_id = cpu_to_le32(scanout_id);
1282
1283 cmd_p->format = cpu_to_le32(format);
1284 cmd_p->width = cpu_to_le32(fb->width);
1285 cmd_p->height = cpu_to_le32(fb->height);
1286
1287 for (i = 0; i < 4; i++) {
1288 cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
1289 cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
1290 }
1291
1292 cmd_p->r.width = cpu_to_le32(width);
1293 cmd_p->r.height = cpu_to_le32(height);
1294 cmd_p->r.x = cpu_to_le32(x);
1295 cmd_p->r.y = cpu_to_le32(y);
1296
1297 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1298}