1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Authors:
6 * Dave Airlie <airlied@redhat.com>
7 * Gerd Hoffmann <kraxel@redhat.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <linux/dma-mapping.h>
30#include <linux/virtio.h>
31#include <linux/virtio_config.h>
32#include <linux/virtio_ring.h>
33
34#include "virtgpu_drv.h"
35#include "virtgpu_trace.h"
36
37#define MAX_INLINE_CMD_SIZE 96
38#define MAX_INLINE_RESP_SIZE 24
39#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
40 + MAX_INLINE_CMD_SIZE \
41 + MAX_INLINE_RESP_SIZE)
42
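/*
 * Virtqueue callbacks, invoked from the transport's interrupt handler.
 * They only kick the per-queue dequeue work; the actual response
 * processing happens later in process context
 * (virtio_gpu_dequeue_ctrl_func / virtio_gpu_dequeue_cursor_func).
 */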
43void virtio_gpu_ctrl_ack(struct virtqueue *vq)
44{
45 struct drm_device *dev = vq->vdev->priv;
46 struct virtio_gpu_device *vgdev = dev->dev_private;
47
48 schedule_work(&vgdev->ctrlq.dequeue_work);
49}
50
51void virtio_gpu_cursor_ack(struct virtqueue *vq)
52{
53 struct drm_device *dev = vq->vdev->priv;
54 struct virtio_gpu_device *vgdev = dev->dev_private;
55
56 schedule_work(&vgdev->cursorq.dequeue_work);
57}
58
59int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
60{
61 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
62 VBUFFER_SIZE,
63 __alignof__(struct virtio_gpu_vbuffer),
64 0, NULL);
65 if (!vgdev->vbufs)
66 return -ENOMEM;
67 return 0;
68}
69
70void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
71{
72 kmem_cache_destroy(vgdev->vbufs);
73 vgdev->vbufs = NULL;
74}
75
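/*
 * Allocate a vbuffer from the slab cache.  The command buffer always lives
 * inline, right behind the vbuffer struct (hence the BUG_ON against
 * MAX_INLINE_CMD_SIZE).  The response buffer is placed inline behind the
 * command when it fits into MAX_INLINE_RESP_SIZE; larger responses must be
 * supplied by the caller via resp_buf.
 */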
76static struct virtio_gpu_vbuffer*
77virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
78 int size, int resp_size, void *resp_buf,
79 virtio_gpu_resp_cb resp_cb)
80{
81 struct virtio_gpu_vbuffer *vbuf;
82
83 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
84 if (!vbuf)
85 return ERR_PTR(-ENOMEM);
86
87 BUG_ON(size > MAX_INLINE_CMD_SIZE);
88 vbuf->buf = (void *)vbuf + sizeof(*vbuf);
89 vbuf->size = size;
90
91 vbuf->resp_cb = resp_cb;
92 vbuf->resp_size = resp_size;
93 if (resp_size <= MAX_INLINE_RESP_SIZE)
94 vbuf->resp_buf = (void *)vbuf->buf + size;
95 else
96 vbuf->resp_buf = resp_buf;
97 BUG_ON(!vbuf->resp_buf);
98 return vbuf;
99}
100
101static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
102 struct virtio_gpu_vbuffer **vbuffer_p,
103 int size)
104{
105 struct virtio_gpu_vbuffer *vbuf;
106
107 vbuf = virtio_gpu_get_vbuf(vgdev, size,
108 sizeof(struct virtio_gpu_ctrl_hdr),
109 NULL, NULL);
110 if (IS_ERR(vbuf)) {
111 *vbuffer_p = NULL;
112 return ERR_CAST(vbuf);
113 }
114 *vbuffer_p = vbuf;
115 return vbuf->buf;
116}
117
118static struct virtio_gpu_update_cursor*
119virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
120 struct virtio_gpu_vbuffer **vbuffer_p)
121{
122 struct virtio_gpu_vbuffer *vbuf;
123
124 vbuf = virtio_gpu_get_vbuf
125 (vgdev, sizeof(struct virtio_gpu_update_cursor),
126 0, NULL, NULL);
127 if (IS_ERR(vbuf)) {
128 *vbuffer_p = NULL;
129 return ERR_CAST(vbuf);
130 }
131 *vbuffer_p = vbuf;
132 return (struct virtio_gpu_update_cursor *)vbuf->buf;
133}
134
135static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
136 virtio_gpu_resp_cb cb,
137 struct virtio_gpu_vbuffer **vbuffer_p,
138 int cmd_size, int resp_size,
139 void *resp_buf)
140{
141 struct virtio_gpu_vbuffer *vbuf;
142
143 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
144 resp_size, resp_buf, cb);
145 if (IS_ERR(vbuf)) {
146 *vbuffer_p = NULL;
147 return ERR_CAST(vbuf);
148 }
149 *vbuffer_p = vbuf;
150 return (struct virtio_gpu_command *)vbuf->buf;
151}
152
153static void free_vbuf(struct virtio_gpu_device *vgdev,
154 struct virtio_gpu_vbuffer *vbuf)
155{
156 if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
157 kfree(vbuf->resp_buf);
158 kfree(vbuf->data_buf);
159 kmem_cache_free(vgdev->vbufs, vbuf);
160}
161
162static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
163{
164 struct virtio_gpu_vbuffer *vbuf;
165 unsigned int len;
166 int freed = 0;
167
168 while ((vbuf = virtqueue_get_buf(vq, &len))) {
169 list_add_tail(&vbuf->list, reclaim_list);
170 freed++;
171 }
172 if (freed == 0)
173 DRM_DEBUG("Huh? zero vbufs reclaimed");
174}
175
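/*
 * Work handler for the control queue.  Used buffers are pulled off the
 * virtqueue under ctrlq.qlock with callbacks disabled and collected on a
 * local list, then processed outside the lock: error responses are logged,
 * the last completed fence id is tracked, per-command response callbacks
 * run, and finally ack_queue waiters and the fence machinery are woken.
 */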
176void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
177{
178 struct virtio_gpu_device *vgdev =
179 container_of(work, struct virtio_gpu_device,
180 ctrlq.dequeue_work);
181 struct list_head reclaim_list;
182 struct virtio_gpu_vbuffer *entry, *tmp;
183 struct virtio_gpu_ctrl_hdr *resp;
184 u64 fence_id = 0;
185
186 INIT_LIST_HEAD(&reclaim_list);
187 spin_lock(&vgdev->ctrlq.qlock);
188 do {
189 virtqueue_disable_cb(vgdev->ctrlq.vq);
190 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
191
192 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
193 spin_unlock(&vgdev->ctrlq.qlock);
194
195 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
196 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
197
198 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
199
200 if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
201 if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
202 struct virtio_gpu_ctrl_hdr *cmd;
203 cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
204 DRM_ERROR("response 0x%x (command 0x%x)\n",
205 le32_to_cpu(resp->type),
206 le32_to_cpu(cmd->type));
207 } else
208 DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
209 }
210 if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
211 u64 f = le64_to_cpu(resp->fence_id);
212
213 if (fence_id > f) {
214 DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
215 __func__, fence_id, f);
216 } else {
217 fence_id = f;
218 }
219 }
220 if (entry->resp_cb)
221 entry->resp_cb(vgdev, entry);
222
223 list_del(&entry->list);
224 free_vbuf(vgdev, entry);
225 }
226 wake_up(&vgdev->ctrlq.ack_queue);
227
228 if (fence_id)
229 virtio_gpu_fence_event_process(vgdev, fence_id);
230}
231
232void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
233{
234 struct virtio_gpu_device *vgdev =
235 container_of(work, struct virtio_gpu_device,
236 cursorq.dequeue_work);
237 struct list_head reclaim_list;
238 struct virtio_gpu_vbuffer *entry, *tmp;
239
240 INIT_LIST_HEAD(&reclaim_list);
241 spin_lock(&vgdev->cursorq.qlock);
242 do {
243 virtqueue_disable_cb(vgdev->cursorq.vq);
244 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
245 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
246 spin_unlock(&vgdev->cursorq.qlock);
247
248 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
249 list_del(&entry->list);
250 free_vbuf(vgdev, entry);
251 }
252 wake_up(&vgdev->cursorq.ack_queue);
253}
254
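/*
 * Add a vbuffer to the control virtqueue.  At most three scatterlist
 * entries are used: the command (device-readable), an optional data payload
 * (device-readable) and an optional response buffer (device-writable).
 * Called with ctrlq.qlock held; the lock is dropped temporarily while
 * waiting for ring space, which is what the __releases/__acquires
 * annotations document.
 */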
255static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
256 struct virtio_gpu_vbuffer *vbuf)
257 __releases(&vgdev->ctrlq.qlock)
258 __acquires(&vgdev->ctrlq.qlock)
259{
260 struct virtqueue *vq = vgdev->ctrlq.vq;
261 struct scatterlist *sgs[3], vcmd, vout, vresp;
262 int outcnt = 0, incnt = 0;
263 int ret;
264
265 if (!vgdev->vqs_ready)
266 return -ENODEV;
267
268 sg_init_one(&vcmd, vbuf->buf, vbuf->size);
269 sgs[outcnt + incnt] = &vcmd;
270 outcnt++;
271
272 if (vbuf->data_size) {
273 sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
274 sgs[outcnt + incnt] = &vout;
275 outcnt++;
276 }
277
278 if (vbuf->resp_size) {
279 sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
280 sgs[outcnt + incnt] = &vresp;
281 incnt++;
282 }
283
284retry:
285 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
286 if (ret == -ENOSPC) {
287 spin_unlock(&vgdev->ctrlq.qlock);
288 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
289 spin_lock(&vgdev->ctrlq.qlock);
290 goto retry;
291 } else {
292 trace_virtio_gpu_cmd_queue(vq,
293 (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
294
295 virtqueue_kick(vq);
296 }
297
298 if (!ret)
299 ret = vq->num_free;
300 return ret;
301}
302
303static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
304 struct virtio_gpu_vbuffer *vbuf)
305{
306 int rc;
307
308 spin_lock(&vgdev->ctrlq.qlock);
309 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
310 spin_unlock(&vgdev->ctrlq.qlock);
311 return rc;
312}
313
314static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
315 struct virtio_gpu_vbuffer *vbuf,
316 struct virtio_gpu_ctrl_hdr *hdr,
317 struct virtio_gpu_fence *fence)
318{
319 struct virtqueue *vq = vgdev->ctrlq.vq;
320 int rc;
321
322again:
323 spin_lock(&vgdev->ctrlq.qlock);
324
 325 /*
 326 * Make sure we have enough space in the virtqueue. If not,
 327 * wait here until we do.
 328 *
 329 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
 330 * to wait for free space, which can result in fence ids being
 331 * submitted out-of-order.
 332 */
333 if (vq->num_free < 3) {
334 spin_unlock(&vgdev->ctrlq.qlock);
335 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
336 goto again;
337 }
338
339 if (fence)
340 virtio_gpu_fence_emit(vgdev, hdr, fence);
341 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
342 spin_unlock(&vgdev->ctrlq.qlock);
343 return rc;
344}
345
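/*
 * Cursor commands carry no payload and no response, so a single
 * device-readable scatterlist entry is enough.  Like the control path,
 * this drops the queue lock while waiting for ring space and retries.
 */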
346static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
347 struct virtio_gpu_vbuffer *vbuf)
348{
349 struct virtqueue *vq = vgdev->cursorq.vq;
350 struct scatterlist *sgs[1], ccmd;
351 int ret;
352 int outcnt;
353
354 if (!vgdev->vqs_ready)
355 return -ENODEV;
356
357 sg_init_one(&ccmd, vbuf->buf, vbuf->size);
358 sgs[0] = &ccmd;
359 outcnt = 1;
360
361 spin_lock(&vgdev->cursorq.qlock);
362retry:
363 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
364 if (ret == -ENOSPC) {
365 spin_unlock(&vgdev->cursorq.qlock);
366 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
367 spin_lock(&vgdev->cursorq.qlock);
368 goto retry;
369 } else {
370 trace_virtio_gpu_cmd_queue(vq,
371 (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
372
373 virtqueue_kick(vq);
374 }
375
376 spin_unlock(&vgdev->cursorq.qlock);
377
378 if (!ret)
379 ret = vq->num_free;
380 return ret;
381}
382
 383/* Just create gem objects for userspace and long-lived objects;
 384 * should the queue objects simply use dma_alloc'ed pages?
 385 */
386
387/* create a basic resource */
388void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
389 struct virtio_gpu_object *bo,
390 struct virtio_gpu_object_params *params,
391 struct virtio_gpu_fence *fence)
392{
393 struct virtio_gpu_resource_create_2d *cmd_p;
394 struct virtio_gpu_vbuffer *vbuf;
395
396 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
397 memset(cmd_p, 0, sizeof(*cmd_p));
398
399 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
400 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
401 cmd_p->format = cpu_to_le32(params->format);
402 cmd_p->width = cpu_to_le32(params->width);
403 cmd_p->height = cpu_to_le32(params->height);
404
405 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
406 bo->created = true;
407}
408
409void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
410 uint32_t resource_id)
411{
412 struct virtio_gpu_resource_unref *cmd_p;
413 struct virtio_gpu_vbuffer *vbuf;
414
415 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
416 memset(cmd_p, 0, sizeof(*cmd_p));
417
418 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
419 cmd_p->resource_id = cpu_to_le32(resource_id);
420
421 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
422}
423
424static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
425 uint32_t resource_id,
426 struct virtio_gpu_fence *fence)
427{
428 struct virtio_gpu_resource_detach_backing *cmd_p;
429 struct virtio_gpu_vbuffer *vbuf;
430
431 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
432 memset(cmd_p, 0, sizeof(*cmd_p));
433
434 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
435 cmd_p->resource_id = cpu_to_le32(resource_id);
436
437 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
438}
439
440void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
441 uint32_t scanout_id, uint32_t resource_id,
442 uint32_t width, uint32_t height,
443 uint32_t x, uint32_t y)
444{
445 struct virtio_gpu_set_scanout *cmd_p;
446 struct virtio_gpu_vbuffer *vbuf;
447
448 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
449 memset(cmd_p, 0, sizeof(*cmd_p));
450
451 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
452 cmd_p->resource_id = cpu_to_le32(resource_id);
453 cmd_p->scanout_id = cpu_to_le32(scanout_id);
454 cmd_p->r.width = cpu_to_le32(width);
455 cmd_p->r.height = cpu_to_le32(height);
456 cmd_p->r.x = cpu_to_le32(x);
457 cmd_p->r.y = cpu_to_le32(y);
458
459 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
460}
461
462void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
463 uint32_t resource_id,
464 uint32_t x, uint32_t y,
465 uint32_t width, uint32_t height)
466{
467 struct virtio_gpu_resource_flush *cmd_p;
468 struct virtio_gpu_vbuffer *vbuf;
469
470 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
471 memset(cmd_p, 0, sizeof(*cmd_p));
472
473 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
474 cmd_p->resource_id = cpu_to_le32(resource_id);
475 cmd_p->r.width = cpu_to_le32(width);
476 cmd_p->r.height = cpu_to_le32(height);
477 cmd_p->r.x = cpu_to_le32(x);
478 cmd_p->r.y = cpu_to_le32(y);
479
480 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
481}
482
483void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
484 struct virtio_gpu_object *bo,
485 uint64_t offset,
486 __le32 width, __le32 height,
487 __le32 x, __le32 y,
488 struct virtio_gpu_fence *fence)
489{
490 struct virtio_gpu_transfer_to_host_2d *cmd_p;
491 struct virtio_gpu_vbuffer *vbuf;
492 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
493
494 if (use_dma_api)
495 dma_sync_sg_for_device(vgdev->vdev->dev.parent,
496 bo->pages->sgl, bo->pages->nents,
497 DMA_TO_DEVICE);
498
499 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
500 memset(cmd_p, 0, sizeof(*cmd_p));
501
502 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
503 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
504 cmd_p->offset = cpu_to_le64(offset);
505 cmd_p->r.width = width;
506 cmd_p->r.height = height;
507 cmd_p->r.x = x;
508 cmd_p->r.y = y;
509
510 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
511}
512
513static void
514virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
515 uint32_t resource_id,
516 struct virtio_gpu_mem_entry *ents,
517 uint32_t nents,
518 struct virtio_gpu_fence *fence)
519{
520 struct virtio_gpu_resource_attach_backing *cmd_p;
521 struct virtio_gpu_vbuffer *vbuf;
522
523 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
524 memset(cmd_p, 0, sizeof(*cmd_p));
525
526 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
527 cmd_p->resource_id = cpu_to_le32(resource_id);
528 cmd_p->nr_entries = cpu_to_le32(nents);
529
530 vbuf->data_buf = ents;
531 vbuf->data_size = sizeof(*ents) * nents;
532
533 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
534}
535
536static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
537 struct virtio_gpu_vbuffer *vbuf)
538{
539 struct virtio_gpu_resp_display_info *resp =
540 (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
541 int i;
542
543 spin_lock(&vgdev->display_info_lock);
544 for (i = 0; i < vgdev->num_scanouts; i++) {
545 vgdev->outputs[i].info = resp->pmodes[i];
546 if (resp->pmodes[i].enabled) {
547 DRM_DEBUG("output %d: %dx%d+%d+%d", i,
548 le32_to_cpu(resp->pmodes[i].r.width),
549 le32_to_cpu(resp->pmodes[i].r.height),
550 le32_to_cpu(resp->pmodes[i].r.x),
551 le32_to_cpu(resp->pmodes[i].r.y));
552 } else {
553 DRM_DEBUG("output %d: disabled", i);
554 }
555 }
556
557 vgdev->display_info_pending = false;
558 spin_unlock(&vgdev->display_info_lock);
559 wake_up(&vgdev->resp_wq);
560
561 if (!drm_helper_hpd_irq_event(vgdev->ddev))
562 drm_kms_helper_hotplug_event(vgdev->ddev);
563}
564
565static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
566 struct virtio_gpu_vbuffer *vbuf)
567{
568 struct virtio_gpu_get_capset_info *cmd =
569 (struct virtio_gpu_get_capset_info *)vbuf->buf;
570 struct virtio_gpu_resp_capset_info *resp =
571 (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
572 int i = le32_to_cpu(cmd->capset_index);
573
574 spin_lock(&vgdev->display_info_lock);
575 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
576 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
577 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
578 spin_unlock(&vgdev->display_info_lock);
579 wake_up(&vgdev->resp_wq);
580}
581
582static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
583 struct virtio_gpu_vbuffer *vbuf)
584{
585 struct virtio_gpu_get_capset *cmd =
586 (struct virtio_gpu_get_capset *)vbuf->buf;
587 struct virtio_gpu_resp_capset *resp =
588 (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
589 struct virtio_gpu_drv_cap_cache *cache_ent;
590
591 spin_lock(&vgdev->display_info_lock);
592 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
593 if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
594 cache_ent->id == le32_to_cpu(cmd->capset_id)) {
595 memcpy(cache_ent->caps_cache, resp->capset_data,
596 cache_ent->size);
597 /* Copy must occur before is_valid is signalled. */
598 smp_wmb();
599 atomic_set(&cache_ent->is_valid, 1);
600 break;
601 }
602 }
603 spin_unlock(&vgdev->display_info_lock);
604 wake_up_all(&vgdev->resp_wq);
605}
606
607static int virtio_get_edid_block(void *data, u8 *buf,
608 unsigned int block, size_t len)
609{
610 struct virtio_gpu_resp_edid *resp = data;
611 size_t start = block * EDID_LENGTH;
612
613 if (start + len > le32_to_cpu(resp->size))
614 return -1;
615 memcpy(buf, resp->edid + start, len);
616 return 0;
617}
618
619static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
620 struct virtio_gpu_vbuffer *vbuf)
621{
622 struct virtio_gpu_cmd_get_edid *cmd =
623 (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
624 struct virtio_gpu_resp_edid *resp =
625 (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
626 uint32_t scanout = le32_to_cpu(cmd->scanout);
627 struct virtio_gpu_output *output;
628 struct edid *new_edid, *old_edid;
629
630 if (scanout >= vgdev->num_scanouts)
631 return;
632 output = vgdev->outputs + scanout;
633
634 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
635 drm_connector_update_edid_property(&output->conn, new_edid);
636
637 spin_lock(&vgdev->display_info_lock);
638 old_edid = output->edid;
639 output->edid = new_edid;
640 spin_unlock(&vgdev->display_info_lock);
641
642 kfree(old_edid);
643 wake_up(&vgdev->resp_wq);
644}
645
646int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
647{
648 struct virtio_gpu_ctrl_hdr *cmd_p;
649 struct virtio_gpu_vbuffer *vbuf;
650 void *resp_buf;
651
652 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
653 GFP_KERNEL);
654 if (!resp_buf)
655 return -ENOMEM;
656
657 cmd_p = virtio_gpu_alloc_cmd_resp
658 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
659 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
660 resp_buf);
661 memset(cmd_p, 0, sizeof(*cmd_p));
662
663 vgdev->display_info_pending = true;
664 cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
665 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
666 return 0;
667}
668
669int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
670{
671 struct virtio_gpu_get_capset_info *cmd_p;
672 struct virtio_gpu_vbuffer *vbuf;
673 void *resp_buf;
674
675 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
676 GFP_KERNEL);
677 if (!resp_buf)
678 return -ENOMEM;
679
680 cmd_p = virtio_gpu_alloc_cmd_resp
681 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
682 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
683 resp_buf);
684 memset(cmd_p, 0, sizeof(*cmd_p));
685
686 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
687 cmd_p->capset_index = cpu_to_le32(idx);
688 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
689 return 0;
690}
691
692int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
693 int idx, int version,
694 struct virtio_gpu_drv_cap_cache **cache_p)
695{
696 struct virtio_gpu_get_capset *cmd_p;
697 struct virtio_gpu_vbuffer *vbuf;
698 int max_size;
699 struct virtio_gpu_drv_cap_cache *cache_ent;
700 struct virtio_gpu_drv_cap_cache *search_ent;
701 void *resp_buf;
702
703 *cache_p = NULL;
704
705 if (idx >= vgdev->num_capsets)
706 return -EINVAL;
707
708 if (version > vgdev->capsets[idx].max_version)
709 return -EINVAL;
710
711 cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
712 if (!cache_ent)
713 return -ENOMEM;
714
715 max_size = vgdev->capsets[idx].max_size;
716 cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
717 if (!cache_ent->caps_cache) {
718 kfree(cache_ent);
719 return -ENOMEM;
720 }
721
722 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
723 GFP_KERNEL);
724 if (!resp_buf) {
725 kfree(cache_ent->caps_cache);
726 kfree(cache_ent);
727 return -ENOMEM;
728 }
729
730 cache_ent->version = version;
731 cache_ent->id = vgdev->capsets[idx].id;
732 atomic_set(&cache_ent->is_valid, 0);
733 cache_ent->size = max_size;
734 spin_lock(&vgdev->display_info_lock);
735 /* Search while under lock in case it was added by another task. */
736 list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
737 if (search_ent->id == vgdev->capsets[idx].id &&
738 search_ent->version == version) {
739 *cache_p = search_ent;
740 break;
741 }
742 }
743 if (!*cache_p)
744 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
745 spin_unlock(&vgdev->display_info_lock);
746
747 if (*cache_p) {
748 /* Entry was found, so free everything that was just created. */
749 kfree(resp_buf);
750 kfree(cache_ent->caps_cache);
751 kfree(cache_ent);
752 return 0;
753 }
754
755 cmd_p = virtio_gpu_alloc_cmd_resp
756 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
757 sizeof(struct virtio_gpu_resp_capset) + max_size,
758 resp_buf);
759 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
760 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
761 cmd_p->capset_version = cpu_to_le32(version);
762 *cache_p = cache_ent;
763 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
764
765 return 0;
766}
767
768int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
769{
770 struct virtio_gpu_cmd_get_edid *cmd_p;
771 struct virtio_gpu_vbuffer *vbuf;
772 void *resp_buf;
773 int scanout;
774
775 if (WARN_ON(!vgdev->has_edid))
776 return -EINVAL;
777
778 for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
779 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
780 GFP_KERNEL);
781 if (!resp_buf)
782 return -ENOMEM;
783
784 cmd_p = virtio_gpu_alloc_cmd_resp
785 (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
786 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
787 resp_buf);
788 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
789 cmd_p->scanout = cpu_to_le32(scanout);
790 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
791 }
792
793 return 0;
794}
795
796void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
797 uint32_t nlen, const char *name)
798{
799 struct virtio_gpu_ctx_create *cmd_p;
800 struct virtio_gpu_vbuffer *vbuf;
801
802 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
803 memset(cmd_p, 0, sizeof(*cmd_p));
804
805 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
806 cmd_p->hdr.ctx_id = cpu_to_le32(id);
807 cmd_p->nlen = cpu_to_le32(nlen);
808 strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
809 cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
810 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
811}
812
813void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
814 uint32_t id)
815{
816 struct virtio_gpu_ctx_destroy *cmd_p;
817 struct virtio_gpu_vbuffer *vbuf;
818
819 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
820 memset(cmd_p, 0, sizeof(*cmd_p));
821
822 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
823 cmd_p->hdr.ctx_id = cpu_to_le32(id);
824 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
825}
826
827void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
828 uint32_t ctx_id,
829 uint32_t resource_id)
830{
831 struct virtio_gpu_ctx_resource *cmd_p;
832 struct virtio_gpu_vbuffer *vbuf;
833
834 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
835 memset(cmd_p, 0, sizeof(*cmd_p));
836
837 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
838 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
839 cmd_p->resource_id = cpu_to_le32(resource_id);
840 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
841
842}
843
844void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
845 uint32_t ctx_id,
846 uint32_t resource_id)
847{
848 struct virtio_gpu_ctx_resource *cmd_p;
849 struct virtio_gpu_vbuffer *vbuf;
850
851 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
852 memset(cmd_p, 0, sizeof(*cmd_p));
853
854 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
855 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
856 cmd_p->resource_id = cpu_to_le32(resource_id);
857 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
858}
859
860void
861virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
862 struct virtio_gpu_object *bo,
863 struct virtio_gpu_object_params *params,
864 struct virtio_gpu_fence *fence)
865{
866 struct virtio_gpu_resource_create_3d *cmd_p;
867 struct virtio_gpu_vbuffer *vbuf;
868
869 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
870 memset(cmd_p, 0, sizeof(*cmd_p));
871
872 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
873 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
874 cmd_p->format = cpu_to_le32(params->format);
875 cmd_p->width = cpu_to_le32(params->width);
876 cmd_p->height = cpu_to_le32(params->height);
877
878 cmd_p->target = cpu_to_le32(params->target);
879 cmd_p->bind = cpu_to_le32(params->bind);
880 cmd_p->depth = cpu_to_le32(params->depth);
881 cmd_p->array_size = cpu_to_le32(params->array_size);
882 cmd_p->last_level = cpu_to_le32(params->last_level);
883 cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
884 cmd_p->flags = cpu_to_le32(params->flags);
885
886 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
887 bo->created = true;
888}
889
890void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
891 struct virtio_gpu_object *bo,
892 uint32_t ctx_id,
893 uint64_t offset, uint32_t level,
894 struct virtio_gpu_box *box,
895 struct virtio_gpu_fence *fence)
896{
897 struct virtio_gpu_transfer_host_3d *cmd_p;
898 struct virtio_gpu_vbuffer *vbuf;
899 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
900
901 if (use_dma_api)
902 dma_sync_sg_for_device(vgdev->vdev->dev.parent,
903 bo->pages->sgl, bo->pages->nents,
904 DMA_TO_DEVICE);
905
906 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
907 memset(cmd_p, 0, sizeof(*cmd_p));
908
909 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
910 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
911 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
912 cmd_p->box = *box;
913 cmd_p->offset = cpu_to_le64(offset);
914 cmd_p->level = cpu_to_le32(level);
915
916 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
917}
918
919void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
920 uint32_t resource_id, uint32_t ctx_id,
921 uint64_t offset, uint32_t level,
922 struct virtio_gpu_box *box,
923 struct virtio_gpu_fence *fence)
924{
925 struct virtio_gpu_transfer_host_3d *cmd_p;
926 struct virtio_gpu_vbuffer *vbuf;
927
928 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
929 memset(cmd_p, 0, sizeof(*cmd_p));
930
931 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
932 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
933 cmd_p->resource_id = cpu_to_le32(resource_id);
934 cmd_p->box = *box;
935 cmd_p->offset = cpu_to_le64(offset);
936 cmd_p->level = cpu_to_le32(level);
937
938 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
939}
940
941void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
942 void *data, uint32_t data_size,
943 uint32_t ctx_id, struct virtio_gpu_fence *fence)
944{
945 struct virtio_gpu_cmd_submit *cmd_p;
946 struct virtio_gpu_vbuffer *vbuf;
947
948 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
949 memset(cmd_p, 0, sizeof(*cmd_p));
950
951 vbuf->data_buf = data;
952 vbuf->data_size = data_size;
953
954 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
955 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
956 cmd_p->size = cpu_to_le32(data_size);
957
958 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
959}
960
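/*
 * Attach backing storage to a host resource.  The object's sg table is
 * translated into an array of virtio_gpu_mem_entry elements: DMA addresses
 * when use_dma_api is set, physical addresses otherwise.  The array becomes
 * vbuf->data_buf and is freed in free_vbuf() once the host has consumed
 * the command.
 */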
961int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
962 struct virtio_gpu_object *obj,
963 struct virtio_gpu_fence *fence)
964{
965 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
966 struct virtio_gpu_mem_entry *ents;
967 struct scatterlist *sg;
968 int si, nents;
969
970 if (WARN_ON_ONCE(!obj->created))
971 return -EINVAL;
972
973 if (!obj->pages) {
974 int ret;
975
976 ret = virtio_gpu_object_get_sg_table(vgdev, obj);
977 if (ret)
978 return ret;
979 }
980
981 if (use_dma_api) {
982 obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
983 obj->pages->sgl, obj->pages->nents,
984 DMA_TO_DEVICE);
985 nents = obj->mapped;
986 } else {
987 nents = obj->pages->nents;
988 }
989
990 /* gets freed when the ring has consumed it */
991 ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
992 GFP_KERNEL);
993 if (!ents) {
994 DRM_ERROR("failed to allocate ent list\n");
995 return -ENOMEM;
996 }
997
998 for_each_sg(obj->pages->sgl, sg, nents, si) {
999 ents[si].addr = cpu_to_le64(use_dma_api
1000 ? sg_dma_address(sg)
1001 : sg_phys(sg));
1002 ents[si].length = cpu_to_le32(sg->length);
1003 ents[si].padding = 0;
1004 }
1005
1006 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1007 ents, nents,
1008 fence);
1009 return 0;
1010}
1011
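/*
 * Detach backing storage.  When the DMA API was used for the attach, the
 * detach command is fenced and waited for synchronously before the sg list
 * is unmapped, so the host never sees addresses that are no longer mapped.
 */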
1012void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
1013 struct virtio_gpu_object *obj)
1014{
1015 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1016
1017 if (use_dma_api && obj->mapped) {
1018 struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 1019 /* detach backing and wait for the host to process it ... */
1020 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
1021 dma_fence_wait(&fence->f, true);
1022 dma_fence_put(&fence->f);
1023
1024 /* ... then tear down iommu mappings */
1025 dma_unmap_sg(vgdev->vdev->dev.parent,
1026 obj->pages->sgl, obj->mapped,
1027 DMA_TO_DEVICE);
1028 obj->mapped = 0;
1029 } else {
1030 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
1031 }
1032}
1033
1034void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1035 struct virtio_gpu_output *output)
1036{
1037 struct virtio_gpu_vbuffer *vbuf;
1038 struct virtio_gpu_update_cursor *cur_p;
1039
1040 output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1041 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1042 memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1043 virtio_gpu_queue_cursor(vgdev, vbuf);
1044}
1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Authors:
6 * Dave Airlie <airlied@redhat.com>
7 * Gerd Hoffmann <kraxel@redhat.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <linux/dma-mapping.h>
30#include <linux/virtio.h>
31#include <linux/virtio_config.h>
32#include <linux/virtio_ring.h>
33
34#include "virtgpu_drv.h"
35#include "virtgpu_trace.h"
36
37#define MAX_INLINE_CMD_SIZE 96
38#define MAX_INLINE_RESP_SIZE 24
39#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
40 + MAX_INLINE_CMD_SIZE \
41 + MAX_INLINE_RESP_SIZE)
42
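/* Convert a userspace drm_virtgpu_3d_box to the little-endian wire format. */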
43static void convert_to_hw_box(struct virtio_gpu_box *dst,
44 const struct drm_virtgpu_3d_box *src)
45{
46 dst->x = cpu_to_le32(src->x);
47 dst->y = cpu_to_le32(src->y);
48 dst->z = cpu_to_le32(src->z);
49 dst->w = cpu_to_le32(src->w);
50 dst->h = cpu_to_le32(src->h);
51 dst->d = cpu_to_le32(src->d);
52}
53
54void virtio_gpu_ctrl_ack(struct virtqueue *vq)
55{
56 struct drm_device *dev = vq->vdev->priv;
57 struct virtio_gpu_device *vgdev = dev->dev_private;
58
59 schedule_work(&vgdev->ctrlq.dequeue_work);
60}
61
62void virtio_gpu_cursor_ack(struct virtqueue *vq)
63{
64 struct drm_device *dev = vq->vdev->priv;
65 struct virtio_gpu_device *vgdev = dev->dev_private;
66
67 schedule_work(&vgdev->cursorq.dequeue_work);
68}
69
70int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
71{
72 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
73 VBUFFER_SIZE,
74 __alignof__(struct virtio_gpu_vbuffer),
75 0, NULL);
76 if (!vgdev->vbufs)
77 return -ENOMEM;
78 return 0;
79}
80
81void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
82{
83 kmem_cache_destroy(vgdev->vbufs);
84 vgdev->vbufs = NULL;
85}
86
87static struct virtio_gpu_vbuffer*
88virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
89 int size, int resp_size, void *resp_buf,
90 virtio_gpu_resp_cb resp_cb)
91{
92 struct virtio_gpu_vbuffer *vbuf;
93
94 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
95 if (!vbuf)
96 return ERR_PTR(-ENOMEM);
97
98 BUG_ON(size > MAX_INLINE_CMD_SIZE ||
99 size < sizeof(struct virtio_gpu_ctrl_hdr));
100 vbuf->buf = (void *)vbuf + sizeof(*vbuf);
101 vbuf->size = size;
102
103 vbuf->resp_cb = resp_cb;
104 vbuf->resp_size = resp_size;
105 if (resp_size <= MAX_INLINE_RESP_SIZE)
106 vbuf->resp_buf = (void *)vbuf->buf + size;
107 else
108 vbuf->resp_buf = resp_buf;
109 BUG_ON(!vbuf->resp_buf);
110 return vbuf;
111}
112
113static struct virtio_gpu_ctrl_hdr *
114virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
115{
116 /* this assumes a vbuf contains a command that starts with a
117 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
118 * virtqueues.
119 */
120 return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
121}
122
123static struct virtio_gpu_update_cursor*
124virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
125 struct virtio_gpu_vbuffer **vbuffer_p)
126{
127 struct virtio_gpu_vbuffer *vbuf;
128
129 vbuf = virtio_gpu_get_vbuf
130 (vgdev, sizeof(struct virtio_gpu_update_cursor),
131 0, NULL, NULL);
132 if (IS_ERR(vbuf)) {
133 *vbuffer_p = NULL;
134 return ERR_CAST(vbuf);
135 }
136 *vbuffer_p = vbuf;
137 return (struct virtio_gpu_update_cursor *)vbuf->buf;
138}
139
140static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
141 virtio_gpu_resp_cb cb,
142 struct virtio_gpu_vbuffer **vbuffer_p,
143 int cmd_size, int resp_size,
144 void *resp_buf)
145{
146 struct virtio_gpu_vbuffer *vbuf;
147
148 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
149 resp_size, resp_buf, cb);
150 if (IS_ERR(vbuf)) {
151 *vbuffer_p = NULL;
152 return ERR_CAST(vbuf);
153 }
154 *vbuffer_p = vbuf;
155 return (struct virtio_gpu_command *)vbuf->buf;
156}
157
158static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
159 struct virtio_gpu_vbuffer **vbuffer_p,
160 int size)
161{
162 return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
163 sizeof(struct virtio_gpu_ctrl_hdr),
164 NULL);
165}
166
167static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
168 struct virtio_gpu_vbuffer **vbuffer_p,
169 int size,
170 virtio_gpu_resp_cb cb)
171{
172 return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
173 sizeof(struct virtio_gpu_ctrl_hdr),
174 NULL);
175}
176
177static void free_vbuf(struct virtio_gpu_device *vgdev,
178 struct virtio_gpu_vbuffer *vbuf)
179{
180 if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
181 kfree(vbuf->resp_buf);
182 kvfree(vbuf->data_buf);
183 kmem_cache_free(vgdev->vbufs, vbuf);
184}
185
186static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
187{
188 struct virtio_gpu_vbuffer *vbuf;
189 unsigned int len;
190 int freed = 0;
191
192 while ((vbuf = virtqueue_get_buf(vq, &len))) {
193 list_add_tail(&vbuf->list, reclaim_list);
194 freed++;
195 }
196 if (freed == 0)
197 DRM_DEBUG("Huh? zero vbufs reclaimed");
198}
199
200void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
201{
202 struct virtio_gpu_device *vgdev =
203 container_of(work, struct virtio_gpu_device,
204 ctrlq.dequeue_work);
205 struct list_head reclaim_list;
206 struct virtio_gpu_vbuffer *entry, *tmp;
207 struct virtio_gpu_ctrl_hdr *resp;
208 u64 fence_id = 0;
209
210 INIT_LIST_HEAD(&reclaim_list);
211 spin_lock(&vgdev->ctrlq.qlock);
212 do {
213 virtqueue_disable_cb(vgdev->ctrlq.vq);
214 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
215
216 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
217 spin_unlock(&vgdev->ctrlq.qlock);
218
219 list_for_each_entry(entry, &reclaim_list, list) {
220 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
221
222 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
223
224 if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
225 if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
226 struct virtio_gpu_ctrl_hdr *cmd;
227 cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
228 DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
229 le32_to_cpu(resp->type),
230 le32_to_cpu(cmd->type));
231 } else
232 DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
233 }
234 if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
235 u64 f = le64_to_cpu(resp->fence_id);
236
237 if (fence_id > f) {
238 DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
239 __func__, fence_id, f);
240 } else {
241 fence_id = f;
242 }
243 }
244 if (entry->resp_cb)
245 entry->resp_cb(vgdev, entry);
246 }
247 wake_up(&vgdev->ctrlq.ack_queue);
248
249 if (fence_id)
250 virtio_gpu_fence_event_process(vgdev, fence_id);
251
252 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
253 if (entry->objs)
254 virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
255 list_del(&entry->list);
256 free_vbuf(vgdev, entry);
257 }
258}
259
260void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
261{
262 struct virtio_gpu_device *vgdev =
263 container_of(work, struct virtio_gpu_device,
264 cursorq.dequeue_work);
265 struct list_head reclaim_list;
266 struct virtio_gpu_vbuffer *entry, *tmp;
267
268 INIT_LIST_HEAD(&reclaim_list);
269 spin_lock(&vgdev->cursorq.qlock);
270 do {
271 virtqueue_disable_cb(vgdev->cursorq.vq);
272 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
273 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
274 spin_unlock(&vgdev->cursorq.qlock);
275
276 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
277 list_del(&entry->list);
278 free_vbuf(vgdev, entry);
279 }
280 wake_up(&vgdev->cursorq.ack_queue);
281}
282
283/* Create sg_table from a vmalloc'd buffer. */
284static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
285{
286 int ret, s, i;
287 struct sg_table *sgt;
288 struct scatterlist *sg;
289 struct page *pg;
290
291 if (WARN_ON(!PAGE_ALIGNED(data)))
292 return NULL;
293
294 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
295 if (!sgt)
296 return NULL;
297
298 *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
299 ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
300 if (ret) {
301 kfree(sgt);
302 return NULL;
303 }
304
305 for_each_sg(sgt->sgl, sg, *sg_ents, i) {
306 pg = vmalloc_to_page(data);
307 if (!pg) {
308 sg_free_table(sgt);
309 kfree(sgt);
310 return NULL;
311 }
312
313 s = min_t(int, PAGE_SIZE, size);
314 sg_set_page(sg, pg, s, 0);
315
316 size -= s;
317 data += s;
318 }
319
320 return sgt;
321}
322
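/*
 * Commit a prepared scatterlist to the control virtqueue.  elemcnt is the
 * total descriptor count; with indirect descriptors only one ring slot is
 * needed.  If the device has been unplugged the buffer is simply freed.
 * When the ring is full the lock is dropped, the host is kicked via
 * virtio_gpu_notify() and the thread sleeps until enough descriptors are
 * free; the fence (if any) is then emitted and the buffers added while
 * holding ctrlq.qlock, so fence ids reach the ring in order.  The host is
 * not notified here; the command is only counted in pending_commands and
 * kicked later by virtio_gpu_notify().
 */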
323static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
324 struct virtio_gpu_vbuffer *vbuf,
325 struct virtio_gpu_fence *fence,
326 int elemcnt,
327 struct scatterlist **sgs,
328 int outcnt,
329 int incnt)
330{
331 struct virtqueue *vq = vgdev->ctrlq.vq;
332 int ret, idx;
333
334 if (!drm_dev_enter(vgdev->ddev, &idx)) {
335 if (fence && vbuf->objs)
336 virtio_gpu_array_unlock_resv(vbuf->objs);
337 free_vbuf(vgdev, vbuf);
338 return;
339 }
340
341 if (vgdev->has_indirect)
342 elemcnt = 1;
343
344again:
345 spin_lock(&vgdev->ctrlq.qlock);
346
347 if (vq->num_free < elemcnt) {
348 spin_unlock(&vgdev->ctrlq.qlock);
349 virtio_gpu_notify(vgdev);
350 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
351 goto again;
352 }
353
354 /* now that the position of the vbuf in the virtqueue is known, we can
355 * finally set the fence id
356 */
357 if (fence) {
358 virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
359 fence);
360 if (vbuf->objs) {
361 virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
362 virtio_gpu_array_unlock_resv(vbuf->objs);
363 }
364 }
365
366 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
367 WARN_ON(ret);
368
369 trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
370
371 atomic_inc(&vgdev->pending_commands);
372
373 spin_unlock(&vgdev->ctrlq.qlock);
374
375 drm_dev_exit(idx);
376}
377
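/*
 * Build the scatterlists for a vbuffer.  A vmalloc'ed data payload is
 * converted into a multi-entry sg table with vmalloc_to_sgt(); linear
 * buffers and the command/response use single entries.  The assembled
 * lists are then handed to virtio_gpu_queue_ctrl_sgs().
 */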
378static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
379 struct virtio_gpu_vbuffer *vbuf,
380 struct virtio_gpu_fence *fence)
381{
382 struct scatterlist *sgs[3], vcmd, vout, vresp;
383 struct sg_table *sgt = NULL;
384 int elemcnt = 0, outcnt = 0, incnt = 0;
385
386 /* set up vcmd */
387 sg_init_one(&vcmd, vbuf->buf, vbuf->size);
388 elemcnt++;
389 sgs[outcnt] = &vcmd;
390 outcnt++;
391
392 /* set up vout */
393 if (vbuf->data_size) {
394 if (is_vmalloc_addr(vbuf->data_buf)) {
395 int sg_ents;
396 sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
397 &sg_ents);
398 if (!sgt) {
399 if (fence && vbuf->objs)
400 virtio_gpu_array_unlock_resv(vbuf->objs);
401 return;
402 }
403
404 elemcnt += sg_ents;
405 sgs[outcnt] = sgt->sgl;
406 } else {
407 sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
408 elemcnt++;
409 sgs[outcnt] = &vout;
410 }
411 outcnt++;
412 }
413
414 /* set up vresp */
415 if (vbuf->resp_size) {
416 sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
417 elemcnt++;
418 sgs[outcnt + incnt] = &vresp;
419 incnt++;
420 }
421
422 virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
423 incnt);
424
425 if (sgt) {
426 sg_free_table(sgt);
427 kfree(sgt);
428 }
429}
430
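/*
 * Batched doorbell: the queuing functions only mark commands as pending,
 * and this kicks the host once for everything queued since the last
 * notify.  virtqueue_kick_prepare() runs under the queue lock, the
 * (possibly slow) virtqueue_notify() without it.
 */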
431void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
432{
433 bool notify;
434
435 if (!atomic_read(&vgdev->pending_commands))
436 return;
437
438 spin_lock(&vgdev->ctrlq.qlock);
439 atomic_set(&vgdev->pending_commands, 0);
440 notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
441 spin_unlock(&vgdev->ctrlq.qlock);
442
443 if (notify)
444 virtqueue_notify(vgdev->ctrlq.vq);
445}
446
447static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
448 struct virtio_gpu_vbuffer *vbuf)
449{
450 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
451}
452
453static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
454 struct virtio_gpu_vbuffer *vbuf)
455{
456 struct virtqueue *vq = vgdev->cursorq.vq;
457 struct scatterlist *sgs[1], ccmd;
458 int idx, ret, outcnt;
459 bool notify;
460
461 if (!drm_dev_enter(vgdev->ddev, &idx)) {
462 free_vbuf(vgdev, vbuf);
463 return;
464 }
465
466 sg_init_one(&ccmd, vbuf->buf, vbuf->size);
467 sgs[0] = &ccmd;
468 outcnt = 1;
469
470 spin_lock(&vgdev->cursorq.qlock);
471retry:
472 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
473 if (ret == -ENOSPC) {
474 spin_unlock(&vgdev->cursorq.qlock);
475 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
476 spin_lock(&vgdev->cursorq.qlock);
477 goto retry;
478 } else {
479 trace_virtio_gpu_cmd_queue(vq,
480 virtio_gpu_vbuf_ctrl_hdr(vbuf));
481
482 notify = virtqueue_kick_prepare(vq);
483 }
484
485 spin_unlock(&vgdev->cursorq.qlock);
486
487 if (notify)
488 virtqueue_notify(vq);
489
490 drm_dev_exit(idx);
491}
492
 493/* Just create gem objects for userspace and long-lived objects;
 494 * should the queue objects simply use dma_alloc'ed pages?
 495 */
496
497/* create a basic resource */
498void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
499 struct virtio_gpu_object *bo,
500 struct virtio_gpu_object_params *params,
501 struct virtio_gpu_object_array *objs,
502 struct virtio_gpu_fence *fence)
503{
504 struct virtio_gpu_resource_create_2d *cmd_p;
505 struct virtio_gpu_vbuffer *vbuf;
506
507 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
508 memset(cmd_p, 0, sizeof(*cmd_p));
509 vbuf->objs = objs;
510
511 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
512 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
513 cmd_p->format = cpu_to_le32(params->format);
514 cmd_p->width = cpu_to_le32(params->width);
515 cmd_p->height = cpu_to_le32(params->height);
516
517 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
518 bo->created = true;
519}
520
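/*
 * Resource unref completes asynchronously: the object is stashed in
 * resp_cb_data and only cleaned up in this callback, i.e. after the host
 * has acknowledged VIRTIO_GPU_CMD_RESOURCE_UNREF and can no longer touch
 * the backing pages.
 */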
521static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
522 struct virtio_gpu_vbuffer *vbuf)
523{
524 struct virtio_gpu_object *bo;
525
526 bo = vbuf->resp_cb_data;
527 vbuf->resp_cb_data = NULL;
528
529 virtio_gpu_cleanup_object(bo);
530}
531
532void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
533 struct virtio_gpu_object *bo)
534{
535 struct virtio_gpu_resource_unref *cmd_p;
536 struct virtio_gpu_vbuffer *vbuf;
537
538 cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
539 virtio_gpu_cmd_unref_cb);
540 memset(cmd_p, 0, sizeof(*cmd_p));
541
542 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
543 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
544
545 vbuf->resp_cb_data = bo;
546 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
547}
548
549void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
550 uint32_t scanout_id, uint32_t resource_id,
551 uint32_t width, uint32_t height,
552 uint32_t x, uint32_t y)
553{
554 struct virtio_gpu_set_scanout *cmd_p;
555 struct virtio_gpu_vbuffer *vbuf;
556
557 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
558 memset(cmd_p, 0, sizeof(*cmd_p));
559
560 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
561 cmd_p->resource_id = cpu_to_le32(resource_id);
562 cmd_p->scanout_id = cpu_to_le32(scanout_id);
563 cmd_p->r.width = cpu_to_le32(width);
564 cmd_p->r.height = cpu_to_le32(height);
565 cmd_p->r.x = cpu_to_le32(x);
566 cmd_p->r.y = cpu_to_le32(y);
567
568 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
569}
570
571void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
572 uint32_t resource_id,
573 uint32_t x, uint32_t y,
574 uint32_t width, uint32_t height)
575{
576 struct virtio_gpu_resource_flush *cmd_p;
577 struct virtio_gpu_vbuffer *vbuf;
578
579 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
580 memset(cmd_p, 0, sizeof(*cmd_p));
581
582 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
583 cmd_p->resource_id = cpu_to_le32(resource_id);
584 cmd_p->r.width = cpu_to_le32(width);
585 cmd_p->r.height = cpu_to_le32(height);
586 cmd_p->r.x = cpu_to_le32(x);
587 cmd_p->r.y = cpu_to_le32(y);
588
589 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
590}
591
592void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
593 uint64_t offset,
594 uint32_t width, uint32_t height,
595 uint32_t x, uint32_t y,
596 struct virtio_gpu_object_array *objs,
597 struct virtio_gpu_fence *fence)
598{
599 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
600 struct virtio_gpu_transfer_to_host_2d *cmd_p;
601 struct virtio_gpu_vbuffer *vbuf;
602 bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
603 struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
604
605 if (use_dma_api)
606 dma_sync_sg_for_device(vgdev->vdev->dev.parent,
607 shmem->pages->sgl, shmem->pages->nents,
608 DMA_TO_DEVICE);
609
610 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
611 memset(cmd_p, 0, sizeof(*cmd_p));
612 vbuf->objs = objs;
613
614 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
615 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
616 cmd_p->offset = cpu_to_le64(offset);
617 cmd_p->r.width = cpu_to_le32(width);
618 cmd_p->r.height = cpu_to_le32(height);
619 cmd_p->r.x = cpu_to_le32(x);
620 cmd_p->r.y = cpu_to_le32(y);
621
622 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
623}
624
625static void
626virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
627 uint32_t resource_id,
628 struct virtio_gpu_mem_entry *ents,
629 uint32_t nents,
630 struct virtio_gpu_fence *fence)
631{
632 struct virtio_gpu_resource_attach_backing *cmd_p;
633 struct virtio_gpu_vbuffer *vbuf;
634
635 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
636 memset(cmd_p, 0, sizeof(*cmd_p));
637
638 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
639 cmd_p->resource_id = cpu_to_le32(resource_id);
640 cmd_p->nr_entries = cpu_to_le32(nents);
641
642 vbuf->data_buf = ents;
643 vbuf->data_size = sizeof(*ents) * nents;
644
645 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
646}
647
648static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
649 struct virtio_gpu_vbuffer *vbuf)
650{
651 struct virtio_gpu_resp_display_info *resp =
652 (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
653 int i;
654
655 spin_lock(&vgdev->display_info_lock);
656 for (i = 0; i < vgdev->num_scanouts; i++) {
657 vgdev->outputs[i].info = resp->pmodes[i];
658 if (resp->pmodes[i].enabled) {
659 DRM_DEBUG("output %d: %dx%d+%d+%d", i,
660 le32_to_cpu(resp->pmodes[i].r.width),
661 le32_to_cpu(resp->pmodes[i].r.height),
662 le32_to_cpu(resp->pmodes[i].r.x),
663 le32_to_cpu(resp->pmodes[i].r.y));
664 } else {
665 DRM_DEBUG("output %d: disabled", i);
666 }
667 }
668
669 vgdev->display_info_pending = false;
670 spin_unlock(&vgdev->display_info_lock);
671 wake_up(&vgdev->resp_wq);
672
673 if (!drm_helper_hpd_irq_event(vgdev->ddev))
674 drm_kms_helper_hotplug_event(vgdev->ddev);
675}
676
677static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
678 struct virtio_gpu_vbuffer *vbuf)
679{
680 struct virtio_gpu_get_capset_info *cmd =
681 (struct virtio_gpu_get_capset_info *)vbuf->buf;
682 struct virtio_gpu_resp_capset_info *resp =
683 (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
684 int i = le32_to_cpu(cmd->capset_index);
685
686 spin_lock(&vgdev->display_info_lock);
687 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
688 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
689 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
690 spin_unlock(&vgdev->display_info_lock);
691 wake_up(&vgdev->resp_wq);
692}
693
694static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
695 struct virtio_gpu_vbuffer *vbuf)
696{
697 struct virtio_gpu_get_capset *cmd =
698 (struct virtio_gpu_get_capset *)vbuf->buf;
699 struct virtio_gpu_resp_capset *resp =
700 (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
701 struct virtio_gpu_drv_cap_cache *cache_ent;
702
703 spin_lock(&vgdev->display_info_lock);
704 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
705 if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
706 cache_ent->id == le32_to_cpu(cmd->capset_id)) {
707 memcpy(cache_ent->caps_cache, resp->capset_data,
708 cache_ent->size);
709 /* Copy must occur before is_valid is signalled. */
710 smp_wmb();
711 atomic_set(&cache_ent->is_valid, 1);
712 break;
713 }
714 }
715 spin_unlock(&vgdev->display_info_lock);
716 wake_up_all(&vgdev->resp_wq);
717}
718
719static int virtio_get_edid_block(void *data, u8 *buf,
720 unsigned int block, size_t len)
721{
722 struct virtio_gpu_resp_edid *resp = data;
723 size_t start = block * EDID_LENGTH;
724
725 if (start + len > le32_to_cpu(resp->size))
726 return -1;
727 memcpy(buf, resp->edid + start, len);
728 return 0;
729}
730
731static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
732 struct virtio_gpu_vbuffer *vbuf)
733{
734 struct virtio_gpu_cmd_get_edid *cmd =
735 (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
736 struct virtio_gpu_resp_edid *resp =
737 (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
738 uint32_t scanout = le32_to_cpu(cmd->scanout);
739 struct virtio_gpu_output *output;
740 struct edid *new_edid, *old_edid;
741
742 if (scanout >= vgdev->num_scanouts)
743 return;
744 output = vgdev->outputs + scanout;
745
746 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
747 drm_connector_update_edid_property(&output->conn, new_edid);
748
749 spin_lock(&vgdev->display_info_lock);
750 old_edid = output->edid;
751 output->edid = new_edid;
752 spin_unlock(&vgdev->display_info_lock);
753
754 kfree(old_edid);
755 wake_up(&vgdev->resp_wq);
756}
757
758int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
759{
760 struct virtio_gpu_ctrl_hdr *cmd_p;
761 struct virtio_gpu_vbuffer *vbuf;
762 void *resp_buf;
763
764 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
765 GFP_KERNEL);
766 if (!resp_buf)
767 return -ENOMEM;
768
769 cmd_p = virtio_gpu_alloc_cmd_resp
770 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
771 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
772 resp_buf);
773 memset(cmd_p, 0, sizeof(*cmd_p));
774
775 vgdev->display_info_pending = true;
776 cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
777 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
778 return 0;
779}
780
781int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
782{
783 struct virtio_gpu_get_capset_info *cmd_p;
784 struct virtio_gpu_vbuffer *vbuf;
785 void *resp_buf;
786
787 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
788 GFP_KERNEL);
789 if (!resp_buf)
790 return -ENOMEM;
791
792 cmd_p = virtio_gpu_alloc_cmd_resp
793 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
794 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
795 resp_buf);
796 memset(cmd_p, 0, sizeof(*cmd_p));
797
798 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
799 cmd_p->capset_index = cpu_to_le32(idx);
800 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
801 return 0;
802}
803
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

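/* Request the EDID of every scanout; one GET_EDID command per scanout. */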
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

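/* Create a host rendering context with a human-readable debug name. */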
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

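/* Make a resource usable by the given host rendering context. */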
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

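/*
 * Create the host-side 3D resource backing @bo from the 3D parameters
 * supplied by userspace.  @objs pins the GEM object until the command
 * completes; @fence, if any, is signalled on completion.
 */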
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

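/*
 * Copy (a box of) guest backing memory into the host resource.  When the
 * transport uses the DMA API, the backing pages are synced for device
 * access before the command is queued.
 */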
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	/*
	 * Sync the whole sg_table with the sgtable helper, which uses the
	 * original entry count rather than the (possibly smaller) mapped
	 * entry count that dma_sync_sg_for_device() would have been given.
	 */
	if (use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

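/* Read (a box of) a host resource back into the guest backing memory. */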
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

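/* Submit a 3D command stream (execbuffer) to the host for the given context. */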
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

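/* Attach guest backing memory entries to an already-created host resource. */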
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

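/* Push the current cursor state for one output through the cursor queue. */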
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}