v4.6
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie <airlied@redhat.com>
  7 *    Gerd Hoffmann <kraxel@redhat.com>
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the next
 17 * paragraph) shall be included in all copies or substantial portions of the
 18 * Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 26 * OTHER DEALINGS IN THE SOFTWARE.
 27 */
 28
 29#include <drm/drmP.h>
 30#include "virtgpu_drv.h"
 31#include <linux/virtio.h>
 32#include <linux/virtio_config.h>
 33#include <linux/virtio_ring.h>
 34
 35#define MAX_INLINE_CMD_SIZE   96
 36#define MAX_INLINE_RESP_SIZE  24
 37#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
 38			       + MAX_INLINE_CMD_SIZE		 \
 39			       + MAX_INLINE_RESP_SIZE)
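/*
 * Layout of each pool slot implied by VBUFFER_SIZE: the vbuffer
 * bookkeeping struct is followed by an inline command area and an
 * inline response area.  virtio_gpu_get_vbuf() below points vbuf->buf
 * at the inline command area and, for responses of up to
 * MAX_INLINE_RESP_SIZE bytes, vbuf->resp_buf at the inline response area.
 */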
 40
 41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 42				uint32_t *resid)
 43{
 44	int handle;
 45
 46	idr_preload(GFP_KERNEL);
 47	spin_lock(&vgdev->resource_idr_lock);
 48	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
 49	spin_unlock(&vgdev->resource_idr_lock);
 50	idr_preload_end();
 51	*resid = handle;
 52}
 53
 54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 55{
 56	spin_lock(&vgdev->resource_idr_lock);
 57	idr_remove(&vgdev->resource_idr, id);
 58	spin_unlock(&vgdev->resource_idr_lock);
 59}
 60
 61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 62{
 63	struct drm_device *dev = vq->vdev->priv;
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 65	schedule_work(&vgdev->ctrlq.dequeue_work);
 66}
 67
 68void virtio_gpu_cursor_ack(struct virtqueue *vq)
 69{
 70	struct drm_device *dev = vq->vdev->priv;
 71	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 72	schedule_work(&vgdev->cursorq.dequeue_work);
 73}
 74
 75int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 76{
 77	struct virtio_gpu_vbuffer *vbuf;
 78	int i, size, count = 0;
 79	void *ptr;
 80
 81	INIT_LIST_HEAD(&vgdev->free_vbufs);
 82	spin_lock_init(&vgdev->free_vbufs_lock);
 83	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
 84	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
 85	size = count * VBUFFER_SIZE;
 86	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
 87		 count, VBUFFER_SIZE, size / 1024);
 88
 89	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
 90	if (!vgdev->vbufs)
 91		return -ENOMEM;
 92
 93	for (i = 0, ptr = vgdev->vbufs;
 94	     i < count;
 95	     i++, ptr += VBUFFER_SIZE) {
 96		vbuf = ptr;
 97		list_add(&vbuf->list, &vgdev->free_vbufs);
 98	}
 99	return 0;
100}
101
102void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
103{
104	struct virtio_gpu_vbuffer *vbuf;
105	int i, count = 0;
106
107	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
108	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
109
110	spin_lock(&vgdev->free_vbufs_lock);
111	for (i = 0; i < count; i++) {
112		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
113			return;
114		vbuf = list_first_entry(&vgdev->free_vbufs,
115					struct virtio_gpu_vbuffer, list);
116		list_del(&vbuf->list);
117	}
118	spin_unlock(&vgdev->free_vbufs_lock);
119	kfree(vgdev->vbufs);
120}
121
122static struct virtio_gpu_vbuffer*
123virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
124		    int size, int resp_size, void *resp_buf,
125		    virtio_gpu_resp_cb resp_cb)
126{
127	struct virtio_gpu_vbuffer *vbuf;
128
129	spin_lock(&vgdev->free_vbufs_lock);
130	BUG_ON(list_empty(&vgdev->free_vbufs));
131	vbuf = list_first_entry(&vgdev->free_vbufs,
132				struct virtio_gpu_vbuffer, list);
133	list_del(&vbuf->list);
134	spin_unlock(&vgdev->free_vbufs_lock);
135	memset(vbuf, 0, VBUFFER_SIZE);
136
137	BUG_ON(size > MAX_INLINE_CMD_SIZE);
 
138	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
139	vbuf->size = size;
140
141	vbuf->resp_cb = resp_cb;
142	vbuf->resp_size = resp_size;
143	if (resp_size <= MAX_INLINE_RESP_SIZE)
144		vbuf->resp_buf = (void *)vbuf->buf + size;
145	else
146		vbuf->resp_buf = resp_buf;
147	BUG_ON(!vbuf->resp_buf);
148	return vbuf;
149}
150
151static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
152				  struct virtio_gpu_vbuffer **vbuffer_p,
153				  int size)
154{
155	struct virtio_gpu_vbuffer *vbuf;
156
157	vbuf = virtio_gpu_get_vbuf(vgdev, size,
158				   sizeof(struct virtio_gpu_ctrl_hdr),
159				   NULL, NULL);
160	if (IS_ERR(vbuf)) {
161		*vbuffer_p = NULL;
162		return ERR_CAST(vbuf);
163	}
164	*vbuffer_p = vbuf;
165	return vbuf->buf;
166}
167
168static struct virtio_gpu_update_cursor*
169virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
170			struct virtio_gpu_vbuffer **vbuffer_p)
171{
172	struct virtio_gpu_vbuffer *vbuf;
173
174	vbuf = virtio_gpu_get_vbuf
175		(vgdev, sizeof(struct virtio_gpu_update_cursor),
176		 0, NULL, NULL);
177	if (IS_ERR(vbuf)) {
178		*vbuffer_p = NULL;
179		return ERR_CAST(vbuf);
180	}
181	*vbuffer_p = vbuf;
182	return (struct virtio_gpu_update_cursor *)vbuf->buf;
183}
184
185static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
186				       virtio_gpu_resp_cb cb,
187				       struct virtio_gpu_vbuffer **vbuffer_p,
188				       int cmd_size, int resp_size,
189				       void *resp_buf)
190{
191	struct virtio_gpu_vbuffer *vbuf;
192
193	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
194				   resp_size, resp_buf, cb);
195	if (IS_ERR(vbuf)) {
196		*vbuffer_p = NULL;
197		return ERR_CAST(vbuf);
198	}
199	*vbuffer_p = vbuf;
200	return (struct virtio_gpu_command *)vbuf->buf;
201}
202
203static void free_vbuf(struct virtio_gpu_device *vgdev,
204		      struct virtio_gpu_vbuffer *vbuf)
205{
206	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
207		kfree(vbuf->resp_buf);
208	kfree(vbuf->data_buf);
209	spin_lock(&vgdev->free_vbufs_lock);
210	list_add(&vbuf->list, &vgdev->free_vbufs);
211	spin_unlock(&vgdev->free_vbufs_lock);
212}
213
214static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
215{
216	struct virtio_gpu_vbuffer *vbuf;
217	unsigned int len;
218	int freed = 0;
219
220	while ((vbuf = virtqueue_get_buf(vq, &len))) {
221		list_add_tail(&vbuf->list, reclaim_list);
222		freed++;
223	}
224	if (freed == 0)
225		DRM_DEBUG("Huh? zero vbufs reclaimed");
226}
227
228void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
229{
230	struct virtio_gpu_device *vgdev =
231		container_of(work, struct virtio_gpu_device,
232			     ctrlq.dequeue_work);
233	struct list_head reclaim_list;
234	struct virtio_gpu_vbuffer *entry, *tmp;
235	struct virtio_gpu_ctrl_hdr *resp;
236	u64 fence_id = 0;
237
238	INIT_LIST_HEAD(&reclaim_list);
239	spin_lock(&vgdev->ctrlq.qlock);
240	do {
241		virtqueue_disable_cb(vgdev->ctrlq.vq);
242		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
243
244	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
245	spin_unlock(&vgdev->ctrlq.qlock);
246
247	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
248		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
249		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
250			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
251		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
252			u64 f = le64_to_cpu(resp->fence_id);
253
254			if (fence_id > f) {
255				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
256					  __func__, fence_id, f);
257			} else {
258				fence_id = f;
259			}
260		}
261		if (entry->resp_cb)
262			entry->resp_cb(vgdev, entry);
263
264		list_del(&entry->list);
265		free_vbuf(vgdev, entry);
266	}
267	wake_up(&vgdev->ctrlq.ack_queue);
268
269	if (fence_id)
270		virtio_gpu_fence_event_process(vgdev, fence_id);
271}
272
273void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
274{
275	struct virtio_gpu_device *vgdev =
276		container_of(work, struct virtio_gpu_device,
277			     cursorq.dequeue_work);
278	struct list_head reclaim_list;
279	struct virtio_gpu_vbuffer *entry, *tmp;
280
281	INIT_LIST_HEAD(&reclaim_list);
282	spin_lock(&vgdev->cursorq.qlock);
283	do {
284		virtqueue_disable_cb(vgdev->cursorq.vq);
285		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
286	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
287	spin_unlock(&vgdev->cursorq.qlock);
288
289	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
290		list_del(&entry->list);
291		free_vbuf(vgdev, entry);
292	}
293	wake_up(&vgdev->cursorq.ack_queue);
294}
295
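/*
 * Must be called with vgdev->ctrlq.qlock held.  Note that on -ENOSPC the
 * lock is dropped and re-taken while waiting for ring space, so fence
 * ordering is only preserved when the caller reserves enough descriptors
 * up front, as virtio_gpu_queue_fenced_ctrl_buffer() does below.
 */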
296static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
297					       struct virtio_gpu_vbuffer *vbuf)
298{
299	struct virtqueue *vq = vgdev->ctrlq.vq;
300	struct scatterlist *sgs[3], vcmd, vout, vresp;
301	int outcnt = 0, incnt = 0;
302	int ret;
303
304	if (!vgdev->vqs_ready)
305		return -ENODEV;
306
 
307	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
308	sgs[outcnt+incnt] = &vcmd;
 
309	outcnt++;
310
 
311	if (vbuf->data_size) {
312		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
313		sgs[outcnt + incnt] = &vout;
314		outcnt++;
315	}
316
 
317	if (vbuf->resp_size) {
318		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 
319		sgs[outcnt + incnt] = &vresp;
320		incnt++;
321	}
322
323retry:
324	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
325	if (ret == -ENOSPC) {
326		spin_unlock(&vgdev->ctrlq.qlock);
327		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
328		spin_lock(&vgdev->ctrlq.qlock);
329		goto retry;
330	} else {
331		virtqueue_kick(vq);
332	}
333
334	if (!ret)
335		ret = vq->num_free;
336	return ret;
337}
338
339static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
340					struct virtio_gpu_vbuffer *vbuf)
341{
342	int rc;
343
344	spin_lock(&vgdev->ctrlq.qlock);
345	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 
346	spin_unlock(&vgdev->ctrlq.qlock);
347	return rc;
348}
349
350static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
351					       struct virtio_gpu_vbuffer *vbuf,
352					       struct virtio_gpu_ctrl_hdr *hdr,
353					       struct virtio_gpu_fence **fence)
354{
355	struct virtqueue *vq = vgdev->ctrlq.vq;
356	int rc;
357
358again:
359	spin_lock(&vgdev->ctrlq.qlock);
360
361	/*
 362	 * Make sure we have enough space in the virtqueue.  If not,
 363	 * wait here until we have.
 364	 *
 365	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
366	 * to wait for free space, which can result in fence ids being
367	 * submitted out-of-order.
368	 */
369	if (vq->num_free < 3) {
370		spin_unlock(&vgdev->ctrlq.qlock);
371		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
372		goto again;
373	}
374
375	if (fence)
376		virtio_gpu_fence_emit(vgdev, hdr, fence);
377	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
378	spin_unlock(&vgdev->ctrlq.qlock);
379	return rc;
380}
381
382static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
383				   struct virtio_gpu_vbuffer *vbuf)
384{
385	struct virtqueue *vq = vgdev->cursorq.vq;
386	struct scatterlist *sgs[1], ccmd;
387	int ret;
388	int outcnt;
389
390	if (!vgdev->vqs_ready)
391		return -ENODEV;
392
393	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
394	sgs[0] = &ccmd;
395	outcnt = 1;
396
397	spin_lock(&vgdev->cursorq.qlock);
398retry:
399	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
400	if (ret == -ENOSPC) {
401		spin_unlock(&vgdev->cursorq.qlock);
402		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
403		spin_lock(&vgdev->cursorq.qlock);
404		goto retry;
405	} else {
406		virtqueue_kick(vq);
407	}
408
409	spin_unlock(&vgdev->cursorq.qlock);
410
411	if (!ret)
412		ret = vq->num_free;
413	return ret;
 
414}
415
416/* just create gem objects for userspace and long lived objects,
417   just use dma_alloced pages for the queue objects? */
 
418
419/* create a basic resource */
420void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
421				    uint32_t resource_id,
422				    uint32_t format,
423				    uint32_t width,
424				    uint32_t height)
425{
426	struct virtio_gpu_resource_create_2d *cmd_p;
427	struct virtio_gpu_vbuffer *vbuf;
428
429	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
430	memset(cmd_p, 0, sizeof(*cmd_p));
 
431
432	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
433	cmd_p->resource_id = cpu_to_le32(resource_id);
434	cmd_p->format = cpu_to_le32(format);
435	cmd_p->width = cpu_to_le32(width);
436	cmd_p->height = cpu_to_le32(height);
437
438	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 
439}
440
441void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
442				   uint32_t resource_id)
443{
444	struct virtio_gpu_resource_unref *cmd_p;
445	struct virtio_gpu_vbuffer *vbuf;
446
447	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
448	memset(cmd_p, 0, sizeof(*cmd_p));
449
450	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
451	cmd_p->resource_id = cpu_to_le32(resource_id);
452
453	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
454}
455
456void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
457					   uint32_t resource_id)
458{
459	struct virtio_gpu_resource_detach_backing *cmd_p;
460	struct virtio_gpu_vbuffer *vbuf;
 
461
462	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 
463	memset(cmd_p, 0, sizeof(*cmd_p));
464
465	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
466	cmd_p->resource_id = cpu_to_le32(resource_id);
467
468	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
469}
470
471void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
472				uint32_t scanout_id, uint32_t resource_id,
473				uint32_t width, uint32_t height,
474				uint32_t x, uint32_t y)
475{
476	struct virtio_gpu_set_scanout *cmd_p;
477	struct virtio_gpu_vbuffer *vbuf;
478
479	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
480	memset(cmd_p, 0, sizeof(*cmd_p));
481
482	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
483	cmd_p->resource_id = cpu_to_le32(resource_id);
484	cmd_p->scanout_id = cpu_to_le32(scanout_id);
485	cmd_p->r.width = cpu_to_le32(width);
486	cmd_p->r.height = cpu_to_le32(height);
487	cmd_p->r.x = cpu_to_le32(x);
488	cmd_p->r.y = cpu_to_le32(y);
489
490	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
491}
492
493void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
494				   uint32_t resource_id,
495				   uint32_t x, uint32_t y,
496				   uint32_t width, uint32_t height)
497{
498	struct virtio_gpu_resource_flush *cmd_p;
499	struct virtio_gpu_vbuffer *vbuf;
500
501	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
502	memset(cmd_p, 0, sizeof(*cmd_p));
503
504	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
505	cmd_p->resource_id = cpu_to_le32(resource_id);
506	cmd_p->r.width = cpu_to_le32(width);
507	cmd_p->r.height = cpu_to_le32(height);
508	cmd_p->r.x = cpu_to_le32(x);
509	cmd_p->r.y = cpu_to_le32(y);
510
511	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
512}
513
514void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
515					uint32_t resource_id, uint64_t offset,
516					__le32 width, __le32 height,
517					__le32 x, __le32 y,
518					struct virtio_gpu_fence **fence)
 
519{
 
520	struct virtio_gpu_transfer_to_host_2d *cmd_p;
521	struct virtio_gpu_vbuffer *vbuf;
522
523	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
524	memset(cmd_p, 0, sizeof(*cmd_p));
 
525
526	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
527	cmd_p->resource_id = cpu_to_le32(resource_id);
528	cmd_p->offset = cpu_to_le64(offset);
529	cmd_p->r.width = width;
530	cmd_p->r.height = height;
531	cmd_p->r.x = x;
532	cmd_p->r.y = y;
533
534	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
535}
536
537static void
538virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
539				       uint32_t resource_id,
540				       struct virtio_gpu_mem_entry *ents,
541				       uint32_t nents,
542				       struct virtio_gpu_fence **fence)
543{
544	struct virtio_gpu_resource_attach_backing *cmd_p;
545	struct virtio_gpu_vbuffer *vbuf;
546
547	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
548	memset(cmd_p, 0, sizeof(*cmd_p));
549
550	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
551	cmd_p->resource_id = cpu_to_le32(resource_id);
552	cmd_p->nr_entries = cpu_to_le32(nents);
553
554	vbuf->data_buf = ents;
555	vbuf->data_size = sizeof(*ents) * nents;
556
557	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
558}
559
560static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
561					       struct virtio_gpu_vbuffer *vbuf)
562{
563	struct virtio_gpu_resp_display_info *resp =
564		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
565	int i;
566
567	spin_lock(&vgdev->display_info_lock);
568	for (i = 0; i < vgdev->num_scanouts; i++) {
569		vgdev->outputs[i].info = resp->pmodes[i];
570		if (resp->pmodes[i].enabled) {
571			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
572				  le32_to_cpu(resp->pmodes[i].r.width),
573				  le32_to_cpu(resp->pmodes[i].r.height),
574				  le32_to_cpu(resp->pmodes[i].r.x),
575				  le32_to_cpu(resp->pmodes[i].r.y));
576		} else {
577			DRM_DEBUG("output %d: disabled", i);
578		}
579	}
580
581	vgdev->display_info_pending = false;
582	spin_unlock(&vgdev->display_info_lock);
583	wake_up(&vgdev->resp_wq);
584
585	if (!drm_helper_hpd_irq_event(vgdev->ddev))
586		drm_kms_helper_hotplug_event(vgdev->ddev);
587}
588
589static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
590					      struct virtio_gpu_vbuffer *vbuf)
591{
592	struct virtio_gpu_get_capset_info *cmd =
593		(struct virtio_gpu_get_capset_info *)vbuf->buf;
594	struct virtio_gpu_resp_capset_info *resp =
595		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
596	int i = le32_to_cpu(cmd->capset_index);
597
598	spin_lock(&vgdev->display_info_lock);
599	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
600	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
601	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
602	spin_unlock(&vgdev->display_info_lock);
603	wake_up(&vgdev->resp_wq);
604}
605
606static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
607				     struct virtio_gpu_vbuffer *vbuf)
608{
609	struct virtio_gpu_get_capset *cmd =
610		(struct virtio_gpu_get_capset *)vbuf->buf;
611	struct virtio_gpu_resp_capset *resp =
612		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
613	struct virtio_gpu_drv_cap_cache *cache_ent;
614
615	spin_lock(&vgdev->display_info_lock);
616	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
617		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
618		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
619			memcpy(cache_ent->caps_cache, resp->capset_data,
620			       cache_ent->size);
621			atomic_set(&cache_ent->is_valid, 1);
622			break;
623		}
624	}
625	spin_unlock(&vgdev->display_info_lock);
626	wake_up(&vgdev->resp_wq);
627}
628
629
630int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
631{
632	struct virtio_gpu_ctrl_hdr *cmd_p;
633	struct virtio_gpu_vbuffer *vbuf;
634	void *resp_buf;
635
636	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
637			   GFP_KERNEL);
638	if (!resp_buf)
639		return -ENOMEM;
640
641	cmd_p = virtio_gpu_alloc_cmd_resp
642		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
643		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
644		 resp_buf);
645	memset(cmd_p, 0, sizeof(*cmd_p));
646
647	vgdev->display_info_pending = true;
648	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
649	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
650	return 0;
651}
652
653int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
654{
655	struct virtio_gpu_get_capset_info *cmd_p;
656	struct virtio_gpu_vbuffer *vbuf;
657	void *resp_buf;
658
659	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
660			   GFP_KERNEL);
661	if (!resp_buf)
662		return -ENOMEM;
663
664	cmd_p = virtio_gpu_alloc_cmd_resp
665		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
666		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
667		 resp_buf);
668	memset(cmd_p, 0, sizeof(*cmd_p));
669
670	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
671	cmd_p->capset_index = cpu_to_le32(idx);
672	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
673	return 0;
674}
675
676int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
677			      int idx, int version,
678			      struct virtio_gpu_drv_cap_cache **cache_p)
679{
680	struct virtio_gpu_get_capset *cmd_p;
681	struct virtio_gpu_vbuffer *vbuf;
682	int max_size = vgdev->capsets[idx].max_size;
683	struct virtio_gpu_drv_cap_cache *cache_ent;
 
684	void *resp_buf;
685
686	if (idx > vgdev->num_capsets)
687		return -EINVAL;
688
689	if (version > vgdev->capsets[idx].max_version)
690		return -EINVAL;
691
692	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
693	if (!cache_ent)
694		return -ENOMEM;
695
 
696	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
697	if (!cache_ent->caps_cache) {
698		kfree(cache_ent);
699		return -ENOMEM;
700	}
701
702	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
703			   GFP_KERNEL);
704	if (!resp_buf) {
705		kfree(cache_ent->caps_cache);
706		kfree(cache_ent);
707		return -ENOMEM;
708	}
709
710	cache_ent->version = version;
711	cache_ent->id = vgdev->capsets[idx].id;
712	atomic_set(&cache_ent->is_valid, 0);
713	cache_ent->size = max_size;
714	spin_lock(&vgdev->display_info_lock);
715	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
716	spin_unlock(&vgdev->display_info_lock);
717
718	cmd_p = virtio_gpu_alloc_cmd_resp
719		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
720		 sizeof(struct virtio_gpu_resp_capset) + max_size,
721		 resp_buf);
722	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
723	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
724	cmd_p->capset_version = cpu_to_le32(version);
725	*cache_p = cache_ent;
726	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
727
728	return 0;
729}
730
731void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
732				   uint32_t nlen, const char *name)
733{
734	struct virtio_gpu_ctx_create *cmd_p;
735	struct virtio_gpu_vbuffer *vbuf;
736
737	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
738	memset(cmd_p, 0, sizeof(*cmd_p));
739
740	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
741	cmd_p->hdr.ctx_id = cpu_to_le32(id);
742	cmd_p->nlen = cpu_to_le32(nlen);
743	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
744	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
745	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
746}
747
748void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
749				    uint32_t id)
750{
751	struct virtio_gpu_ctx_destroy *cmd_p;
752	struct virtio_gpu_vbuffer *vbuf;
753
754	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
755	memset(cmd_p, 0, sizeof(*cmd_p));
756
757	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
758	cmd_p->hdr.ctx_id = cpu_to_le32(id);
759	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
760}
761
762void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
763					    uint32_t ctx_id,
764					    uint32_t resource_id)
765{
 
766	struct virtio_gpu_ctx_resource *cmd_p;
767	struct virtio_gpu_vbuffer *vbuf;
768
769	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
770	memset(cmd_p, 0, sizeof(*cmd_p));
 
771
772	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
773	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
774	cmd_p->resource_id = cpu_to_le32(resource_id);
775	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
776
777}
778
779void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
780					    uint32_t ctx_id,
781					    uint32_t resource_id)
782{
 
783	struct virtio_gpu_ctx_resource *cmd_p;
784	struct virtio_gpu_vbuffer *vbuf;
785
786	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
787	memset(cmd_p, 0, sizeof(*cmd_p));
 
788
789	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
790	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
791	cmd_p->resource_id = cpu_to_le32(resource_id);
792	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
793}
794
795void
796virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
797				  struct virtio_gpu_resource_create_3d *rc_3d,
798				  struct virtio_gpu_fence **fence)
799{
800	struct virtio_gpu_resource_create_3d *cmd_p;
801	struct virtio_gpu_vbuffer *vbuf;
802
803	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
804	memset(cmd_p, 0, sizeof(*cmd_p));
 
805
806	*cmd_p = *rc_3d;
807	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
808	cmd_p->hdr.flags = 0;
809
810	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
811}
812
813void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
814					uint32_t resource_id, uint32_t ctx_id,
815					uint64_t offset, uint32_t level,
816					struct virtio_gpu_box *box,
817					struct virtio_gpu_fence **fence)
818{
 
819	struct virtio_gpu_transfer_host_3d *cmd_p;
820	struct virtio_gpu_vbuffer *vbuf;
821
822	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
823	memset(cmd_p, 0, sizeof(*cmd_p));
824
825	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
826	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
827	cmd_p->resource_id = cpu_to_le32(resource_id);
828	cmd_p->box = *box;
829	cmd_p->offset = cpu_to_le64(offset);
830	cmd_p->level = cpu_to_le32(level);
831
832	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
833}
834
835void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
836					  uint32_t resource_id, uint32_t ctx_id,
837					  uint64_t offset, uint32_t level,
838					  struct virtio_gpu_box *box,
839					  struct virtio_gpu_fence **fence)
840{
 
841	struct virtio_gpu_transfer_host_3d *cmd_p;
842	struct virtio_gpu_vbuffer *vbuf;
843
844	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
845	memset(cmd_p, 0, sizeof(*cmd_p));
846
847	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
848	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
849	cmd_p->resource_id = cpu_to_le32(resource_id);
850	cmd_p->box = *box;
851	cmd_p->offset = cpu_to_le64(offset);
852	cmd_p->level = cpu_to_le32(level);
853
854	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
855}
856
857void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
858			   void *data, uint32_t data_size,
859			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
860{
861	struct virtio_gpu_cmd_submit *cmd_p;
862	struct virtio_gpu_vbuffer *vbuf;
863
864	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
865	memset(cmd_p, 0, sizeof(*cmd_p));
866
867	vbuf->data_buf = data;
868	vbuf->data_size = data_size;
 
869
870	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
871	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
872	cmd_p->size = cpu_to_le32(data_size);
873
874	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
875}
876
877int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
878			     struct virtio_gpu_object *obj,
879			     uint32_t resource_id,
880			     struct virtio_gpu_fence **fence)
881{
882	struct virtio_gpu_mem_entry *ents;
883	struct scatterlist *sg;
884	int si;
885
886	if (!obj->pages) {
887		int ret;
888		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
889		if (ret)
890			return ret;
891	}
892
893	/* gets freed when the ring has consumed it */
894	ents = kmalloc_array(obj->pages->nents,
895			     sizeof(struct virtio_gpu_mem_entry),
896			     GFP_KERNEL);
897	if (!ents) {
898		DRM_ERROR("failed to allocate ent list\n");
899		return -ENOMEM;
900	}
901
902	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
903		ents[si].addr = cpu_to_le64(sg_phys(sg));
904		ents[si].length = cpu_to_le32(sg->length);
905		ents[si].padding = 0;
906	}
907
908	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
909					       ents, obj->pages->nents,
910					       fence);
911	obj->hw_res_handle = resource_id;
912	return 0;
913}
914
915void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
916			    struct virtio_gpu_output *output)
917{
 
918	struct virtio_gpu_vbuffer *vbuf;
919	struct virtio_gpu_update_cursor *cur_p;
920
921	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
922	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
923	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
924	virtio_gpu_queue_cursor(vgdev, vbuf);
925}
v5.14.15
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Authors:
   6 *    Dave Airlie <airlied@redhat.com>
   7 *    Gerd Hoffmann <kraxel@redhat.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the next
  17 * paragraph) shall be included in all copies or substantial portions of the
  18 * Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  26 * OTHER DEALINGS IN THE SOFTWARE.
  27 */
  28
  29#include <linux/dma-mapping.h>
 
  30#include <linux/virtio.h>
  31#include <linux/virtio_config.h>
  32#include <linux/virtio_ring.h>
  33
  34#include "virtgpu_drv.h"
  35#include "virtgpu_trace.h"
  36
  37#define MAX_INLINE_CMD_SIZE   96
  38#define MAX_INLINE_RESP_SIZE  24
  39#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
  40			       + MAX_INLINE_CMD_SIZE		 \
  41			       + MAX_INLINE_RESP_SIZE)
  42
  43static void convert_to_hw_box(struct virtio_gpu_box *dst,
  44			      const struct drm_virtgpu_3d_box *src)
  45{
  46	dst->x = cpu_to_le32(src->x);
  47	dst->y = cpu_to_le32(src->y);
  48	dst->z = cpu_to_le32(src->z);
  49	dst->w = cpu_to_le32(src->w);
  50	dst->h = cpu_to_le32(src->h);
  51	dst->d = cpu_to_le32(src->d);
  52}
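/*
 * Illustrative only (the callers are outside this excerpt): the 3D
 * transfer commands are expected to pass the userspace box through this
 * helper so the command carries little-endian fields, roughly:
 *
 *	struct virtio_gpu_box hw_box;
 *
 *	convert_to_hw_box(&hw_box, &user_box);	// user_box is a hypothetical
 *						// struct drm_virtgpu_3d_box
 */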
  53
  54void virtio_gpu_ctrl_ack(struct virtqueue *vq)
  55{
  56	struct drm_device *dev = vq->vdev->priv;
  57	struct virtio_gpu_device *vgdev = dev->dev_private;
  58
  59	schedule_work(&vgdev->ctrlq.dequeue_work);
  60}
  61
  62void virtio_gpu_cursor_ack(struct virtqueue *vq)
  63{
  64	struct drm_device *dev = vq->vdev->priv;
  65	struct virtio_gpu_device *vgdev = dev->dev_private;
  66
  67	schedule_work(&vgdev->cursorq.dequeue_work);
  68}
  69
  70int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
  71{
  72	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
  73					 VBUFFER_SIZE,
  74					 __alignof__(struct virtio_gpu_vbuffer),
  75					 0, NULL);
  76	if (!vgdev->vbufs)
  77		return -ENOMEM;
  78	return 0;
  79}
  80
  81void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
  82{
  83	kmem_cache_destroy(vgdev->vbufs);
  84	vgdev->vbufs = NULL;
  85}
  86
  87static struct virtio_gpu_vbuffer*
  88virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
  89		    int size, int resp_size, void *resp_buf,
  90		    virtio_gpu_resp_cb resp_cb)
  91{
  92	struct virtio_gpu_vbuffer *vbuf;
  93
  94	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
  95	if (!vbuf)
  96		return ERR_PTR(-ENOMEM);
  97
  98	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
  99	       size < sizeof(struct virtio_gpu_ctrl_hdr));
 100	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
 101	vbuf->size = size;
 102
 103	vbuf->resp_cb = resp_cb;
 104	vbuf->resp_size = resp_size;
 105	if (resp_size <= MAX_INLINE_RESP_SIZE)
 106		vbuf->resp_buf = (void *)vbuf->buf + size;
 107	else
 108		vbuf->resp_buf = resp_buf;
 109	BUG_ON(!vbuf->resp_buf);
 110	return vbuf;
 111}
 112
 113static struct virtio_gpu_ctrl_hdr *
 114virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 
 115{
 116	/* this assumes a vbuf contains a command that starts with a
 117	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
 118	 * virtqueues.
 119	 */
 120	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 121}
 122
 123static struct virtio_gpu_update_cursor*
 124virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 125			struct virtio_gpu_vbuffer **vbuffer_p)
 126{
 127	struct virtio_gpu_vbuffer *vbuf;
 128
 129	vbuf = virtio_gpu_get_vbuf
 130		(vgdev, sizeof(struct virtio_gpu_update_cursor),
 131		 0, NULL, NULL);
 132	if (IS_ERR(vbuf)) {
 133		*vbuffer_p = NULL;
 134		return ERR_CAST(vbuf);
 135	}
 136	*vbuffer_p = vbuf;
 137	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 138}
 139
 140static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 141				       virtio_gpu_resp_cb cb,
 142				       struct virtio_gpu_vbuffer **vbuffer_p,
 143				       int cmd_size, int resp_size,
 144				       void *resp_buf)
 145{
 146	struct virtio_gpu_vbuffer *vbuf;
 147
 148	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
 149				   resp_size, resp_buf, cb);
 150	if (IS_ERR(vbuf)) {
 151		*vbuffer_p = NULL;
 152		return ERR_CAST(vbuf);
 153	}
 154	*vbuffer_p = vbuf;
 155	return (struct virtio_gpu_command *)vbuf->buf;
 156}
 157
 158static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
 159				  struct virtio_gpu_vbuffer **vbuffer_p,
 160				  int size)
 161{
 162	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
 163					 sizeof(struct virtio_gpu_ctrl_hdr),
 164					 NULL);
 165}
 166
 167static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
 168				     struct virtio_gpu_vbuffer **vbuffer_p,
 169				     int size,
 170				     virtio_gpu_resp_cb cb)
 171{
 172	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
 173					 sizeof(struct virtio_gpu_ctrl_hdr),
 174					 NULL);
 175}
 176
 177static void free_vbuf(struct virtio_gpu_device *vgdev,
 178		      struct virtio_gpu_vbuffer *vbuf)
 179{
 180	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 181		kfree(vbuf->resp_buf);
 182	kvfree(vbuf->data_buf);
 183	kmem_cache_free(vgdev->vbufs, vbuf);
 184}
 185
 186static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
 187{
 188	struct virtio_gpu_vbuffer *vbuf;
 189	unsigned int len;
 190	int freed = 0;
 191
 192	while ((vbuf = virtqueue_get_buf(vq, &len))) {
 193		list_add_tail(&vbuf->list, reclaim_list);
 194		freed++;
 195	}
 196	if (freed == 0)
 197		DRM_DEBUG("Huh? zero vbufs reclaimed");
 198}
 199
 200void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 201{
 202	struct virtio_gpu_device *vgdev =
 203		container_of(work, struct virtio_gpu_device,
 204			     ctrlq.dequeue_work);
 205	struct list_head reclaim_list;
 206	struct virtio_gpu_vbuffer *entry, *tmp;
 207	struct virtio_gpu_ctrl_hdr *resp;
 208	u64 fence_id = 0;
 209
 210	INIT_LIST_HEAD(&reclaim_list);
 211	spin_lock(&vgdev->ctrlq.qlock);
 212	do {
 213		virtqueue_disable_cb(vgdev->ctrlq.vq);
 214		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
 215
 216	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 217	spin_unlock(&vgdev->ctrlq.qlock);
 218
 219	list_for_each_entry(entry, &reclaim_list, list) {
 220		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 221
 222		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 223
 224		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 225			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 226				struct virtio_gpu_ctrl_hdr *cmd;
 227				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
 228				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
 229						      le32_to_cpu(resp->type),
 230						      le32_to_cpu(cmd->type));
 231			} else
 232				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 233		}
 234		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 235			u64 f = le64_to_cpu(resp->fence_id);
 236
 237			if (fence_id > f) {
 238				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
 239					  __func__, fence_id, f);
 240			} else {
 241				fence_id = f;
 242			}
 243		}
 244		if (entry->resp_cb)
 245			entry->resp_cb(vgdev, entry);
 246	}
 247	wake_up(&vgdev->ctrlq.ack_queue);
 248
 249	if (fence_id)
 250		virtio_gpu_fence_event_process(vgdev, fence_id);
 251
 252	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 253		if (entry->objs)
 254			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
 255		list_del(&entry->list);
 256		free_vbuf(vgdev, entry);
 257	}
 258}
 259
 260void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 261{
 262	struct virtio_gpu_device *vgdev =
 263		container_of(work, struct virtio_gpu_device,
 264			     cursorq.dequeue_work);
 265	struct list_head reclaim_list;
 266	struct virtio_gpu_vbuffer *entry, *tmp;
 267
 268	INIT_LIST_HEAD(&reclaim_list);
 269	spin_lock(&vgdev->cursorq.qlock);
 270	do {
 271		virtqueue_disable_cb(vgdev->cursorq.vq);
 272		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
 273	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
 274	spin_unlock(&vgdev->cursorq.qlock);
 275
 276	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 277		list_del(&entry->list);
 278		free_vbuf(vgdev, entry);
 279	}
 280	wake_up(&vgdev->cursorq.ack_queue);
 281}
 282
 283/* Create sg_table from a vmalloc'd buffer. */
 284static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 285{
 286	int ret, s, i;
 287	struct sg_table *sgt;
 288	struct scatterlist *sg;
 289	struct page *pg;
 290
 291	if (WARN_ON(!PAGE_ALIGNED(data)))
 292		return NULL;
 293
 294	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 295	if (!sgt)
 296		return NULL;
 297
 298	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
 299	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
 300	if (ret) {
 301		kfree(sgt);
 302		return NULL;
 303	}
 304
 305	for_each_sgtable_sg(sgt, sg, i) {
 306		pg = vmalloc_to_page(data);
 307		if (!pg) {
 308			sg_free_table(sgt);
 309			kfree(sgt);
 310			return NULL;
 311		}
 312
 313		s = min_t(int, PAGE_SIZE, size);
 314		sg_set_page(sg, pg, s, 0);
 315
 316		size -= s;
 317		data += s;
 318	}
 319
 320	return sgt;
 321}
 322
 323static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 324				     struct virtio_gpu_vbuffer *vbuf,
 325				     struct virtio_gpu_fence *fence,
 326				     int elemcnt,
 327				     struct scatterlist **sgs,
 328				     int outcnt,
 329				     int incnt)
 330{
 331	struct virtqueue *vq = vgdev->ctrlq.vq;
 332	int ret, idx;
 333
 334	if (!drm_dev_enter(vgdev->ddev, &idx)) {
 335		if (fence && vbuf->objs)
 336			virtio_gpu_array_unlock_resv(vbuf->objs);
 337		free_vbuf(vgdev, vbuf);
 338		return -1;
 339	}
 340
 341	if (vgdev->has_indirect)
 342		elemcnt = 1;
 343
 344again:
 345	spin_lock(&vgdev->ctrlq.qlock);
 346
 347	if (vq->num_free < elemcnt) {
 348		spin_unlock(&vgdev->ctrlq.qlock);
 349		virtio_gpu_notify(vgdev);
 350		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
 351		goto again;
 352	}
 353
 354	/* now that the position of the vbuf in the virtqueue is known, we can
 355	 * finally set the fence id
 356	 */
 357	if (fence) {
 358		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
 359				      fence);
 360		if (vbuf->objs) {
 361			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
 362			virtio_gpu_array_unlock_resv(vbuf->objs);
 363		}
 364	}
 365
 366	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 367	WARN_ON(ret);
 368
 369	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
 370
 371	atomic_inc(&vgdev->pending_commands);
 372
 373	spin_unlock(&vgdev->ctrlq.qlock);
 374
 375	drm_dev_exit(idx);
 376	return 0;
 377}
 378
 379static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 380					       struct virtio_gpu_vbuffer *vbuf,
 381					       struct virtio_gpu_fence *fence)
 382{
 383	struct scatterlist *sgs[3], vcmd, vout, vresp;
 384	struct sg_table *sgt = NULL;
 385	int elemcnt = 0, outcnt = 0, incnt = 0, ret;
 386
 387	/* set up vcmd */
 388	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 389	elemcnt++;
 390	sgs[outcnt] = &vcmd;
 391	outcnt++;
 392
 393	/* set up vout */
 394	if (vbuf->data_size) {
 395		if (is_vmalloc_addr(vbuf->data_buf)) {
 396			int sg_ents;
 397			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
 398					     &sg_ents);
 399			if (!sgt) {
 400				if (fence && vbuf->objs)
 401					virtio_gpu_array_unlock_resv(vbuf->objs);
 402				return -1;
 403			}
 404
 405			elemcnt += sg_ents;
 406			sgs[outcnt] = sgt->sgl;
 407		} else {
 408			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
 409			elemcnt++;
 410			sgs[outcnt] = &vout;
 411		}
 412		outcnt++;
 413	}
 414
 415	/* set up vresp */
 416	if (vbuf->resp_size) {
 417		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 418		elemcnt++;
 419		sgs[outcnt + incnt] = &vresp;
 420		incnt++;
 421	}
 422
 423	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
 424					incnt);
 425
 426	if (sgt) {
 427		sg_free_table(sgt);
 428		kfree(sgt);
 429	}
 430	return ret;
 431}
 432
 433void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 
 434{
 435	bool notify;
 436
 437	if (!atomic_read(&vgdev->pending_commands))
 438		return;
 439
 440	spin_lock(&vgdev->ctrlq.qlock);
 441	atomic_set(&vgdev->pending_commands, 0);
 442	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
 443	spin_unlock(&vgdev->ctrlq.qlock);
 444
 445	if (notify)
 446		virtqueue_notify(vgdev->ctrlq.vq);
 447}
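/*
 * Queue/notify split: virtio_gpu_queue_ctrl_sgs() above only adds the
 * buffer and bumps pending_commands; the host is kicked separately here.
 * A caller batching work would look roughly like this (sketch only, the
 * call sites are outside this excerpt):
 *
 *	virtio_gpu_cmd_resource_flush(vgdev, ...);
 *	virtio_gpu_cmd_set_scanout(vgdev, ...);
 *	virtio_gpu_notify(vgdev);	// single kick for the whole batch
 */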
 448
 449static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 450					struct virtio_gpu_vbuffer *vbuf)
 451{
 452	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
 453}
 454
 455static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 456				    struct virtio_gpu_vbuffer *vbuf)
 457{
 458	struct virtqueue *vq = vgdev->cursorq.vq;
 459	struct scatterlist *sgs[1], ccmd;
 460	int idx, ret, outcnt;
 461	bool notify;
 462
 463	if (!drm_dev_enter(vgdev->ddev, &idx)) {
 464		free_vbuf(vgdev, vbuf);
 465		return;
 466	}
 467
 468	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 469	sgs[0] = &ccmd;
 470	outcnt = 1;
 471
 472	spin_lock(&vgdev->cursorq.qlock);
 473retry:
 474	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 475	if (ret == -ENOSPC) {
 476		spin_unlock(&vgdev->cursorq.qlock);
 477		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 478		spin_lock(&vgdev->cursorq.qlock);
 479		goto retry;
 480	} else {
 481		trace_virtio_gpu_cmd_queue(vq,
 482			virtio_gpu_vbuf_ctrl_hdr(vbuf));
 483
 484		notify = virtqueue_kick_prepare(vq);
 485	}
 486
 487	spin_unlock(&vgdev->cursorq.qlock);
 488
 489	if (notify)
 490		virtqueue_notify(vq);
 491
 492	drm_dev_exit(idx);
 493}
 494
 495/* just create gem objects for userspace and long lived objects,
 496 * just use dma_alloced pages for the queue objects?
 497 */
 498
 499/* create a basic resource */
 500void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 501				    struct virtio_gpu_object *bo,
 502				    struct virtio_gpu_object_params *params,
 503				    struct virtio_gpu_object_array *objs,
 504				    struct virtio_gpu_fence *fence)
 505{
 506	struct virtio_gpu_resource_create_2d *cmd_p;
 507	struct virtio_gpu_vbuffer *vbuf;
 508
 509	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 510	memset(cmd_p, 0, sizeof(*cmd_p));
 511	vbuf->objs = objs;
 512
 513	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
 514	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 515	cmd_p->format = cpu_to_le32(params->format);
 516	cmd_p->width = cpu_to_le32(params->width);
 517	cmd_p->height = cpu_to_le32(params->height);
 518
 519	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 520	bo->created = true;
 521}
 522
 523static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
 524				    struct virtio_gpu_vbuffer *vbuf)
 525{
 526	struct virtio_gpu_object *bo;
 527
 528	bo = vbuf->resp_cb_data;
 529	vbuf->resp_cb_data = NULL;
 530
 531	virtio_gpu_cleanup_object(bo);
 532}
 533
 534void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 535				   struct virtio_gpu_object *bo)
 536{
 537	struct virtio_gpu_resource_unref *cmd_p;
 538	struct virtio_gpu_vbuffer *vbuf;
 539	int ret;
 540
 541	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
 542					virtio_gpu_cmd_unref_cb);
 543	memset(cmd_p, 0, sizeof(*cmd_p));
 544
 545	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
 546	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 547
 548	vbuf->resp_cb_data = bo;
 549	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 550	if (ret < 0)
 551		virtio_gpu_cleanup_object(bo);
 552}
 553
 554void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 555				uint32_t scanout_id, uint32_t resource_id,
 556				uint32_t width, uint32_t height,
 557				uint32_t x, uint32_t y)
 558{
 559	struct virtio_gpu_set_scanout *cmd_p;
 560	struct virtio_gpu_vbuffer *vbuf;
 561
 562	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 563	memset(cmd_p, 0, sizeof(*cmd_p));
 564
 565	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
 566	cmd_p->resource_id = cpu_to_le32(resource_id);
 567	cmd_p->scanout_id = cpu_to_le32(scanout_id);
 568	cmd_p->r.width = cpu_to_le32(width);
 569	cmd_p->r.height = cpu_to_le32(height);
 570	cmd_p->r.x = cpu_to_le32(x);
 571	cmd_p->r.y = cpu_to_le32(y);
 572
 573	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 574}
 575
 576void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 577				   uint32_t resource_id,
 578				   uint32_t x, uint32_t y,
 579				   uint32_t width, uint32_t height)
 580{
 581	struct virtio_gpu_resource_flush *cmd_p;
 582	struct virtio_gpu_vbuffer *vbuf;
 583
 584	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 585	memset(cmd_p, 0, sizeof(*cmd_p));
 586
 587	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
 588	cmd_p->resource_id = cpu_to_le32(resource_id);
 589	cmd_p->r.width = cpu_to_le32(width);
 590	cmd_p->r.height = cpu_to_le32(height);
 591	cmd_p->r.x = cpu_to_le32(x);
 592	cmd_p->r.y = cpu_to_le32(y);
 593
 594	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 595}
 596
 597void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 598					uint64_t offset,
 599					uint32_t width, uint32_t height,
 600					uint32_t x, uint32_t y,
 601					struct virtio_gpu_object_array *objs,
 602					struct virtio_gpu_fence *fence)
 603{
 604	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 605	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 606	struct virtio_gpu_vbuffer *vbuf;
 607	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 608	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 609
 610	if (use_dma_api)
 611		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 612					    shmem->pages, DMA_TO_DEVICE);
 613
 614	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 615	memset(cmd_p, 0, sizeof(*cmd_p));
 616	vbuf->objs = objs;
 617
 618	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 619	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 620	cmd_p->offset = cpu_to_le64(offset);
 621	cmd_p->r.width = cpu_to_le32(width);
 622	cmd_p->r.height = cpu_to_le32(height);
 623	cmd_p->r.x = cpu_to_le32(x);
 624	cmd_p->r.y = cpu_to_le32(y);
 625
 626	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 627}
 628
 629static void
 630virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 631				       uint32_t resource_id,
 632				       struct virtio_gpu_mem_entry *ents,
 633				       uint32_t nents,
 634				       struct virtio_gpu_fence *fence)
 635{
 636	struct virtio_gpu_resource_attach_backing *cmd_p;
 637	struct virtio_gpu_vbuffer *vbuf;
 638
 639	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 640	memset(cmd_p, 0, sizeof(*cmd_p));
 641
 642	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
 643	cmd_p->resource_id = cpu_to_le32(resource_id);
 644	cmd_p->nr_entries = cpu_to_le32(nents);
 645
 646	vbuf->data_buf = ents;
 647	vbuf->data_size = sizeof(*ents) * nents;
 648
 649	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 650}
 651
 652static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 653					       struct virtio_gpu_vbuffer *vbuf)
 654{
 655	struct virtio_gpu_resp_display_info *resp =
 656		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
 657	int i;
 658
 659	spin_lock(&vgdev->display_info_lock);
 660	for (i = 0; i < vgdev->num_scanouts; i++) {
 661		vgdev->outputs[i].info = resp->pmodes[i];
 662		if (resp->pmodes[i].enabled) {
 663			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
 664				  le32_to_cpu(resp->pmodes[i].r.width),
 665				  le32_to_cpu(resp->pmodes[i].r.height),
 666				  le32_to_cpu(resp->pmodes[i].r.x),
 667				  le32_to_cpu(resp->pmodes[i].r.y));
 668		} else {
 669			DRM_DEBUG("output %d: disabled", i);
 670		}
 671	}
 672
 673	vgdev->display_info_pending = false;
 674	spin_unlock(&vgdev->display_info_lock);
 675	wake_up(&vgdev->resp_wq);
 676
 677	if (!drm_helper_hpd_irq_event(vgdev->ddev))
 678		drm_kms_helper_hotplug_event(vgdev->ddev);
 679}
 680
 681static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
 682					      struct virtio_gpu_vbuffer *vbuf)
 683{
 684	struct virtio_gpu_get_capset_info *cmd =
 685		(struct virtio_gpu_get_capset_info *)vbuf->buf;
 686	struct virtio_gpu_resp_capset_info *resp =
 687		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
 688	int i = le32_to_cpu(cmd->capset_index);
 689
 690	spin_lock(&vgdev->display_info_lock);
 691	if (vgdev->capsets) {
 692		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
 693		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
 694		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
 695	} else {
 696		DRM_ERROR("invalid capset memory.");
 697	}
 698	spin_unlock(&vgdev->display_info_lock);
 699	wake_up(&vgdev->resp_wq);
 700}
 701
 702static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 703				     struct virtio_gpu_vbuffer *vbuf)
 704{
 705	struct virtio_gpu_get_capset *cmd =
 706		(struct virtio_gpu_get_capset *)vbuf->buf;
 707	struct virtio_gpu_resp_capset *resp =
 708		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
 709	struct virtio_gpu_drv_cap_cache *cache_ent;
 710
 711	spin_lock(&vgdev->display_info_lock);
 712	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 713		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
 714		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 715			memcpy(cache_ent->caps_cache, resp->capset_data,
 716			       cache_ent->size);
 717			/* Copy must occur before is_valid is signalled. */
 718			smp_wmb();
 719			atomic_set(&cache_ent->is_valid, 1);
 720			break;
 721		}
 722	}
 723	spin_unlock(&vgdev->display_info_lock);
 724	wake_up_all(&vgdev->resp_wq);
 725}
 726
 727static int virtio_get_edid_block(void *data, u8 *buf,
 728				 unsigned int block, size_t len)
 729{
 730	struct virtio_gpu_resp_edid *resp = data;
 731	size_t start = block * EDID_LENGTH;
 732
 733	if (start + len > le32_to_cpu(resp->size))
 734		return -1;
 735	memcpy(buf, resp->edid + start, len);
 736	return 0;
 737}
 738
 739static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 740				       struct virtio_gpu_vbuffer *vbuf)
 741{
 742	struct virtio_gpu_cmd_get_edid *cmd =
 743		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
 744	struct virtio_gpu_resp_edid *resp =
 745		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 746	uint32_t scanout = le32_to_cpu(cmd->scanout);
 747	struct virtio_gpu_output *output;
 748	struct edid *new_edid, *old_edid;
 749
 750	if (scanout >= vgdev->num_scanouts)
 751		return;
 752	output = vgdev->outputs + scanout;
 753
 754	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
 755	drm_connector_update_edid_property(&output->conn, new_edid);
 756
 757	spin_lock(&vgdev->display_info_lock);
 758	old_edid = output->edid;
 759	output->edid = new_edid;
 760	spin_unlock(&vgdev->display_info_lock);
 761
 762	kfree(old_edid);
 763	wake_up(&vgdev->resp_wq);
 764}
 765
 766int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 767{
 768	struct virtio_gpu_ctrl_hdr *cmd_p;
 769	struct virtio_gpu_vbuffer *vbuf;
 770	void *resp_buf;
 771
 772	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
 773			   GFP_KERNEL);
 774	if (!resp_buf)
 775		return -ENOMEM;
 776
 777	cmd_p = virtio_gpu_alloc_cmd_resp
 778		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
 779		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
 780		 resp_buf);
 781	memset(cmd_p, 0, sizeof(*cmd_p));
 782
 783	vgdev->display_info_pending = true;
 784	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 785	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 786	return 0;
 787}
 788
 789int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
 790{
 791	struct virtio_gpu_get_capset_info *cmd_p;
 792	struct virtio_gpu_vbuffer *vbuf;
 793	void *resp_buf;
 794
 795	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
 796			   GFP_KERNEL);
 797	if (!resp_buf)
 798		return -ENOMEM;
 799
 800	cmd_p = virtio_gpu_alloc_cmd_resp
 801		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
 802		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
 803		 resp_buf);
 804	memset(cmd_p, 0, sizeof(*cmd_p));
 805
 806	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
 807	cmd_p->capset_index = cpu_to_le32(idx);
 808	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 809	return 0;
 810}
 811
 812int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 813			      int idx, int version,
 814			      struct virtio_gpu_drv_cap_cache **cache_p)
 815{
 816	struct virtio_gpu_get_capset *cmd_p;
 817	struct virtio_gpu_vbuffer *vbuf;
 818	int max_size;
 819	struct virtio_gpu_drv_cap_cache *cache_ent;
 820	struct virtio_gpu_drv_cap_cache *search_ent;
 821	void *resp_buf;
 822
 823	*cache_p = NULL;
 824
 825	if (idx >= vgdev->num_capsets)
 826		return -EINVAL;
 827
 828	if (version > vgdev->capsets[idx].max_version)
 829		return -EINVAL;
 830
 831	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
 832	if (!cache_ent)
 833		return -ENOMEM;
 834
 835	max_size = vgdev->capsets[idx].max_size;
 836	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 837	if (!cache_ent->caps_cache) {
 838		kfree(cache_ent);
 839		return -ENOMEM;
 840	}
 841
 842	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
 843			   GFP_KERNEL);
 844	if (!resp_buf) {
 845		kfree(cache_ent->caps_cache);
 846		kfree(cache_ent);
 847		return -ENOMEM;
 848	}
 849
 850	cache_ent->version = version;
 851	cache_ent->id = vgdev->capsets[idx].id;
 852	atomic_set(&cache_ent->is_valid, 0);
 853	cache_ent->size = max_size;
 854	spin_lock(&vgdev->display_info_lock);
 855	/* Search while under lock in case it was added by another task. */
 856	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
 857		if (search_ent->id == vgdev->capsets[idx].id &&
 858		    search_ent->version == version) {
 859			*cache_p = search_ent;
 860			break;
 861		}
 862	}
 863	if (!*cache_p)
 864		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 865	spin_unlock(&vgdev->display_info_lock);
 866
 867	if (*cache_p) {
 868		/* Entry was found, so free everything that was just created. */
 869		kfree(resp_buf);
 870		kfree(cache_ent->caps_cache);
 871		kfree(cache_ent);
 872		return 0;
 873	}
 874
 875	cmd_p = virtio_gpu_alloc_cmd_resp
 876		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
 877		 sizeof(struct virtio_gpu_resp_capset) + max_size,
 878		 resp_buf);
 879	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
 880	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
 881	cmd_p->capset_version = cpu_to_le32(version);
 882	*cache_p = cache_ent;
 883	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 884
 885	return 0;
 886}
 887
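/*
 * Queue one VIRTIO_GPU_CMD_GET_EDID request per scanout.  Only valid when
 * the host advertises EDID support (vgdev->has_edid).
 */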
 888int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
 889{
 890	struct virtio_gpu_cmd_get_edid *cmd_p;
 891	struct virtio_gpu_vbuffer *vbuf;
 892	void *resp_buf;
 893	int scanout;
 894
 895	if (WARN_ON(!vgdev->has_edid))
 896		return -EINVAL;
 897
 898	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
 899		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
 900				   GFP_KERNEL);
 901		if (!resp_buf)
 902			return -ENOMEM;
 903
 904		cmd_p = virtio_gpu_alloc_cmd_resp
 905			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
 906			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
 907			 resp_buf);
 908		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
 909		cmd_p->scanout = cpu_to_le32(scanout);
 910		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 911	}
 912
 913	return 0;
 914}
 915
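/*
 * Create a host rendering context with the given id; the debug name is
 * truncated to fit the command's fixed-size field.
 */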
 916void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 917				   uint32_t nlen, const char *name)
 918{
 919	struct virtio_gpu_ctx_create *cmd_p;
 920	struct virtio_gpu_vbuffer *vbuf;
 921
 922	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 923	memset(cmd_p, 0, sizeof(*cmd_p));
 924
 925	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
 926	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 927	cmd_p->nlen = cpu_to_le32(nlen);
 928	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
 929	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
 930	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 931}
 932
 933void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 934				    uint32_t id)
 935{
 936	struct virtio_gpu_ctx_destroy *cmd_p;
 937	struct virtio_gpu_vbuffer *vbuf;
 938
 939	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 940	memset(cmd_p, 0, sizeof(*cmd_p));
 941
 942	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
 943	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 944	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 945}
 946
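/*
 * Attach the first object in @objs to host context @ctx_id.  Ownership of
 * the object array passes to the vbuffer via vbuf->objs.
 */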
 947void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 948					    uint32_t ctx_id,
 949					    struct virtio_gpu_object_array *objs)
 950{
 951	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 952	struct virtio_gpu_ctx_resource *cmd_p;
 953	struct virtio_gpu_vbuffer *vbuf;
 954
 955	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 956	memset(cmd_p, 0, sizeof(*cmd_p));
 957	vbuf->objs = objs;
 958
 959	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 960	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 961	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 962	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 963}
 964
 965void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 966					    uint32_t ctx_id,
 967					    struct virtio_gpu_object_array *objs)
 968{
 969	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 970	struct virtio_gpu_ctx_resource *cmd_p;
 971	struct virtio_gpu_vbuffer *vbuf;
 972
 973	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 974	memset(cmd_p, 0, sizeof(*cmd_p));
 975	vbuf->objs = objs;
 976
 977	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 978	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 979	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 980	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 981}
 982
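/*
 * Create a 3D resource on the host from @params (format, dimensions, target,
 * bind flags and sample count), optionally fenced; bo->created is set once
 * the command has been queued.
 */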
 983void
 984virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 985				  struct virtio_gpu_object *bo,
 986				  struct virtio_gpu_object_params *params,
 987				  struct virtio_gpu_object_array *objs,
 988				  struct virtio_gpu_fence *fence)
 989{
 990	struct virtio_gpu_resource_create_3d *cmd_p;
 991	struct virtio_gpu_vbuffer *vbuf;
 992
 993	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 994	memset(cmd_p, 0, sizeof(*cmd_p));
 995	vbuf->objs = objs;
 996
 997	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 998	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 999	cmd_p->format = cpu_to_le32(params->format);
1000	cmd_p->width = cpu_to_le32(params->width);
1001	cmd_p->height = cpu_to_le32(params->height);
1002
1003	cmd_p->target = cpu_to_le32(params->target);
1004	cmd_p->bind = cpu_to_le32(params->bind);
1005	cmd_p->depth = cpu_to_le32(params->depth);
1006	cmd_p->array_size = cpu_to_le32(params->array_size);
1007	cmd_p->last_level = cpu_to_le32(params->last_level);
1008	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
1009	cmd_p->flags = cpu_to_le32(params->flags);
1010
1011	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1012
1013	bo->created = true;
1014}
1015
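/*
 * Transfer guest data into a host 3D resource.  When the DMA API is in use,
 * shmem-backed pages are synced for the device before the command is queued.
 */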
1016void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1017					uint32_t ctx_id,
1018					uint64_t offset, uint32_t level,
1019					uint32_t stride,
1020					uint32_t layer_stride,
1021					struct drm_virtgpu_3d_box *box,
1022					struct virtio_gpu_object_array *objs,
1023					struct virtio_gpu_fence *fence)
1024{
1025	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1026	struct virtio_gpu_transfer_host_3d *cmd_p;
1027	struct virtio_gpu_vbuffer *vbuf;
1028	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1029
1030	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
1031		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
1032		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
1033					    shmem->pages, DMA_TO_DEVICE);
1034	}
1035
1036	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1037	memset(cmd_p, 0, sizeof(*cmd_p));
1038
1039	vbuf->objs = objs;
1040
1041	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
1042	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1043	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1044	convert_to_hw_box(&cmd_p->box, box);
1045	cmd_p->offset = cpu_to_le64(offset);
1046	cmd_p->level = cpu_to_le32(level);
1047	cmd_p->stride = cpu_to_le32(stride);
1048	cmd_p->layer_stride = cpu_to_le32(layer_stride);
1049
1050	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1051}
1052
1053void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1054					  uint32_t ctx_id,
1055					  uint64_t offset, uint32_t level,
1056					  uint32_t stride,
1057					  uint32_t layer_stride,
1058					  struct drm_virtgpu_3d_box *box,
1059					  struct virtio_gpu_object_array *objs,
1060					  struct virtio_gpu_fence *fence)
1061{
1062	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1063	struct virtio_gpu_transfer_host_3d *cmd_p;
1064	struct virtio_gpu_vbuffer *vbuf;
1065
1066	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1067	memset(cmd_p, 0, sizeof(*cmd_p));
1068
1069	vbuf->objs = objs;
1070
1071	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
1072	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1073	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1074	convert_to_hw_box(&cmd_p->box, box);
1075	cmd_p->offset = cpu_to_le64(offset);
1076	cmd_p->level = cpu_to_le32(level);
1077	cmd_p->stride = cpu_to_le32(stride);
1078	cmd_p->layer_stride = cpu_to_le32(layer_stride);
1079
1080	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1081}
1082
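/*
 * Submit a command stream for 3D execution: @data becomes the payload of a
 * VIRTIO_GPU_CMD_SUBMIT_3D command, sent on the control queue with an
 * optional fence.
 */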
1083void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1084			   void *data, uint32_t data_size,
1085			   uint32_t ctx_id,
1086			   struct virtio_gpu_object_array *objs,
1087			   struct virtio_gpu_fence *fence)
1088{
1089	struct virtio_gpu_cmd_submit *cmd_p;
1090	struct virtio_gpu_vbuffer *vbuf;
1091
1092	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1093	memset(cmd_p, 0, sizeof(*cmd_p));
1094
1095	vbuf->data_buf = data;
1096	vbuf->data_size = data_size;
1097	vbuf->objs = objs;
1098
1099	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
1100	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1101	cmd_p->size = cpu_to_le32(data_size);
1102
1103	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1104}
1105
1106void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1107			      struct virtio_gpu_object *obj,
1108			      struct virtio_gpu_mem_entry *ents,
1109			      unsigned int nents)
1110{
1111	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1112					       ents, nents, NULL);
1113}
1114
1115void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1116			    struct virtio_gpu_output *output)
1117{
1118	struct virtio_gpu_vbuffer *vbuf;
1119	struct virtio_gpu_update_cursor *cur_p;
1120
1121	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1122	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1123	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1124	virtio_gpu_queue_cursor(vgdev, vbuf);
1125}
1126
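/*
 * Response handler for RESOURCE_ASSIGN_UUID: record the UUID on the object
 * and update uuid_state under resource_export_lock, then wake up waiters.
 */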
1127static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1128					    struct virtio_gpu_vbuffer *vbuf)
1129{
1130	struct virtio_gpu_object *obj =
1131		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1132	struct virtio_gpu_resp_resource_uuid *resp =
1133		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
1134	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1135
1136	spin_lock(&vgdev->resource_export_lock);
1137	WARN_ON(obj->uuid_state != STATE_INITIALIZING);
1138
1139	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
1140	    obj->uuid_state == STATE_INITIALIZING) {
1141		import_uuid(&obj->uuid, resp->uuid);
1142		obj->uuid_state = STATE_OK;
1143	} else {
1144		obj->uuid_state = STATE_ERR;
1145	}
1146	spin_unlock(&vgdev->resource_export_lock);
1147
1148	wake_up_all(&vgdev->resp_wq);
1149}
1150
1151int
1152virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1153				    struct virtio_gpu_object_array *objs)
1154{
1155	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1156	struct virtio_gpu_resource_assign_uuid *cmd_p;
1157	struct virtio_gpu_vbuffer *vbuf;
1158	struct virtio_gpu_resp_resource_uuid *resp_buf;
1159
1160	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1161	if (!resp_buf) {
1162		spin_lock(&vgdev->resource_export_lock);
1163		bo->uuid_state = STATE_ERR;
1164		spin_unlock(&vgdev->resource_export_lock);
1165		virtio_gpu_array_put_free(objs);
1166		return -ENOMEM;
1167	}
1168
1169	cmd_p = virtio_gpu_alloc_cmd_resp
1170		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1171		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
1172	memset(cmd_p, 0, sizeof(*cmd_p));
1173
1174	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
1175	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1176
1177	vbuf->objs = objs;
1178	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1179	return 0;
1180}
1181
1182static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
1183					   struct virtio_gpu_vbuffer *vbuf)
1184{
1185	struct virtio_gpu_object *bo =
1186		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1187	struct virtio_gpu_resp_map_info *resp =
1188		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
1189	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
1190	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1191
1192	spin_lock(&vgdev->host_visible_lock);
1193
1194	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
1195		vram->map_info = resp->map_info;
1196		vram->map_state = STATE_OK;
1197	} else {
1198		vram->map_state = STATE_ERR;
1199	}
1200
1201	spin_unlock(&vgdev->host_visible_lock);
1202	wake_up_all(&vgdev->resp_wq);
1203}
1204
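/*
 * Queue a VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB request to map a blob resource at
 * @offset in the host-visible region; vram->map_info and map_state are
 * filled in by virtio_gpu_cmd_resource_map_cb.
 */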
1205int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
1206		       struct virtio_gpu_object_array *objs, uint64_t offset)
1207{
1208	struct virtio_gpu_resource_map_blob *cmd_p;
1209	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1210	struct virtio_gpu_vbuffer *vbuf;
1211	struct virtio_gpu_resp_map_info *resp_buf;
1212
1213	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1214	if (!resp_buf)
1215		return -ENOMEM;
1216
1217	cmd_p = virtio_gpu_alloc_cmd_resp
1218		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
1219		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
1220	memset(cmd_p, 0, sizeof(*cmd_p));
1221
1222	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
1223	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1224	cmd_p->offset = cpu_to_le64(offset);
1225	vbuf->objs = objs;
1226
1227	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1228	return 0;
1229}
1230
1231void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
1232			  struct virtio_gpu_object *bo)
1233{
1234	struct virtio_gpu_resource_unmap_blob *cmd_p;
1235	struct virtio_gpu_vbuffer *vbuf;
1236
1237	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1238	memset(cmd_p, 0, sizeof(*cmd_p));
1239
1240	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
1241	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1242
1243	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1244}
1245
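/*
 * Create a blob resource described by @params; the @ents array of guest
 * memory entries is attached to the command as its data payload.
 */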
1246void
1247virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
1248				    struct virtio_gpu_object *bo,
1249				    struct virtio_gpu_object_params *params,
1250				    struct virtio_gpu_mem_entry *ents,
1251				    uint32_t nents)
1252{
1253	struct virtio_gpu_resource_create_blob *cmd_p;
1254	struct virtio_gpu_vbuffer *vbuf;
1255
1256	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1257	memset(cmd_p, 0, sizeof(*cmd_p));
1258
1259	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
1260	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
1261	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1262	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
1263	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
1264	cmd_p->blob_id = cpu_to_le64(params->blob_id);
1265	cmd_p->size = cpu_to_le64(params->size);
1266	cmd_p->nr_entries = cpu_to_le32(nents);
1267
1268	vbuf->data_buf = ents;
1269	vbuf->data_size = sizeof(*ents) * nents;
1270
1271	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1272	bo->created = true;
1273}
1274
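/*
 * Point scanout @scanout_id at blob resource @bo, using the framebuffer's
 * format, strides and offsets and the given display rectangle.
 */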
1275void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
1276				     uint32_t scanout_id,
1277				     struct virtio_gpu_object *bo,
1278				     struct drm_framebuffer *fb,
1279				     uint32_t width, uint32_t height,
1280				     uint32_t x, uint32_t y)
1281{
1282	uint32_t i;
1283	struct virtio_gpu_set_scanout_blob *cmd_p;
1284	struct virtio_gpu_vbuffer *vbuf;
1285	uint32_t format = virtio_gpu_translate_format(fb->format->format);
1286
1287	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1288	memset(cmd_p, 0, sizeof(*cmd_p));
1289
1290	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
1291	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1292	cmd_p->scanout_id = cpu_to_le32(scanout_id);
1293
1294	cmd_p->format = cpu_to_le32(format);
1295	cmd_p->width  = cpu_to_le32(fb->width);
1296	cmd_p->height = cpu_to_le32(fb->height);
1297
1298	for (i = 0; i < 4; i++) {
1299		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
1300		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
1301	}
1302
1303	cmd_p->r.width = cpu_to_le32(width);
1304	cmd_p->r.height = cpu_to_le32(height);
1305	cmd_p->r.x = cpu_to_le32(x);
1306	cmd_p->r.y = cpu_to_le32(y);
1307
1308	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1309}