v4.6
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie <airlied@redhat.com>
  7 *    Gerd Hoffmann <kraxel@redhat.com>
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the next
 17 * paragraph) shall be included in all copies or substantial portions of the
 18 * Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 26 * OTHER DEALINGS IN THE SOFTWARE.
 27 */
 28
 29#include <drm/drmP.h>
 30#include "virtgpu_drv.h"
 31#include <linux/virtio.h>
 32#include <linux/virtio_config.h>
 33#include <linux/virtio_ring.h>
 34
 35#define MAX_INLINE_CMD_SIZE   96
 36#define MAX_INLINE_RESP_SIZE  24
 37#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
 38			       + MAX_INLINE_CMD_SIZE		 \
 39			       + MAX_INLINE_RESP_SIZE)
 40
 41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 42				uint32_t *resid)
 43{
 44	int handle;
 45
 46	idr_preload(GFP_KERNEL);
 47	spin_lock(&vgdev->resource_idr_lock);
 48	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
 49	spin_unlock(&vgdev->resource_idr_lock);
 50	idr_preload_end();
 51	*resid = handle;
 52}
 53
 54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 55{
 56	spin_lock(&vgdev->resource_idr_lock);
 57	idr_remove(&vgdev->resource_idr, id);
 58	spin_unlock(&vgdev->resource_idr_lock);
 59}
 60
 61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 62{
 63	struct drm_device *dev = vq->vdev->priv;
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 65	schedule_work(&vgdev->ctrlq.dequeue_work);
 66}
 67
 68void virtio_gpu_cursor_ack(struct virtqueue *vq)
 69{
 70	struct drm_device *dev = vq->vdev->priv;
 71	struct virtio_gpu_device *vgdev = dev->dev_private;
 72	schedule_work(&vgdev->cursorq.dequeue_work);
 73}
 74
 75int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 76{
 77	struct virtio_gpu_vbuffer *vbuf;
 78	int i, size, count = 0;
 79	void *ptr;
 80
 81	INIT_LIST_HEAD(&vgdev->free_vbufs);
 82	spin_lock_init(&vgdev->free_vbufs_lock);
 83	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
 84	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
 85	size = count * VBUFFER_SIZE;
 86	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
 87		 count, VBUFFER_SIZE, size / 1024);
 88
 89	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
 90	if (!vgdev->vbufs)
 91		return -ENOMEM;
 92
 93	for (i = 0, ptr = vgdev->vbufs;
 94	     i < count;
 95	     i++, ptr += VBUFFER_SIZE) {
 96		vbuf = ptr;
 97		list_add(&vbuf->list, &vgdev->free_vbufs);
 98	}
 99	return 0;
100}
101
102void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
103{
104	struct virtio_gpu_vbuffer *vbuf;
105	int i, count = 0;
106
107	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
108	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
109
110	spin_lock(&vgdev->free_vbufs_lock);
111	for (i = 0; i < count; i++) {
112		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
113			return;
114		vbuf = list_first_entry(&vgdev->free_vbufs,
115					struct virtio_gpu_vbuffer, list);
116		list_del(&vbuf->list);
117	}
118	spin_unlock(&vgdev->free_vbufs_lock);
119	kfree(vgdev->vbufs);
120}
121
122static struct virtio_gpu_vbuffer*
123virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
124		    int size, int resp_size, void *resp_buf,
125		    virtio_gpu_resp_cb resp_cb)
126{
127	struct virtio_gpu_vbuffer *vbuf;
128
129	spin_lock(&vgdev->free_vbufs_lock);
130	BUG_ON(list_empty(&vgdev->free_vbufs));
131	vbuf = list_first_entry(&vgdev->free_vbufs,
132				struct virtio_gpu_vbuffer, list);
133	list_del(&vbuf->list);
134	spin_unlock(&vgdev->free_vbufs_lock);
135	memset(vbuf, 0, VBUFFER_SIZE);
136
137	BUG_ON(size > MAX_INLINE_CMD_SIZE);
138	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
139	vbuf->size = size;
140
141	vbuf->resp_cb = resp_cb;
142	vbuf->resp_size = resp_size;
143	if (resp_size <= MAX_INLINE_RESP_SIZE)
144		vbuf->resp_buf = (void *)vbuf->buf + size;
145	else
146		vbuf->resp_buf = resp_buf;
147	BUG_ON(!vbuf->resp_buf);
148	return vbuf;
149}
150
151static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
152				  struct virtio_gpu_vbuffer **vbuffer_p,
153				  int size)
154{
155	struct virtio_gpu_vbuffer *vbuf;
156
157	vbuf = virtio_gpu_get_vbuf(vgdev, size,
158				   sizeof(struct virtio_gpu_ctrl_hdr),
159				   NULL, NULL);
160	if (IS_ERR(vbuf)) {
161		*vbuffer_p = NULL;
162		return ERR_CAST(vbuf);
163	}
164	*vbuffer_p = vbuf;
165	return vbuf->buf;
166}
167
168static struct virtio_gpu_update_cursor*
169virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
170			struct virtio_gpu_vbuffer **vbuffer_p)
171{
172	struct virtio_gpu_vbuffer *vbuf;
173
174	vbuf = virtio_gpu_get_vbuf
175		(vgdev, sizeof(struct virtio_gpu_update_cursor),
176		 0, NULL, NULL);
177	if (IS_ERR(vbuf)) {
178		*vbuffer_p = NULL;
179		return ERR_CAST(vbuf);
180	}
181	*vbuffer_p = vbuf;
182	return (struct virtio_gpu_update_cursor *)vbuf->buf;
183}
184
185static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
186				       virtio_gpu_resp_cb cb,
187				       struct virtio_gpu_vbuffer **vbuffer_p,
188				       int cmd_size, int resp_size,
189				       void *resp_buf)
190{
191	struct virtio_gpu_vbuffer *vbuf;
192
193	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
194				   resp_size, resp_buf, cb);
195	if (IS_ERR(vbuf)) {
196		*vbuffer_p = NULL;
197		return ERR_CAST(vbuf);
198	}
199	*vbuffer_p = vbuf;
200	return (struct virtio_gpu_command *)vbuf->buf;
201}
202
203static void free_vbuf(struct virtio_gpu_device *vgdev,
204		      struct virtio_gpu_vbuffer *vbuf)
205{
206	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
207		kfree(vbuf->resp_buf);
208	kfree(vbuf->data_buf);
209	spin_lock(&vgdev->free_vbufs_lock);
210	list_add(&vbuf->list, &vgdev->free_vbufs);
211	spin_unlock(&vgdev->free_vbufs_lock);
212}
213
214static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
215{
216	struct virtio_gpu_vbuffer *vbuf;
217	unsigned int len;
218	int freed = 0;
219
220	while ((vbuf = virtqueue_get_buf(vq, &len))) {
221		list_add_tail(&vbuf->list, reclaim_list);
222		freed++;
223	}
224	if (freed == 0)
225		DRM_DEBUG("Huh? zero vbufs reclaimed");
226}
227
228void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
229{
230	struct virtio_gpu_device *vgdev =
231		container_of(work, struct virtio_gpu_device,
232			     ctrlq.dequeue_work);
233	struct list_head reclaim_list;
234	struct virtio_gpu_vbuffer *entry, *tmp;
235	struct virtio_gpu_ctrl_hdr *resp;
236	u64 fence_id = 0;
237
238	INIT_LIST_HEAD(&reclaim_list);
239	spin_lock(&vgdev->ctrlq.qlock);
240	do {
241		virtqueue_disable_cb(vgdev->ctrlq.vq);
242		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
243
244	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
245	spin_unlock(&vgdev->ctrlq.qlock);
246
247	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
248		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
249		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
250			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
251		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
252			u64 f = le64_to_cpu(resp->fence_id);
253
254			if (fence_id > f) {
255				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
256					  __func__, fence_id, f);
257			} else {
258				fence_id = f;
259			}
260		}
261		if (entry->resp_cb)
262			entry->resp_cb(vgdev, entry);
263
264		list_del(&entry->list);
265		free_vbuf(vgdev, entry);
266	}
267	wake_up(&vgdev->ctrlq.ack_queue);
268
269	if (fence_id)
270		virtio_gpu_fence_event_process(vgdev, fence_id);
271}
272
273void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
274{
275	struct virtio_gpu_device *vgdev =
276		container_of(work, struct virtio_gpu_device,
277			     cursorq.dequeue_work);
278	struct list_head reclaim_list;
279	struct virtio_gpu_vbuffer *entry, *tmp;
280
281	INIT_LIST_HEAD(&reclaim_list);
282	spin_lock(&vgdev->cursorq.qlock);
283	do {
284		virtqueue_disable_cb(vgdev->cursorq.vq);
285		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
286	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
287	spin_unlock(&vgdev->cursorq.qlock);
288
289	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
290		list_del(&entry->list);
291		free_vbuf(vgdev, entry);
292	}
293	wake_up(&vgdev->cursorq.ack_queue);
294}
295
296static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
297					       struct virtio_gpu_vbuffer *vbuf)
298{
299	struct virtqueue *vq = vgdev->ctrlq.vq;
300	struct scatterlist *sgs[3], vcmd, vout, vresp;
301	int outcnt = 0, incnt = 0;
302	int ret;
303
304	if (!vgdev->vqs_ready)
305		return -ENODEV;
306
307	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
308	sgs[outcnt+incnt] = &vcmd;
309	outcnt++;
310
311	if (vbuf->data_size) {
312		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
313		sgs[outcnt + incnt] = &vout;
314		outcnt++;
315	}
316
317	if (vbuf->resp_size) {
318		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
319		sgs[outcnt + incnt] = &vresp;
320		incnt++;
321	}
322
323retry:
324	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
325	if (ret == -ENOSPC) {
326		spin_unlock(&vgdev->ctrlq.qlock);
327		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
328		spin_lock(&vgdev->ctrlq.qlock);
329		goto retry;
330	} else {
331		virtqueue_kick(vq);
332	}
333
334	if (!ret)
335		ret = vq->num_free;
336	return ret;
337}
338
339static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
340					struct virtio_gpu_vbuffer *vbuf)
341{
342	int rc;
343
344	spin_lock(&vgdev->ctrlq.qlock);
345	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
346	spin_unlock(&vgdev->ctrlq.qlock);
347	return rc;
348}
349
350static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
351					       struct virtio_gpu_vbuffer *vbuf,
352					       struct virtio_gpu_ctrl_hdr *hdr,
353					       struct virtio_gpu_fence **fence)
354{
355	struct virtqueue *vq = vgdev->ctrlq.vq;
356	int rc;
357
358again:
359	spin_lock(&vgdev->ctrlq.qlock);
360
361	/*
362	 * Make sure we have enough space in the virtqueue (at most three
363	 * descriptors per buffer); if not, wait here until we do.
364	 *
365	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
366	 * to wait for free space, which can result in fence ids being
367	 * submitted out-of-order.
368	 */
369	if (vq->num_free < 3) {
370		spin_unlock(&vgdev->ctrlq.qlock);
371		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
372		goto again;
373	}
374
375	if (fence)
376		virtio_gpu_fence_emit(vgdev, hdr, fence);
377	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
378	spin_unlock(&vgdev->ctrlq.qlock);
379	return rc;
380}
381
382static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
383				   struct virtio_gpu_vbuffer *vbuf)
384{
385	struct virtqueue *vq = vgdev->cursorq.vq;
386	struct scatterlist *sgs[1], ccmd;
387	int ret;
388	int outcnt;
389
390	if (!vgdev->vqs_ready)
391		return -ENODEV;
392
393	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
394	sgs[0] = &ccmd;
395	outcnt = 1;
396
397	spin_lock(&vgdev->cursorq.qlock);
398retry:
399	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
400	if (ret == -ENOSPC) {
401		spin_unlock(&vgdev->cursorq.qlock);
402		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
403		spin_lock(&vgdev->cursorq.qlock);
404		goto retry;
405	} else {
406		virtqueue_kick(vq);
407	}
408
409	spin_unlock(&vgdev->cursorq.qlock);
410
411	if (!ret)
412		ret = vq->num_free;
413	return ret;
414}
415
416/* just create gem objects for userspace and long lived objects,
417   just use dma_alloced pages for the queue objects? */
418
419/* create a basic resource */
420void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
421				    uint32_t resource_id,
422				    uint32_t format,
423				    uint32_t width,
424				    uint32_t height)
425{
426	struct virtio_gpu_resource_create_2d *cmd_p;
427	struct virtio_gpu_vbuffer *vbuf;
428
429	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
430	memset(cmd_p, 0, sizeof(*cmd_p));
431
432	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
433	cmd_p->resource_id = cpu_to_le32(resource_id);
434	cmd_p->format = cpu_to_le32(format);
435	cmd_p->width = cpu_to_le32(width);
436	cmd_p->height = cpu_to_le32(height);
437
438	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
439}
440
441void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
442				   uint32_t resource_id)
443{
444	struct virtio_gpu_resource_unref *cmd_p;
445	struct virtio_gpu_vbuffer *vbuf;
446
447	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
448	memset(cmd_p, 0, sizeof(*cmd_p));
449
450	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
451	cmd_p->resource_id = cpu_to_le32(resource_id);
452
453	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
454}
455
456void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
457					   uint32_t resource_id)
458{
459	struct virtio_gpu_resource_detach_backing *cmd_p;
460	struct virtio_gpu_vbuffer *vbuf;
461
462	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
463	memset(cmd_p, 0, sizeof(*cmd_p));
464
465	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
466	cmd_p->resource_id = cpu_to_le32(resource_id);
467
468	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
469}
470
471void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
472				uint32_t scanout_id, uint32_t resource_id,
473				uint32_t width, uint32_t height,
474				uint32_t x, uint32_t y)
475{
476	struct virtio_gpu_set_scanout *cmd_p;
477	struct virtio_gpu_vbuffer *vbuf;
478
479	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
480	memset(cmd_p, 0, sizeof(*cmd_p));
481
482	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
483	cmd_p->resource_id = cpu_to_le32(resource_id);
484	cmd_p->scanout_id = cpu_to_le32(scanout_id);
485	cmd_p->r.width = cpu_to_le32(width);
486	cmd_p->r.height = cpu_to_le32(height);
487	cmd_p->r.x = cpu_to_le32(x);
488	cmd_p->r.y = cpu_to_le32(y);
489
490	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
491}
492
493void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
494				   uint32_t resource_id,
495				   uint32_t x, uint32_t y,
496				   uint32_t width, uint32_t height)
497{
498	struct virtio_gpu_resource_flush *cmd_p;
499	struct virtio_gpu_vbuffer *vbuf;
500
501	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
502	memset(cmd_p, 0, sizeof(*cmd_p));
503
504	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
505	cmd_p->resource_id = cpu_to_le32(resource_id);
506	cmd_p->r.width = cpu_to_le32(width);
507	cmd_p->r.height = cpu_to_le32(height);
508	cmd_p->r.x = cpu_to_le32(x);
509	cmd_p->r.y = cpu_to_le32(y);
510
511	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
512}
513
514void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
515					uint32_t resource_id, uint64_t offset,
516					__le32 width, __le32 height,
517					__le32 x, __le32 y,
518					struct virtio_gpu_fence **fence)
519{
520	struct virtio_gpu_transfer_to_host_2d *cmd_p;
521	struct virtio_gpu_vbuffer *vbuf;
522
523	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
524	memset(cmd_p, 0, sizeof(*cmd_p));
525
526	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
527	cmd_p->resource_id = cpu_to_le32(resource_id);
528	cmd_p->offset = cpu_to_le64(offset);
529	cmd_p->r.width = width;
530	cmd_p->r.height = height;
531	cmd_p->r.x = x;
532	cmd_p->r.y = y;
533
534	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
535}
536
537static void
538virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
539				       uint32_t resource_id,
540				       struct virtio_gpu_mem_entry *ents,
541				       uint32_t nents,
542				       struct virtio_gpu_fence **fence)
543{
544	struct virtio_gpu_resource_attach_backing *cmd_p;
545	struct virtio_gpu_vbuffer *vbuf;
546
547	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
548	memset(cmd_p, 0, sizeof(*cmd_p));
549
550	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
551	cmd_p->resource_id = cpu_to_le32(resource_id);
552	cmd_p->nr_entries = cpu_to_le32(nents);
553
554	vbuf->data_buf = ents;
555	vbuf->data_size = sizeof(*ents) * nents;
556
557	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
558}
559
560static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
561					       struct virtio_gpu_vbuffer *vbuf)
562{
563	struct virtio_gpu_resp_display_info *resp =
564		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
565	int i;
566
567	spin_lock(&vgdev->display_info_lock);
568	for (i = 0; i < vgdev->num_scanouts; i++) {
569		vgdev->outputs[i].info = resp->pmodes[i];
570		if (resp->pmodes[i].enabled) {
571			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
572				  le32_to_cpu(resp->pmodes[i].r.width),
573				  le32_to_cpu(resp->pmodes[i].r.height),
574				  le32_to_cpu(resp->pmodes[i].r.x),
575				  le32_to_cpu(resp->pmodes[i].r.y));
576		} else {
577			DRM_DEBUG("output %d: disabled", i);
578		}
579	}
580
581	vgdev->display_info_pending = false;
582	spin_unlock(&vgdev->display_info_lock);
583	wake_up(&vgdev->resp_wq);
584
585	if (!drm_helper_hpd_irq_event(vgdev->ddev))
586		drm_kms_helper_hotplug_event(vgdev->ddev);
587}
588
589static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
590					      struct virtio_gpu_vbuffer *vbuf)
591{
592	struct virtio_gpu_get_capset_info *cmd =
593		(struct virtio_gpu_get_capset_info *)vbuf->buf;
594	struct virtio_gpu_resp_capset_info *resp =
595		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
596	int i = le32_to_cpu(cmd->capset_index);
597
598	spin_lock(&vgdev->display_info_lock);
599	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
600	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
601	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
602	spin_unlock(&vgdev->display_info_lock);
603	wake_up(&vgdev->resp_wq);
604}
605
606static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
607				     struct virtio_gpu_vbuffer *vbuf)
608{
609	struct virtio_gpu_get_capset *cmd =
610		(struct virtio_gpu_get_capset *)vbuf->buf;
611	struct virtio_gpu_resp_capset *resp =
612		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
613	struct virtio_gpu_drv_cap_cache *cache_ent;
614
615	spin_lock(&vgdev->display_info_lock);
616	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
617		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
618		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
619			memcpy(cache_ent->caps_cache, resp->capset_data,
620			       cache_ent->size);
621			atomic_set(&cache_ent->is_valid, 1);
622			break;
623		}
624	}
625	spin_unlock(&vgdev->display_info_lock);
626	wake_up(&vgdev->resp_wq);
627}
628
629
630int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
631{
632	struct virtio_gpu_ctrl_hdr *cmd_p;
633	struct virtio_gpu_vbuffer *vbuf;
634	void *resp_buf;
635
636	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
637			   GFP_KERNEL);
638	if (!resp_buf)
639		return -ENOMEM;
640
641	cmd_p = virtio_gpu_alloc_cmd_resp
642		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
643		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
644		 resp_buf);
645	memset(cmd_p, 0, sizeof(*cmd_p));
646
647	vgdev->display_info_pending = true;
648	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
649	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
650	return 0;
651}
652
653int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
654{
655	struct virtio_gpu_get_capset_info *cmd_p;
656	struct virtio_gpu_vbuffer *vbuf;
657	void *resp_buf;
658
659	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
660			   GFP_KERNEL);
661	if (!resp_buf)
662		return -ENOMEM;
663
664	cmd_p = virtio_gpu_alloc_cmd_resp
665		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
666		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
667		 resp_buf);
668	memset(cmd_p, 0, sizeof(*cmd_p));
669
670	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
671	cmd_p->capset_index = cpu_to_le32(idx);
672	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
673	return 0;
674}
675
676int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
677			      int idx, int version,
678			      struct virtio_gpu_drv_cap_cache **cache_p)
679{
680	struct virtio_gpu_get_capset *cmd_p;
681	struct virtio_gpu_vbuffer *vbuf;
682	int max_size = vgdev->capsets[idx].max_size;
683	struct virtio_gpu_drv_cap_cache *cache_ent;
684	void *resp_buf;
685
686	if (idx > vgdev->num_capsets)
687		return -EINVAL;
688
689	if (version > vgdev->capsets[idx].max_version)
690		return -EINVAL;
691
692	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
693	if (!cache_ent)
694		return -ENOMEM;
695
696	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
697	if (!cache_ent->caps_cache) {
698		kfree(cache_ent);
699		return -ENOMEM;
700	}
701
702	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
703			   GFP_KERNEL);
704	if (!resp_buf) {
705		kfree(cache_ent->caps_cache);
706		kfree(cache_ent);
707		return -ENOMEM;
708	}
709
710	cache_ent->version = version;
711	cache_ent->id = vgdev->capsets[idx].id;
712	atomic_set(&cache_ent->is_valid, 0);
713	cache_ent->size = max_size;
714	spin_lock(&vgdev->display_info_lock);
715	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
716	spin_unlock(&vgdev->display_info_lock);
717
718	cmd_p = virtio_gpu_alloc_cmd_resp
719		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
720		 sizeof(struct virtio_gpu_resp_capset) + max_size,
721		 resp_buf);
722	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
723	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
724	cmd_p->capset_version = cpu_to_le32(version);
725	*cache_p = cache_ent;
726	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
727
728	return 0;
729}
730
731void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
732				   uint32_t nlen, const char *name)
733{
734	struct virtio_gpu_ctx_create *cmd_p;
735	struct virtio_gpu_vbuffer *vbuf;
736
737	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
738	memset(cmd_p, 0, sizeof(*cmd_p));
739
740	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
741	cmd_p->hdr.ctx_id = cpu_to_le32(id);
742	cmd_p->nlen = cpu_to_le32(nlen);
743	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
744	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
745	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
746}
747
748void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
749				    uint32_t id)
750{
751	struct virtio_gpu_ctx_destroy *cmd_p;
752	struct virtio_gpu_vbuffer *vbuf;
753
754	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
755	memset(cmd_p, 0, sizeof(*cmd_p));
756
757	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
758	cmd_p->hdr.ctx_id = cpu_to_le32(id);
759	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
760}
761
762void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
763					    uint32_t ctx_id,
764					    uint32_t resource_id)
765{
766	struct virtio_gpu_ctx_resource *cmd_p;
767	struct virtio_gpu_vbuffer *vbuf;
768
769	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
770	memset(cmd_p, 0, sizeof(*cmd_p));
771
772	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
773	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
774	cmd_p->resource_id = cpu_to_le32(resource_id);
775	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
776
777}
778
779void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
780					    uint32_t ctx_id,
781					    uint32_t resource_id)
782{
783	struct virtio_gpu_ctx_resource *cmd_p;
784	struct virtio_gpu_vbuffer *vbuf;
785
786	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
787	memset(cmd_p, 0, sizeof(*cmd_p));
788
789	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
790	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
791	cmd_p->resource_id = cpu_to_le32(resource_id);
792	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
793}
794
795void
796virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
797				  struct virtio_gpu_resource_create_3d *rc_3d,
798				  struct virtio_gpu_fence **fence)
799{
800	struct virtio_gpu_resource_create_3d *cmd_p;
801	struct virtio_gpu_vbuffer *vbuf;
802
803	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
804	memset(cmd_p, 0, sizeof(*cmd_p));
805
806	*cmd_p = *rc_3d;
807	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
808	cmd_p->hdr.flags = 0;
809
810	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
811}
812
813void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
814					uint32_t resource_id, uint32_t ctx_id,
815					uint64_t offset, uint32_t level,
816					struct virtio_gpu_box *box,
817					struct virtio_gpu_fence **fence)
818{
819	struct virtio_gpu_transfer_host_3d *cmd_p;
820	struct virtio_gpu_vbuffer *vbuf;
821
822	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
823	memset(cmd_p, 0, sizeof(*cmd_p));
824
825	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
826	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
827	cmd_p->resource_id = cpu_to_le32(resource_id);
828	cmd_p->box = *box;
829	cmd_p->offset = cpu_to_le64(offset);
830	cmd_p->level = cpu_to_le32(level);
831
832	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
833}
834
835void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
836					  uint32_t resource_id, uint32_t ctx_id,
837					  uint64_t offset, uint32_t level,
838					  struct virtio_gpu_box *box,
839					  struct virtio_gpu_fence **fence)
840{
841	struct virtio_gpu_transfer_host_3d *cmd_p;
842	struct virtio_gpu_vbuffer *vbuf;
843
844	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
845	memset(cmd_p, 0, sizeof(*cmd_p));
846
847	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
848	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
849	cmd_p->resource_id = cpu_to_le32(resource_id);
850	cmd_p->box = *box;
851	cmd_p->offset = cpu_to_le64(offset);
852	cmd_p->level = cpu_to_le32(level);
853
854	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
855}
856
857void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
858			   void *data, uint32_t data_size,
859			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
860{
861	struct virtio_gpu_cmd_submit *cmd_p;
862	struct virtio_gpu_vbuffer *vbuf;
863
864	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
865	memset(cmd_p, 0, sizeof(*cmd_p));
866
867	vbuf->data_buf = data;
868	vbuf->data_size = data_size;
869
870	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
871	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
872	cmd_p->size = cpu_to_le32(data_size);
873
874	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
875}
876
877int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
878			     struct virtio_gpu_object *obj,
879			     uint32_t resource_id,
880			     struct virtio_gpu_fence **fence)
881{
882	struct virtio_gpu_mem_entry *ents;
883	struct scatterlist *sg;
884	int si;
885
886	if (!obj->pages) {
887		int ret;
888		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
889		if (ret)
890			return ret;
891	}
892
893	/* gets freed when the ring has consumed it */
894	ents = kmalloc_array(obj->pages->nents,
895			     sizeof(struct virtio_gpu_mem_entry),
896			     GFP_KERNEL);
897	if (!ents) {
898		DRM_ERROR("failed to allocate ent list\n");
899		return -ENOMEM;
900	}
901
902	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
903		ents[si].addr = cpu_to_le64(sg_phys(sg));
904		ents[si].length = cpu_to_le32(sg->length);
905		ents[si].padding = 0;
906	}
907
908	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
909					       ents, obj->pages->nents,
910					       fence);
911	obj->hw_res_handle = resource_id;
912	return 0;
913}
914
915void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
916			    struct virtio_gpu_output *output)
917{
918	struct virtio_gpu_vbuffer *vbuf;
919	struct virtio_gpu_update_cursor *cur_p;
920
921	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
922	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
923	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
924	virtio_gpu_queue_cursor(vgdev, vbuf);
925}
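
A minimal sketch (not part of the kernel file above) of how the command helpers
combine for a basic 2D scanout: allocate a resource id, create the resource,
point a scanout at it, then transfer and flush.  The wrapper name, the
B8G8R8A8 format choice, and the omitted backing-store attach and error
handling are illustrative assumptions only.

static void virtio_gpu_example_scanout_setup(struct virtio_gpu_device *vgdev,
					     uint32_t width, uint32_t height)
{
	uint32_t resid;

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid,
				       VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM,
				       width, height);
	/* backing pages would normally be attached via virtio_gpu_object_attach() */
	virtio_gpu_cmd_set_scanout(vgdev, 0, resid, width, height, 0, 0);
	virtio_gpu_cmd_transfer_to_host_2d(vgdev, resid, 0,
					   cpu_to_le32(width),
					   cpu_to_le32(height),
					   cpu_to_le32(0), cpu_to_le32(0),
					   NULL);
	virtio_gpu_cmd_resource_flush(vgdev, resid, 0, 0, width, height);
}
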
v4.10.11
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie <airlied@redhat.com>
  7 *    Gerd Hoffmann <kraxel@redhat.com>
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the next
 17 * paragraph) shall be included in all copies or substantial portions of the
 18 * Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 26 * OTHER DEALINGS IN THE SOFTWARE.
 27 */
 28
 29#include <drm/drmP.h>
 30#include "virtgpu_drv.h"
 31#include <linux/virtio.h>
 32#include <linux/virtio_config.h>
 33#include <linux/virtio_ring.h>
 34
 35#define MAX_INLINE_CMD_SIZE   96
 36#define MAX_INLINE_RESP_SIZE  24
 37#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
 38			       + MAX_INLINE_CMD_SIZE		 \
 39			       + MAX_INLINE_RESP_SIZE)
 40
 41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 42				uint32_t *resid)
 43{
 44	int handle;
 45
 46	idr_preload(GFP_KERNEL);
 47	spin_lock(&vgdev->resource_idr_lock);
 48	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
 49	spin_unlock(&vgdev->resource_idr_lock);
 50	idr_preload_end();
 51	*resid = handle;
 52}
 53
 54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 55{
 56	spin_lock(&vgdev->resource_idr_lock);
 57	idr_remove(&vgdev->resource_idr, id);
 58	spin_unlock(&vgdev->resource_idr_lock);
 59}
 60
 61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 62{
 63	struct drm_device *dev = vq->vdev->priv;
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 65	schedule_work(&vgdev->ctrlq.dequeue_work);
 66}
 67
 68void virtio_gpu_cursor_ack(struct virtqueue *vq)
 69{
 70	struct drm_device *dev = vq->vdev->priv;
 71	struct virtio_gpu_device *vgdev = dev->dev_private;
 72	schedule_work(&vgdev->cursorq.dequeue_work);
 73}
 74
 75int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 76{
 77	struct virtio_gpu_vbuffer *vbuf;
 78	int i, size, count = 16;
 79	void *ptr;
 80
 81	INIT_LIST_HEAD(&vgdev->free_vbufs);
 82	spin_lock_init(&vgdev->free_vbufs_lock);
 83	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
 84	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
 85	size = count * VBUFFER_SIZE;
 86	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
 87		 count, VBUFFER_SIZE, size / 1024);
 88
 89	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
 90	if (!vgdev->vbufs)
 91		return -ENOMEM;
 92
 93	for (i = 0, ptr = vgdev->vbufs;
 94	     i < count;
 95	     i++, ptr += VBUFFER_SIZE) {
 96		vbuf = ptr;
 97		list_add(&vbuf->list, &vgdev->free_vbufs);
 98	}
 99	return 0;
100}
101
102void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
103{
104	struct virtio_gpu_vbuffer *vbuf;
105	int i, count = 0;
106
107	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
108	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
109
110	spin_lock(&vgdev->free_vbufs_lock);
111	for (i = 0; i < count; i++) {
112		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
113			spin_unlock(&vgdev->free_vbufs_lock);
114			return;
115		}
116		vbuf = list_first_entry(&vgdev->free_vbufs,
117					struct virtio_gpu_vbuffer, list);
118		list_del(&vbuf->list);
119	}
120	spin_unlock(&vgdev->free_vbufs_lock);
121	kfree(vgdev->vbufs);
122}
123
124static struct virtio_gpu_vbuffer*
125virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
126		    int size, int resp_size, void *resp_buf,
127		    virtio_gpu_resp_cb resp_cb)
128{
129	struct virtio_gpu_vbuffer *vbuf;
130
131	spin_lock(&vgdev->free_vbufs_lock);
132	BUG_ON(list_empty(&vgdev->free_vbufs));
133	vbuf = list_first_entry(&vgdev->free_vbufs,
134				struct virtio_gpu_vbuffer, list);
135	list_del(&vbuf->list);
136	spin_unlock(&vgdev->free_vbufs_lock);
137	memset(vbuf, 0, VBUFFER_SIZE);
138
139	BUG_ON(size > MAX_INLINE_CMD_SIZE);
140	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
141	vbuf->size = size;
142
143	vbuf->resp_cb = resp_cb;
144	vbuf->resp_size = resp_size;
145	if (resp_size <= MAX_INLINE_RESP_SIZE)
146		vbuf->resp_buf = (void *)vbuf->buf + size;
147	else
148		vbuf->resp_buf = resp_buf;
149	BUG_ON(!vbuf->resp_buf);
150	return vbuf;
151}
152
153static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
154				  struct virtio_gpu_vbuffer **vbuffer_p,
155				  int size)
156{
157	struct virtio_gpu_vbuffer *vbuf;
158
159	vbuf = virtio_gpu_get_vbuf(vgdev, size,
160				   sizeof(struct virtio_gpu_ctrl_hdr),
161				   NULL, NULL);
162	if (IS_ERR(vbuf)) {
163		*vbuffer_p = NULL;
164		return ERR_CAST(vbuf);
165	}
166	*vbuffer_p = vbuf;
167	return vbuf->buf;
168}
169
170static struct virtio_gpu_update_cursor*
171virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
172			struct virtio_gpu_vbuffer **vbuffer_p)
173{
174	struct virtio_gpu_vbuffer *vbuf;
175
176	vbuf = virtio_gpu_get_vbuf
177		(vgdev, sizeof(struct virtio_gpu_update_cursor),
178		 0, NULL, NULL);
179	if (IS_ERR(vbuf)) {
180		*vbuffer_p = NULL;
181		return ERR_CAST(vbuf);
182	}
183	*vbuffer_p = vbuf;
184	return (struct virtio_gpu_update_cursor *)vbuf->buf;
185}
186
187static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
188				       virtio_gpu_resp_cb cb,
189				       struct virtio_gpu_vbuffer **vbuffer_p,
190				       int cmd_size, int resp_size,
191				       void *resp_buf)
192{
193	struct virtio_gpu_vbuffer *vbuf;
194
195	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
196				   resp_size, resp_buf, cb);
197	if (IS_ERR(vbuf)) {
198		*vbuffer_p = NULL;
199		return ERR_CAST(vbuf);
200	}
201	*vbuffer_p = vbuf;
202	return (struct virtio_gpu_command *)vbuf->buf;
203}
204
205static void free_vbuf(struct virtio_gpu_device *vgdev,
206		      struct virtio_gpu_vbuffer *vbuf)
207{
208	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
209		kfree(vbuf->resp_buf);
210	kfree(vbuf->data_buf);
211	spin_lock(&vgdev->free_vbufs_lock);
212	list_add(&vbuf->list, &vgdev->free_vbufs);
213	spin_unlock(&vgdev->free_vbufs_lock);
214}
215
216static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
217{
218	struct virtio_gpu_vbuffer *vbuf;
219	unsigned int len;
220	int freed = 0;
221
222	while ((vbuf = virtqueue_get_buf(vq, &len))) {
223		list_add_tail(&vbuf->list, reclaim_list);
224		freed++;
225	}
226	if (freed == 0)
227		DRM_DEBUG("Huh? zero vbufs reclaimed");
228}
229
230void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
231{
232	struct virtio_gpu_device *vgdev =
233		container_of(work, struct virtio_gpu_device,
234			     ctrlq.dequeue_work);
235	struct list_head reclaim_list;
236	struct virtio_gpu_vbuffer *entry, *tmp;
237	struct virtio_gpu_ctrl_hdr *resp;
238	u64 fence_id = 0;
239
240	INIT_LIST_HEAD(&reclaim_list);
241	spin_lock(&vgdev->ctrlq.qlock);
242	do {
243		virtqueue_disable_cb(vgdev->ctrlq.vq);
244		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
245
246	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
247	spin_unlock(&vgdev->ctrlq.qlock);
248
249	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
250		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
251		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
252			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
253		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
254			u64 f = le64_to_cpu(resp->fence_id);
255
256			if (fence_id > f) {
257				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
258					  __func__, fence_id, f);
259			} else {
260				fence_id = f;
261			}
262		}
263		if (entry->resp_cb)
264			entry->resp_cb(vgdev, entry);
265
266		list_del(&entry->list);
267		free_vbuf(vgdev, entry);
268	}
269	wake_up(&vgdev->ctrlq.ack_queue);
270
271	if (fence_id)
272		virtio_gpu_fence_event_process(vgdev, fence_id);
273}
274
275void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
276{
277	struct virtio_gpu_device *vgdev =
278		container_of(work, struct virtio_gpu_device,
279			     cursorq.dequeue_work);
280	struct list_head reclaim_list;
281	struct virtio_gpu_vbuffer *entry, *tmp;
282
283	INIT_LIST_HEAD(&reclaim_list);
284	spin_lock(&vgdev->cursorq.qlock);
285	do {
286		virtqueue_disable_cb(vgdev->cursorq.vq);
287		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
288	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
289	spin_unlock(&vgdev->cursorq.qlock);
290
291	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
292		list_del(&entry->list);
293		free_vbuf(vgdev, entry);
294	}
295	wake_up(&vgdev->cursorq.ack_queue);
296}
297
298static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
299					       struct virtio_gpu_vbuffer *vbuf)
300		__releases(&vgdev->ctrlq.qlock)
301		__acquires(&vgdev->ctrlq.qlock)
302{
303	struct virtqueue *vq = vgdev->ctrlq.vq;
304	struct scatterlist *sgs[3], vcmd, vout, vresp;
305	int outcnt = 0, incnt = 0;
306	int ret;
307
308	if (!vgdev->vqs_ready)
309		return -ENODEV;
310
311	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
312	sgs[outcnt+incnt] = &vcmd;
313	outcnt++;
314
315	if (vbuf->data_size) {
316		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
317		sgs[outcnt + incnt] = &vout;
318		outcnt++;
319	}
320
321	if (vbuf->resp_size) {
322		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
323		sgs[outcnt + incnt] = &vresp;
324		incnt++;
325	}
326
327retry:
328	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
329	if (ret == -ENOSPC) {
330		spin_unlock(&vgdev->ctrlq.qlock);
331		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
332		spin_lock(&vgdev->ctrlq.qlock);
333		goto retry;
334	} else {
335		virtqueue_kick(vq);
336	}
337
338	if (!ret)
339		ret = vq->num_free;
340	return ret;
341}
342
343static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
344					struct virtio_gpu_vbuffer *vbuf)
345{
346	int rc;
347
348	spin_lock(&vgdev->ctrlq.qlock);
349	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
350	spin_unlock(&vgdev->ctrlq.qlock);
351	return rc;
352}
353
354static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
355					       struct virtio_gpu_vbuffer *vbuf,
356					       struct virtio_gpu_ctrl_hdr *hdr,
357					       struct virtio_gpu_fence **fence)
358{
359	struct virtqueue *vq = vgdev->ctrlq.vq;
360	int rc;
361
362again:
363	spin_lock(&vgdev->ctrlq.qlock);
364
365	/*
366	 * Make sure we have enough space in the virtqueue (at most three
367	 * descriptors per buffer); if not, wait here until we do.
368	 *
369	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
370	 * to wait for free space, which can result in fence ids being
371	 * submitted out-of-order.
372	 */
373	if (vq->num_free < 3) {
374		spin_unlock(&vgdev->ctrlq.qlock);
375		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
376		goto again;
377	}
378
379	if (fence)
380		virtio_gpu_fence_emit(vgdev, hdr, fence);
381	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
382	spin_unlock(&vgdev->ctrlq.qlock);
383	return rc;
384}
385
386static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
387				   struct virtio_gpu_vbuffer *vbuf)
388{
389	struct virtqueue *vq = vgdev->cursorq.vq;
390	struct scatterlist *sgs[1], ccmd;
391	int ret;
392	int outcnt;
393
394	if (!vgdev->vqs_ready)
395		return -ENODEV;
396
397	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
398	sgs[0] = &ccmd;
399	outcnt = 1;
400
401	spin_lock(&vgdev->cursorq.qlock);
402retry:
403	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
404	if (ret == -ENOSPC) {
405		spin_unlock(&vgdev->cursorq.qlock);
406		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
407		spin_lock(&vgdev->cursorq.qlock);
408		goto retry;
409	} else {
410		virtqueue_kick(vq);
411	}
412
413	spin_unlock(&vgdev->cursorq.qlock);
414
415	if (!ret)
416		ret = vq->num_free;
417	return ret;
418}
419
420/* just create gem objects for userspace and long lived objects,
421   just use dma_alloced pages for the queue objects? */
422
423/* create a basic resource */
424void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
425				    uint32_t resource_id,
426				    uint32_t format,
427				    uint32_t width,
428				    uint32_t height)
429{
430	struct virtio_gpu_resource_create_2d *cmd_p;
431	struct virtio_gpu_vbuffer *vbuf;
432
433	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
434	memset(cmd_p, 0, sizeof(*cmd_p));
435
436	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
437	cmd_p->resource_id = cpu_to_le32(resource_id);
438	cmd_p->format = cpu_to_le32(format);
439	cmd_p->width = cpu_to_le32(width);
440	cmd_p->height = cpu_to_le32(height);
441
442	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
443}
444
445void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
446				   uint32_t resource_id)
447{
448	struct virtio_gpu_resource_unref *cmd_p;
449	struct virtio_gpu_vbuffer *vbuf;
450
451	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
452	memset(cmd_p, 0, sizeof(*cmd_p));
453
454	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
455	cmd_p->resource_id = cpu_to_le32(resource_id);
456
457	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
458}
459
460void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
461					   uint32_t resource_id)
462{
463	struct virtio_gpu_resource_detach_backing *cmd_p;
464	struct virtio_gpu_vbuffer *vbuf;
465
466	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
467	memset(cmd_p, 0, sizeof(*cmd_p));
468
469	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
470	cmd_p->resource_id = cpu_to_le32(resource_id);
471
472	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
473}
474
475void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
476				uint32_t scanout_id, uint32_t resource_id,
477				uint32_t width, uint32_t height,
478				uint32_t x, uint32_t y)
479{
480	struct virtio_gpu_set_scanout *cmd_p;
481	struct virtio_gpu_vbuffer *vbuf;
482
483	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
484	memset(cmd_p, 0, sizeof(*cmd_p));
485
486	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
487	cmd_p->resource_id = cpu_to_le32(resource_id);
488	cmd_p->scanout_id = cpu_to_le32(scanout_id);
489	cmd_p->r.width = cpu_to_le32(width);
490	cmd_p->r.height = cpu_to_le32(height);
491	cmd_p->r.x = cpu_to_le32(x);
492	cmd_p->r.y = cpu_to_le32(y);
493
494	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
495}
496
497void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
498				   uint32_t resource_id,
499				   uint32_t x, uint32_t y,
500				   uint32_t width, uint32_t height)
501{
502	struct virtio_gpu_resource_flush *cmd_p;
503	struct virtio_gpu_vbuffer *vbuf;
504
505	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
506	memset(cmd_p, 0, sizeof(*cmd_p));
507
508	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
509	cmd_p->resource_id = cpu_to_le32(resource_id);
510	cmd_p->r.width = cpu_to_le32(width);
511	cmd_p->r.height = cpu_to_le32(height);
512	cmd_p->r.x = cpu_to_le32(x);
513	cmd_p->r.y = cpu_to_le32(y);
514
515	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
516}
517
518void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
519					uint32_t resource_id, uint64_t offset,
520					__le32 width, __le32 height,
521					__le32 x, __le32 y,
522					struct virtio_gpu_fence **fence)
523{
524	struct virtio_gpu_transfer_to_host_2d *cmd_p;
525	struct virtio_gpu_vbuffer *vbuf;
526
527	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
528	memset(cmd_p, 0, sizeof(*cmd_p));
529
530	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
531	cmd_p->resource_id = cpu_to_le32(resource_id);
532	cmd_p->offset = cpu_to_le64(offset);
533	cmd_p->r.width = width;
534	cmd_p->r.height = height;
535	cmd_p->r.x = x;
536	cmd_p->r.y = y;
537
538	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
539}
540
541static void
542virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
543				       uint32_t resource_id,
544				       struct virtio_gpu_mem_entry *ents,
545				       uint32_t nents,
546				       struct virtio_gpu_fence **fence)
547{
548	struct virtio_gpu_resource_attach_backing *cmd_p;
549	struct virtio_gpu_vbuffer *vbuf;
550
551	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
552	memset(cmd_p, 0, sizeof(*cmd_p));
553
554	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
555	cmd_p->resource_id = cpu_to_le32(resource_id);
556	cmd_p->nr_entries = cpu_to_le32(nents);
557
558	vbuf->data_buf = ents;
559	vbuf->data_size = sizeof(*ents) * nents;
560
561	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
562}
563
564static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
565					       struct virtio_gpu_vbuffer *vbuf)
566{
567	struct virtio_gpu_resp_display_info *resp =
568		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
569	int i;
570
571	spin_lock(&vgdev->display_info_lock);
572	for (i = 0; i < vgdev->num_scanouts; i++) {
573		vgdev->outputs[i].info = resp->pmodes[i];
574		if (resp->pmodes[i].enabled) {
575			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
576				  le32_to_cpu(resp->pmodes[i].r.width),
577				  le32_to_cpu(resp->pmodes[i].r.height),
578				  le32_to_cpu(resp->pmodes[i].r.x),
579				  le32_to_cpu(resp->pmodes[i].r.y));
580		} else {
581			DRM_DEBUG("output %d: disabled", i);
582		}
583	}
584
585	vgdev->display_info_pending = false;
586	spin_unlock(&vgdev->display_info_lock);
587	wake_up(&vgdev->resp_wq);
588
589	if (!drm_helper_hpd_irq_event(vgdev->ddev))
590		drm_kms_helper_hotplug_event(vgdev->ddev);
591}
592
593static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
594					      struct virtio_gpu_vbuffer *vbuf)
595{
596	struct virtio_gpu_get_capset_info *cmd =
597		(struct virtio_gpu_get_capset_info *)vbuf->buf;
598	struct virtio_gpu_resp_capset_info *resp =
599		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
600	int i = le32_to_cpu(cmd->capset_index);
601
602	spin_lock(&vgdev->display_info_lock);
603	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
604	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
605	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
606	spin_unlock(&vgdev->display_info_lock);
607	wake_up(&vgdev->resp_wq);
608}
609
610static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
611				     struct virtio_gpu_vbuffer *vbuf)
612{
613	struct virtio_gpu_get_capset *cmd =
614		(struct virtio_gpu_get_capset *)vbuf->buf;
615	struct virtio_gpu_resp_capset *resp =
616		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
617	struct virtio_gpu_drv_cap_cache *cache_ent;
618
619	spin_lock(&vgdev->display_info_lock);
620	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
621		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
622		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
623			memcpy(cache_ent->caps_cache, resp->capset_data,
624			       cache_ent->size);
625			atomic_set(&cache_ent->is_valid, 1);
626			break;
627		}
628	}
629	spin_unlock(&vgdev->display_info_lock);
630	wake_up(&vgdev->resp_wq);
631}
632
633
634int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
635{
636	struct virtio_gpu_ctrl_hdr *cmd_p;
637	struct virtio_gpu_vbuffer *vbuf;
638	void *resp_buf;
639
640	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
641			   GFP_KERNEL);
642	if (!resp_buf)
643		return -ENOMEM;
644
645	cmd_p = virtio_gpu_alloc_cmd_resp
646		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
647		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
648		 resp_buf);
649	memset(cmd_p, 0, sizeof(*cmd_p));
650
651	vgdev->display_info_pending = true;
652	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
653	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
654	return 0;
655}
656
657int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
658{
659	struct virtio_gpu_get_capset_info *cmd_p;
660	struct virtio_gpu_vbuffer *vbuf;
661	void *resp_buf;
662
663	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
664			   GFP_KERNEL);
665	if (!resp_buf)
666		return -ENOMEM;
667
668	cmd_p = virtio_gpu_alloc_cmd_resp
669		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
670		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
671		 resp_buf);
672	memset(cmd_p, 0, sizeof(*cmd_p));
673
674	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
675	cmd_p->capset_index = cpu_to_le32(idx);
676	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
677	return 0;
678}
679
680int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
681			      int idx, int version,
682			      struct virtio_gpu_drv_cap_cache **cache_p)
683{
684	struct virtio_gpu_get_capset *cmd_p;
685	struct virtio_gpu_vbuffer *vbuf;
686	int max_size = vgdev->capsets[idx].max_size;
687	struct virtio_gpu_drv_cap_cache *cache_ent;
688	void *resp_buf;
689
690	if (idx > vgdev->num_capsets)
691		return -EINVAL;
692
693	if (version > vgdev->capsets[idx].max_version)
694		return -EINVAL;
695
696	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
697	if (!cache_ent)
698		return -ENOMEM;
699
700	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
701	if (!cache_ent->caps_cache) {
702		kfree(cache_ent);
703		return -ENOMEM;
704	}
705
706	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
707			   GFP_KERNEL);
708	if (!resp_buf) {
709		kfree(cache_ent->caps_cache);
710		kfree(cache_ent);
711		return -ENOMEM;
712	}
713
714	cache_ent->version = version;
715	cache_ent->id = vgdev->capsets[idx].id;
716	atomic_set(&cache_ent->is_valid, 0);
717	cache_ent->size = max_size;
718	spin_lock(&vgdev->display_info_lock);
719	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
720	spin_unlock(&vgdev->display_info_lock);
721
722	cmd_p = virtio_gpu_alloc_cmd_resp
723		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
724		 sizeof(struct virtio_gpu_resp_capset) + max_size,
725		 resp_buf);
726	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
727	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
728	cmd_p->capset_version = cpu_to_le32(version);
729	*cache_p = cache_ent;
730	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
731
732	return 0;
733}
734
735void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
736				   uint32_t nlen, const char *name)
737{
738	struct virtio_gpu_ctx_create *cmd_p;
739	struct virtio_gpu_vbuffer *vbuf;
740
741	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
742	memset(cmd_p, 0, sizeof(*cmd_p));
743
744	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
745	cmd_p->hdr.ctx_id = cpu_to_le32(id);
746	cmd_p->nlen = cpu_to_le32(nlen);
747	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
748	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
749	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
750}
751
752void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
753				    uint32_t id)
754{
755	struct virtio_gpu_ctx_destroy *cmd_p;
756	struct virtio_gpu_vbuffer *vbuf;
757
758	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
759	memset(cmd_p, 0, sizeof(*cmd_p));
760
761	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
762	cmd_p->hdr.ctx_id = cpu_to_le32(id);
763	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
764}
765
766void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
767					    uint32_t ctx_id,
768					    uint32_t resource_id)
769{
770	struct virtio_gpu_ctx_resource *cmd_p;
771	struct virtio_gpu_vbuffer *vbuf;
772
773	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
774	memset(cmd_p, 0, sizeof(*cmd_p));
775
776	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
777	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
778	cmd_p->resource_id = cpu_to_le32(resource_id);
779	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
780
781}
782
783void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
784					    uint32_t ctx_id,
785					    uint32_t resource_id)
786{
787	struct virtio_gpu_ctx_resource *cmd_p;
788	struct virtio_gpu_vbuffer *vbuf;
789
790	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
791	memset(cmd_p, 0, sizeof(*cmd_p));
792
793	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
794	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
795	cmd_p->resource_id = cpu_to_le32(resource_id);
796	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
797}
798
799void
800virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
801				  struct virtio_gpu_resource_create_3d *rc_3d,
802				  struct virtio_gpu_fence **fence)
803{
804	struct virtio_gpu_resource_create_3d *cmd_p;
805	struct virtio_gpu_vbuffer *vbuf;
806
807	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
808	memset(cmd_p, 0, sizeof(*cmd_p));
809
810	*cmd_p = *rc_3d;
811	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
812	cmd_p->hdr.flags = 0;
813
814	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
815}
816
817void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
818					uint32_t resource_id, uint32_t ctx_id,
819					uint64_t offset, uint32_t level,
820					struct virtio_gpu_box *box,
821					struct virtio_gpu_fence **fence)
822{
823	struct virtio_gpu_transfer_host_3d *cmd_p;
824	struct virtio_gpu_vbuffer *vbuf;
825
826	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
827	memset(cmd_p, 0, sizeof(*cmd_p));
828
829	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
830	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
831	cmd_p->resource_id = cpu_to_le32(resource_id);
832	cmd_p->box = *box;
833	cmd_p->offset = cpu_to_le64(offset);
834	cmd_p->level = cpu_to_le32(level);
835
836	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
837}
838
839void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
840					  uint32_t resource_id, uint32_t ctx_id,
841					  uint64_t offset, uint32_t level,
842					  struct virtio_gpu_box *box,
843					  struct virtio_gpu_fence **fence)
844{
845	struct virtio_gpu_transfer_host_3d *cmd_p;
846	struct virtio_gpu_vbuffer *vbuf;
847
848	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
849	memset(cmd_p, 0, sizeof(*cmd_p));
850
851	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
852	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
853	cmd_p->resource_id = cpu_to_le32(resource_id);
854	cmd_p->box = *box;
855	cmd_p->offset = cpu_to_le64(offset);
856	cmd_p->level = cpu_to_le32(level);
857
858	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
859}
860
861void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
862			   void *data, uint32_t data_size,
863			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
864{
865	struct virtio_gpu_cmd_submit *cmd_p;
866	struct virtio_gpu_vbuffer *vbuf;
867
868	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
869	memset(cmd_p, 0, sizeof(*cmd_p));
870
871	vbuf->data_buf = data;
872	vbuf->data_size = data_size;
873
874	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
875	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
876	cmd_p->size = cpu_to_le32(data_size);
877
878	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
879}
880
881int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
882			     struct virtio_gpu_object *obj,
883			     uint32_t resource_id,
884			     struct virtio_gpu_fence **fence)
885{
886	struct virtio_gpu_mem_entry *ents;
887	struct scatterlist *sg;
888	int si;
889
890	if (!obj->pages) {
891		int ret;
892		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
893		if (ret)
894			return ret;
895	}
896
897	/* gets freed when the ring has consumed it */
898	ents = kmalloc_array(obj->pages->nents,
899			     sizeof(struct virtio_gpu_mem_entry),
900			     GFP_KERNEL);
901	if (!ents) {
902		DRM_ERROR("failed to allocate ent list\n");
903		return -ENOMEM;
904	}
905
906	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
907		ents[si].addr = cpu_to_le64(sg_phys(sg));
908		ents[si].length = cpu_to_le32(sg->length);
909		ents[si].padding = 0;
910	}
911
912	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
913					       ents, obj->pages->nents,
914					       fence);
915	obj->hw_res_handle = resource_id;
916	return 0;
917}
918
919void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
920			    struct virtio_gpu_output *output)
921{
922	struct virtio_gpu_vbuffer *vbuf;
923	struct virtio_gpu_update_cursor *cur_p;
924
925	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
926	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
927	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
928	virtio_gpu_queue_cursor(vgdev, vbuf);
929}
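
A minimal sketch (again not part of the file itself) of the fenced path used by
the 3D ioctls: the command stream is handed to virtio_gpu_cmd_submit(), which
emits a fence id before queueing so completions can be matched up in
virtio_gpu_dequeue_ctrl_func().  The wrapper name and the kmemdup() copy are
assumptions for illustration; the buffer must be heap memory because
free_vbuf() kfree()s vbuf->data_buf once the host has consumed it.

static int virtio_gpu_example_submit(struct virtio_gpu_device *vgdev,
				     uint32_t ctx_id,
				     const void *cmds, uint32_t size)
{
	struct virtio_gpu_fence *fence = NULL;
	void *buf;

	buf = kmemdup(cmds, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ownership of buf passes to the vbuffer; it is freed on reclaim */
	virtio_gpu_cmd_submit(vgdev, buf, size, ctx_id, &fence);

	/* callers typically keep 'fence' so they can wait for completion */
	return 0;
}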