/* v6.8 */
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
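
/*
 * Each vbuffer is a single kmem_cache allocation laid out as
 *
 *	[struct virtio_gpu_vbuffer][inline cmd, <= MAX_INLINE_CMD_SIZE]
 *	                           [inline resp, <= MAX_INLINE_RESP_SIZE]
 *
 * so small commands and responses need no extra allocations; only
 * responses larger than MAX_INLINE_RESP_SIZE use a caller-supplied
 * buffer (see virtio_gpu_get_vbuf() below).
 */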

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
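
/*
 * Typical command submission in this file follows one pattern throughout
 * (a sketch drawn from the callers below, e.g. virtio_gpu_cmd_set_scanout()):
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *	... fill in the remaining little-endian fields ...
 *	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 *
 * The vbuffer is reclaimed by the dequeue worker once the host has
 * consumed it; callers do not free it themselves.
 */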

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}
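
/*
 * The disable_cb/enable_cb loop above is the standard virtio pattern for
 * draining a virtqueue without losing a race against the device: if new
 * buffers became available between virtqueue_disable_cb() and
 * virtqueue_enable_cb(), enable_cb returns false and the queue is polled
 * again instead of waiting for an interrupt that may never arrive.
 */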

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
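
/*
 * vmalloc memory is only virtually contiguous, so it cannot be described
 * by a single scatterlist entry the way a kmalloc buffer can (compare
 * the sg_init_one() path in virtio_gpu_queue_fenced_ctrl_buffer()
 * below). Instead, each page is looked up with vmalloc_to_page() and
 * gets its own entry, which is why elemcnt grows by sg_ents for
 * vmalloc'd data buffers.
 */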

static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}
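
/*
 * Fence emission and virtqueue_add_sgs() happen in the same qlock
 * critical section, and the wait for ring space comes *before* the fence
 * is emitted. This keeps fence ids monotonically increasing in ring
 * order: if the fence were emitted first and the thread then slept for
 * space, another submitter could slip a higher fence id into the ring
 * ahead of it (the older version of this file, below, documents the
 * same constraint).
 */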

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}
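
/*
 * Queueing and notification are deliberately decoupled: each queued
 * command only bumps pending_commands, while the comparatively expensive
 * doorbell write to the host happens once per batch, when a caller
 * invokes virtio_gpu_notify() after submitting a group of related
 * commands. virtqueue_kick_prepare() can additionally suppress the
 * notification when the device has indicated it does not need one.
 */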

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf),
			vbuf->seqno);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
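
/*
 * About the dma_sync_sgtable_for_device() call above: when the transport
 * lacks VIRTIO_F_ACCESS_PLATFORM, the host reads guest pages directly
 * and no sync is needed; virtio_has_dma_quirk() reports exactly that
 * case. Otherwise the shmem pages were mapped through the DMA API, and
 * the sync makes any CPU-side writes visible to the device before the
 * host is asked to read them (the 3D transfer path below does the same).
 */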

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}
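
/*
 * virtio_get_edid_block() implements the get_edid_block callback that
 * drm_do_get_edid() expects: the DRM helper requests one 128-byte EDID
 * block at a time (block * EDID_LENGTH gives the byte offset), and here
 * the blocks are simply copied out of the response buffer the host has
 * already filled in, bounds-checked against the reported size.
 */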

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}
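
/*
 * The allocate-then-check pattern above avoids holding
 * display_info_lock across GFP_KERNEL allocations: the new cache entry
 * and response buffer are built speculatively, the list is searched
 * under the lock, and the freshly built entry is discarded if another
 * task won the race. Consumers must pair reading caps_cache with a
 * check of is_valid (set in virtio_gpu_cmd_capset_cb() after an
 * smp_wmb()) before trusting the cached data.
 */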

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
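
/*
 * uuid_state above and map_state below follow the same asynchronous
 * pattern: the request is queued while the state is still
 * STATE_INITIALIZING (the WARN_ON in virtio_gpu_cmd_resource_uuid_cb()
 * checks this), the response callback moves it to STATE_OK or STATE_ERR
 * under the matching lock, and waiters sleep on vgdev->resp_wq until
 * the state changes.
 */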

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width  = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* v4.10.11 */
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 16;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}
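
/*
 * In this older version, vbuffers come from a single kzalloc'd block
 * carved into a free list sized to both vrings (plus 16 spares), handed
 * out in virtio_gpu_get_vbuf() and returned in free_vbuf(). Later
 * kernels (see the v6.8 copy above) replaced this hand-rolled pool with
 * a kmem_cache.
 */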

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
			spin_unlock(&vgdev->free_vbufs_lock);
			return;
		}
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
385
386static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
387				   struct virtio_gpu_vbuffer *vbuf)
388{
389	struct virtqueue *vq = vgdev->cursorq.vq;
390	struct scatterlist *sgs[1], ccmd;
391	int ret;
392	int outcnt;
393
394	if (!vgdev->vqs_ready)
395		return -ENODEV;
 
 
396
397	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
398	sgs[0] = &ccmd;
399	outcnt = 1;
400
401	spin_lock(&vgdev->cursorq.qlock);
402retry:
403	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
404	if (ret == -ENOSPC) {
405		spin_unlock(&vgdev->cursorq.qlock);
406		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
407		spin_lock(&vgdev->cursorq.qlock);
408		goto retry;
409	} else {
410		virtqueue_kick(vq);
 
 
 
 
 
411	}
412
413	spin_unlock(&vgdev->cursorq.qlock);
414
415	if (!ret)
416		ret = vq->num_free;
417	return ret;
 
418}
419
420/* just create gem objects for userspace and long lived objects,
421   just use dma_alloced pages for the queue objects? */
 
422
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

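/*
 * Point a host scanout (display) at a rectangle of the given resource;
 * per the virtio-gpu spec, a resource_id of 0 disables the scanout.
 */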
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

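/*
 * Note the rectangle parameters arrive as __le32: callers pass them
 * already little-endian, so they are copied into the command without a
 * further cpu_to_le32() conversion; only resource_id and offset are
 * converted here.
 */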
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

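/*
 * Attach an array of mem entries as backing store. Ownership of 'ents'
 * moves to the vbuf (as data_buf), so the caller must not free it; it
 * is released once the host has consumed the buffer.
 */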
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

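/*
 * Response callbacks below run from the ctrlq dequeue work. This one
 * caches the per-scanout info, clears display_info_pending, wakes any
 * waiter on resp_wq and kicks a hotplug event so connector state gets
 * re-probed.
 */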
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

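/*
 * Asynchronous query: the request is queued with a response buffer and
 * virtio_gpu_cmd_get_display_info_cb() fills vgdev->outputs when the
 * answer arrives; callers can wait on resp_wq until
 * display_info_pending clears.
 */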
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

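/*
 * Fetch a capability set into a freshly allocated cache entry. The
 * entry is returned with is_valid == 0; it becomes valid once
 * virtio_gpu_cmd_capset_cb() has copied the response. A caller-side
 * sketch (illustrative only, error handling omitted):
 *
 *	struct virtio_gpu_drv_cap_cache *cache_ent;
 *
 *	virtio_gpu_cmd_get_capset(vgdev, idx, version, &cache_ent);
 *	wait_event(vgdev->resp_wq, atomic_read(&cache_ent->is_valid));
 */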
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	int max_size;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	max_size = vgdev->capsets[idx].max_size;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

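/*
 * Create a host rendering context; 'name' is only a debug label and is
 * truncated to fit the command's debug_name field.
 */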
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

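/*
 * The caller hands in a fully formed virtio_gpu_resource_create_3d
 * struct; it is copied verbatim and only the header type and flags are
 * (re)set here before the fenced submission.
 */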
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

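/*
 * 3D transfers in both directions share the same command layout. The
 * box is passed in device (little-endian) layout already, which is why
 * it is assigned below without byte swapping.
 */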
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

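/*
 * Submit a 3D command stream. 'data' becomes the vbuf's data_buf, so
 * ownership passes to the queue machinery and the buffer is freed once
 * the host has consumed it.
 */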
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

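/*
 * Build a mem-entry table from the object's sg list (creating the sg
 * table on first use) and attach it as backing store; the entry array
 * is freed by the queue machinery after the host consumes it.
 */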
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

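/*
 * Push the current cursor state for one output: the cursor struct is
 * copied into its own vbuf and queued on the dedicated cursor
 * virtqueue.
 */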
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}