v5.4
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Authors:
   6 *    Dave Airlie <airlied@redhat.com>
   7 *    Gerd Hoffmann <kraxel@redhat.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the next
  17 * paragraph) shall be included in all copies or substantial portions of the
  18 * Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  26 * OTHER DEALINGS IN THE SOFTWARE.
  27 */
  28
  29#include <linux/dma-mapping.h>
  30#include <linux/virtio.h>
  31#include <linux/virtio_config.h>
  32#include <linux/virtio_ring.h>
  33
  34#include "virtgpu_drv.h"
  35#include "virtgpu_trace.h"
  36
  37#define MAX_INLINE_CMD_SIZE   96
  38#define MAX_INLINE_RESP_SIZE  24
  39#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
  40			       + MAX_INLINE_CMD_SIZE		 \
  41			       + MAX_INLINE_RESP_SIZE)
  42
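/*
 * Editorial sketch, not part of the original file: the slab created in
 * virtio_gpu_alloc_vbufs() hands out fixed-size objects laid out as
 *
 *     [ struct virtio_gpu_vbuffer | cmd (<= 96 bytes) | resp (<= 24 bytes) ]
 *
 * virtio_gpu_get_vbuf() below derives vbuf->buf and, for small responses,
 * vbuf->resp_buf from these inline areas, so the common case needs no
 * extra allocation.
 */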
  43void virtio_gpu_ctrl_ack(struct virtqueue *vq)
  44{
  45	struct drm_device *dev = vq->vdev->priv;
  46	struct virtio_gpu_device *vgdev = dev->dev_private;
  47
  48	schedule_work(&vgdev->ctrlq.dequeue_work);
  49}
  50
  51void virtio_gpu_cursor_ack(struct virtqueue *vq)
  52{
  53	struct drm_device *dev = vq->vdev->priv;
  54	struct virtio_gpu_device *vgdev = dev->dev_private;
  55
  56	schedule_work(&vgdev->cursorq.dequeue_work);
  57}
  58
  59int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
  60{
  61	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
  62					 VBUFFER_SIZE,
  63					 __alignof__(struct virtio_gpu_vbuffer),
  64					 0, NULL);
  65	if (!vgdev->vbufs)
  66		return -ENOMEM;
  67	return 0;
  68}
  69
  70void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
  71{
  72	kmem_cache_destroy(vgdev->vbufs);
  73	vgdev->vbufs = NULL;
  74}
  75
  76static struct virtio_gpu_vbuffer*
  77virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
  78		    int size, int resp_size, void *resp_buf,
  79		    virtio_gpu_resp_cb resp_cb)
  80{
  81	struct virtio_gpu_vbuffer *vbuf;
  82
  83	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
  84	if (!vbuf)
  85		return ERR_PTR(-ENOMEM);
  86
  87	BUG_ON(size > MAX_INLINE_CMD_SIZE);
  88	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
  89	vbuf->size = size;
  90
  91	vbuf->resp_cb = resp_cb;
  92	vbuf->resp_size = resp_size;
  93	if (resp_size <= MAX_INLINE_RESP_SIZE)
  94		vbuf->resp_buf = (void *)vbuf->buf + size;
  95	else
  96		vbuf->resp_buf = resp_buf;
  97	BUG_ON(!vbuf->resp_buf);
  98	return vbuf;
  99}
 100
 101static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
 102				  struct virtio_gpu_vbuffer **vbuffer_p,
 103				  int size)
 104{
 105	struct virtio_gpu_vbuffer *vbuf;
 106
 107	vbuf = virtio_gpu_get_vbuf(vgdev, size,
 108				   sizeof(struct virtio_gpu_ctrl_hdr),
 109				   NULL, NULL);
 110	if (IS_ERR(vbuf)) {
 111		*vbuffer_p = NULL;
 112		return ERR_CAST(vbuf);
 113	}
 114	*vbuffer_p = vbuf;
 115	return vbuf->buf;
 116}
 117
 118static struct virtio_gpu_update_cursor*
 119virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 120			struct virtio_gpu_vbuffer **vbuffer_p)
 121{
 122	struct virtio_gpu_vbuffer *vbuf;
 123
 124	vbuf = virtio_gpu_get_vbuf
 125		(vgdev, sizeof(struct virtio_gpu_update_cursor),
 126		 0, NULL, NULL);
 127	if (IS_ERR(vbuf)) {
 128		*vbuffer_p = NULL;
 129		return ERR_CAST(vbuf);
 130	}
 131	*vbuffer_p = vbuf;
 132	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 133}
 134
 135static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 136				       virtio_gpu_resp_cb cb,
 137				       struct virtio_gpu_vbuffer **vbuffer_p,
 138				       int cmd_size, int resp_size,
 139				       void *resp_buf)
 140{
 141	struct virtio_gpu_vbuffer *vbuf;
 142
 143	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
 144				   resp_size, resp_buf, cb);
 145	if (IS_ERR(vbuf)) {
 146		*vbuffer_p = NULL;
 147		return ERR_CAST(vbuf);
 148	}
 149	*vbuffer_p = vbuf;
 150	return (struct virtio_gpu_command *)vbuf->buf;
 151}
 152
 153static void free_vbuf(struct virtio_gpu_device *vgdev,
 154		      struct virtio_gpu_vbuffer *vbuf)
 155{
 156	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 157		kfree(vbuf->resp_buf);
 158	kfree(vbuf->data_buf);
 159	kmem_cache_free(vgdev->vbufs, vbuf);
 160}
 161
 162static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
 163{
 164	struct virtio_gpu_vbuffer *vbuf;
 165	unsigned int len;
 166	int freed = 0;
 167
 168	while ((vbuf = virtqueue_get_buf(vq, &len))) {
 169		list_add_tail(&vbuf->list, reclaim_list);
 170		freed++;
 171	}
 172	if (freed == 0)
 173		DRM_DEBUG("Huh? zero vbufs reclaimed");
 174}
 175
 176void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 177{
 178	struct virtio_gpu_device *vgdev =
 179		container_of(work, struct virtio_gpu_device,
 180			     ctrlq.dequeue_work);
 181	struct list_head reclaim_list;
 182	struct virtio_gpu_vbuffer *entry, *tmp;
 183	struct virtio_gpu_ctrl_hdr *resp;
 184	u64 fence_id = 0;
 185
 186	INIT_LIST_HEAD(&reclaim_list);
 187	spin_lock(&vgdev->ctrlq.qlock);
 188	do {
 189		virtqueue_disable_cb(vgdev->ctrlq.vq);
 190		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
 191
 192	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 193	spin_unlock(&vgdev->ctrlq.qlock);
 194
 195	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 196		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 197
 198		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 199
 200		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 201			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
 202				struct virtio_gpu_ctrl_hdr *cmd;
 203				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
 204				DRM_ERROR("response 0x%x (command 0x%x)\n",
 205					  le32_to_cpu(resp->type),
 206					  le32_to_cpu(cmd->type));
 207			} else
 208				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 209		}
 210		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 211			u64 f = le64_to_cpu(resp->fence_id);
 212
 213			if (fence_id > f) {
 214				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
 215					  __func__, fence_id, f);
 216			} else {
 217				fence_id = f;
 218			}
 219		}
 220		if (entry->resp_cb)
 221			entry->resp_cb(vgdev, entry);
 222
 223		list_del(&entry->list);
 224		free_vbuf(vgdev, entry);
 225	}
 226	wake_up(&vgdev->ctrlq.ack_queue);
 227
 228	if (fence_id)
 229		virtio_gpu_fence_event_process(vgdev, fence_id);
 230}
 231
 232void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 233{
 234	struct virtio_gpu_device *vgdev =
 235		container_of(work, struct virtio_gpu_device,
 236			     cursorq.dequeue_work);
 237	struct list_head reclaim_list;
 238	struct virtio_gpu_vbuffer *entry, *tmp;
 239
 240	INIT_LIST_HEAD(&reclaim_list);
 241	spin_lock(&vgdev->cursorq.qlock);
 242	do {
 243		virtqueue_disable_cb(vgdev->cursorq.vq);
 244		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
 245	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
 246	spin_unlock(&vgdev->cursorq.qlock);
 247
 248	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 249		list_del(&entry->list);
 250		free_vbuf(vgdev, entry);
 251	}
 252	wake_up(&vgdev->cursorq.ack_queue);
 253}
 254
 255static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 256					       struct virtio_gpu_vbuffer *vbuf)
 257		__releases(&vgdev->ctrlq.qlock)
 258		__acquires(&vgdev->ctrlq.qlock)
 259{
 260	struct virtqueue *vq = vgdev->ctrlq.vq;
 261	struct scatterlist *sgs[3], vcmd, vout, vresp;
 262	int outcnt = 0, incnt = 0;
 263	int ret;
 264
 265	if (!vgdev->vqs_ready)
 266		return -ENODEV;
 267
 268	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 269	sgs[outcnt + incnt] = &vcmd;
 270	outcnt++;
 271
 272	if (vbuf->data_size) {
 273		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
 274		sgs[outcnt + incnt] = &vout;
 275		outcnt++;
 276	}
 277
 278	if (vbuf->resp_size) {
 279		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 280		sgs[outcnt + incnt] = &vresp;
 281		incnt++;
 282	}
 283
 284retry:
 285	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 286	if (ret == -ENOSPC) {
 287		spin_unlock(&vgdev->ctrlq.qlock);
 288		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
 289		spin_lock(&vgdev->ctrlq.qlock);
 290		goto retry;
 291	} else {
 292		trace_virtio_gpu_cmd_queue(vq,
 293			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 294
 295		virtqueue_kick(vq);
 296	}
 297
 298	if (!ret)
 299		ret = vq->num_free;
 300	return ret;
 301}
 302
 303static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 304					struct virtio_gpu_vbuffer *vbuf)
 305{
 306	int rc;
 307
 308	spin_lock(&vgdev->ctrlq.qlock);
 309	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 310	spin_unlock(&vgdev->ctrlq.qlock);
 311	return rc;
 312}
 313
 314static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 315					       struct virtio_gpu_vbuffer *vbuf,
 316					       struct virtio_gpu_ctrl_hdr *hdr,
 317					       struct virtio_gpu_fence *fence)
 318{
 319	struct virtqueue *vq = vgdev->ctrlq.vq;
 320	int rc;
 321
 322again:
 323	spin_lock(&vgdev->ctrlq.qlock);
 324
 325	/*
  326	 * Make sure we have enough space in the virtqueue.  If not,
  327	 * wait here until we do.
 328	 *
  329	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
 330	 * to wait for free space, which can result in fence ids being
 331	 * submitted out-of-order.
 332	 */
 333	if (vq->num_free < 3) {
 334		spin_unlock(&vgdev->ctrlq.qlock);
 335		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
 336		goto again;
 337	}
 338
 339	if (fence)
 340		virtio_gpu_fence_emit(vgdev, hdr, fence);
 341	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 342	spin_unlock(&vgdev->ctrlq.qlock);
 343	return rc;
 344}
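/*
 * Editorial worked example, not part of the original file, of the hazard
 * the reservation above avoids: if task A emitted fence id 1 and only then
 * found the ring full, it would drop qlock and sleep inside
 * virtio_gpu_queue_ctrl_buffer_locked(), letting task B emit fence id 2
 * and submit first, so the host would see the fences out of order.
 * Checking vq->num_free before virtio_gpu_fence_emit() closes that window.
 */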
 345
 346static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 347				   struct virtio_gpu_vbuffer *vbuf)
 348{
 349	struct virtqueue *vq = vgdev->cursorq.vq;
 350	struct scatterlist *sgs[1], ccmd;
 351	int ret;
 352	int outcnt;
 353
 354	if (!vgdev->vqs_ready)
 355		return -ENODEV;
 356
 357	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 358	sgs[0] = &ccmd;
 359	outcnt = 1;
 360
 361	spin_lock(&vgdev->cursorq.qlock);
 362retry:
 363	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 364	if (ret == -ENOSPC) {
 365		spin_unlock(&vgdev->cursorq.qlock);
 366		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 367		spin_lock(&vgdev->cursorq.qlock);
 368		goto retry;
 369	} else {
 370		trace_virtio_gpu_cmd_queue(vq,
 371			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 372
 373		virtqueue_kick(vq);
 374	}
 375
 376	spin_unlock(&vgdev->cursorq.qlock);
 377
 378	if (!ret)
 379		ret = vq->num_free;
 380	return ret;
 381}
 382
 383/* just create gem objects for userspace and long lived objects,
 384 * just use dma_alloced pages for the queue objects?
 385 */
 386
 387/* create a basic resource */
 388void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 389				    struct virtio_gpu_object *bo,
 390				    struct virtio_gpu_object_params *params,
 391				    struct virtio_gpu_fence *fence)
 392{
 393	struct virtio_gpu_resource_create_2d *cmd_p;
 394	struct virtio_gpu_vbuffer *vbuf;
 395
 396	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 397	memset(cmd_p, 0, sizeof(*cmd_p));
 398
 399	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
 400	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 401	cmd_p->format = cpu_to_le32(params->format);
 402	cmd_p->width = cpu_to_le32(params->width);
 403	cmd_p->height = cpu_to_le32(params->height);
 404
 405	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 406	bo->created = true;
 407}
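/*
 * Illustrative usage, not part of the original file: object creation in
 * the driver pairs this command with a backing-store attach, roughly
 * (fence handling omitted):
 *
 *	virtio_gpu_cmd_create_resource(vgdev, bo, params, NULL);
 *	virtio_gpu_object_attach(vgdev, bo, NULL);
 */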
 408
 409void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 410				   uint32_t resource_id)
 411{
 412	struct virtio_gpu_resource_unref *cmd_p;
 413	struct virtio_gpu_vbuffer *vbuf;
 414
 415	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 416	memset(cmd_p, 0, sizeof(*cmd_p));
 417
 418	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
 419	cmd_p->resource_id = cpu_to_le32(resource_id);
 420
 421	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 422}
 423
 424static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 425						  uint32_t resource_id,
 426						  struct virtio_gpu_fence *fence)
 427{
 428	struct virtio_gpu_resource_detach_backing *cmd_p;
 429	struct virtio_gpu_vbuffer *vbuf;
 430
 431	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 432	memset(cmd_p, 0, sizeof(*cmd_p));
 433
 434	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
 435	cmd_p->resource_id = cpu_to_le32(resource_id);
 436
 437	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 438}
 439
 440void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 441				uint32_t scanout_id, uint32_t resource_id,
 442				uint32_t width, uint32_t height,
 443				uint32_t x, uint32_t y)
 444{
 445	struct virtio_gpu_set_scanout *cmd_p;
 446	struct virtio_gpu_vbuffer *vbuf;
 447
 448	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 449	memset(cmd_p, 0, sizeof(*cmd_p));
 450
 451	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
 452	cmd_p->resource_id = cpu_to_le32(resource_id);
 453	cmd_p->scanout_id = cpu_to_le32(scanout_id);
 454	cmd_p->r.width = cpu_to_le32(width);
 455	cmd_p->r.height = cpu_to_le32(height);
 456	cmd_p->r.x = cpu_to_le32(x);
 457	cmd_p->r.y = cpu_to_le32(y);
 458
 459	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 460}
 461
 462void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 463				   uint32_t resource_id,
 464				   uint32_t x, uint32_t y,
 465				   uint32_t width, uint32_t height)
 466{
 467	struct virtio_gpu_resource_flush *cmd_p;
 468	struct virtio_gpu_vbuffer *vbuf;
 469
 470	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 471	memset(cmd_p, 0, sizeof(*cmd_p));
 472
 473	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
 474	cmd_p->resource_id = cpu_to_le32(resource_id);
 475	cmd_p->r.width = cpu_to_le32(width);
 476	cmd_p->r.height = cpu_to_le32(height);
 477	cmd_p->r.x = cpu_to_le32(x);
 478	cmd_p->r.y = cpu_to_le32(y);
 479
 480	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 481}
 482
 483void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 484					struct virtio_gpu_object *bo,
 485					uint64_t offset,
 486					__le32 width, __le32 height,
 487					__le32 x, __le32 y,
 488					struct virtio_gpu_fence *fence)
 489{
 490	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 491	struct virtio_gpu_vbuffer *vbuf;
 492	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 493
 494	if (use_dma_api)
 495		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 496				       bo->pages->sgl, bo->pages->nents,
 497				       DMA_TO_DEVICE);
 498
 499	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 500	memset(cmd_p, 0, sizeof(*cmd_p));
 501
 502	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 503	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 504	cmd_p->offset = cpu_to_le64(offset);
 505	cmd_p->r.width = width;
 506	cmd_p->r.height = height;
 507	cmd_p->r.x = x;
 508	cmd_p->r.y = y;
 509
 510	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 511}
 512
 513static void
 514virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 515				       uint32_t resource_id,
 516				       struct virtio_gpu_mem_entry *ents,
 517				       uint32_t nents,
 518				       struct virtio_gpu_fence *fence)
 519{
 520	struct virtio_gpu_resource_attach_backing *cmd_p;
 521	struct virtio_gpu_vbuffer *vbuf;
 522
 523	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 524	memset(cmd_p, 0, sizeof(*cmd_p));
 525
 526	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
 527	cmd_p->resource_id = cpu_to_le32(resource_id);
 528	cmd_p->nr_entries = cpu_to_le32(nents);
 529
 530	vbuf->data_buf = ents;
 531	vbuf->data_size = sizeof(*ents) * nents;
 532
 533	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 534}
 535
 536static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 537					       struct virtio_gpu_vbuffer *vbuf)
 538{
 539	struct virtio_gpu_resp_display_info *resp =
 540		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
 541	int i;
 542
 543	spin_lock(&vgdev->display_info_lock);
 544	for (i = 0; i < vgdev->num_scanouts; i++) {
 545		vgdev->outputs[i].info = resp->pmodes[i];
 546		if (resp->pmodes[i].enabled) {
 547			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
 548				  le32_to_cpu(resp->pmodes[i].r.width),
 549				  le32_to_cpu(resp->pmodes[i].r.height),
 550				  le32_to_cpu(resp->pmodes[i].r.x),
 551				  le32_to_cpu(resp->pmodes[i].r.y));
 552		} else {
 553			DRM_DEBUG("output %d: disabled", i);
 554		}
 555	}
 556
 557	vgdev->display_info_pending = false;
 558	spin_unlock(&vgdev->display_info_lock);
 559	wake_up(&vgdev->resp_wq);
 560
 561	if (!drm_helper_hpd_irq_event(vgdev->ddev))
 562		drm_kms_helper_hotplug_event(vgdev->ddev);
 563}
 564
 565static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
 566					      struct virtio_gpu_vbuffer *vbuf)
 567{
 568	struct virtio_gpu_get_capset_info *cmd =
 569		(struct virtio_gpu_get_capset_info *)vbuf->buf;
 570	struct virtio_gpu_resp_capset_info *resp =
 571		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
 572	int i = le32_to_cpu(cmd->capset_index);
 573
 574	spin_lock(&vgdev->display_info_lock);
 575	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
 576	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
 577	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
 578	spin_unlock(&vgdev->display_info_lock);
 579	wake_up(&vgdev->resp_wq);
 580}
 581
 582static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 583				     struct virtio_gpu_vbuffer *vbuf)
 584{
 585	struct virtio_gpu_get_capset *cmd =
 586		(struct virtio_gpu_get_capset *)vbuf->buf;
 587	struct virtio_gpu_resp_capset *resp =
 588		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
 589	struct virtio_gpu_drv_cap_cache *cache_ent;
 590
 591	spin_lock(&vgdev->display_info_lock);
 592	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 593		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
 594		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 595			memcpy(cache_ent->caps_cache, resp->capset_data,
 596			       cache_ent->size);
 597			/* Copy must occur before is_valid is signalled. */
 598			smp_wmb();
 599			atomic_set(&cache_ent->is_valid, 1);
 600			break;
 601		}
 602	}
 603	spin_unlock(&vgdev->display_info_lock);
 604	wake_up_all(&vgdev->resp_wq);
 605}
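/*
 * Editorial note, not part of the original file: the smp_wmb() above pairs
 * with a read barrier on the consumer side before the cached data is used,
 * roughly (the reader lives elsewhere in the driver; names assumed):
 *
 *	if (atomic_read(&cache_ent->is_valid)) {
 *		smp_rmb();
 *		memcpy(dst, cache_ent->caps_cache, cache_ent->size);
 *	}
 */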
 606
 607static int virtio_get_edid_block(void *data, u8 *buf,
 608				 unsigned int block, size_t len)
 609{
 610	struct virtio_gpu_resp_edid *resp = data;
 611	size_t start = block * EDID_LENGTH;
 612
 613	if (start + len > le32_to_cpu(resp->size))
 614		return -1;
 615	memcpy(buf, resp->edid + start, len);
 616	return 0;
 617}
 618
 619static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 620				       struct virtio_gpu_vbuffer *vbuf)
 621{
 622	struct virtio_gpu_cmd_get_edid *cmd =
 623		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
 624	struct virtio_gpu_resp_edid *resp =
 625		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 626	uint32_t scanout = le32_to_cpu(cmd->scanout);
 627	struct virtio_gpu_output *output;
 628	struct edid *new_edid, *old_edid;
 629
 630	if (scanout >= vgdev->num_scanouts)
 631		return;
 632	output = vgdev->outputs + scanout;
 633
 634	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
 635	drm_connector_update_edid_property(&output->conn, new_edid);
 636
 637	spin_lock(&vgdev->display_info_lock);
 638	old_edid = output->edid;
 639	output->edid = new_edid;
 640	spin_unlock(&vgdev->display_info_lock);
 641
 642	kfree(old_edid);
 643	wake_up(&vgdev->resp_wq);
 644}
 645
 646int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 647{
 648	struct virtio_gpu_ctrl_hdr *cmd_p;
 649	struct virtio_gpu_vbuffer *vbuf;
 650	void *resp_buf;
 651
 652	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
 653			   GFP_KERNEL);
 654	if (!resp_buf)
 655		return -ENOMEM;
 656
 657	cmd_p = virtio_gpu_alloc_cmd_resp
 658		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
 659		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
 660		 resp_buf);
 661	memset(cmd_p, 0, sizeof(*cmd_p));
 662
 663	vgdev->display_info_pending = true;
 664	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 665	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 666	return 0;
 667}
 668
 669int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
 670{
 671	struct virtio_gpu_get_capset_info *cmd_p;
 672	struct virtio_gpu_vbuffer *vbuf;
 673	void *resp_buf;
 674
 675	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
 676			   GFP_KERNEL);
 677	if (!resp_buf)
 678		return -ENOMEM;
 679
 680	cmd_p = virtio_gpu_alloc_cmd_resp
 681		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
 682		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
 683		 resp_buf);
 684	memset(cmd_p, 0, sizeof(*cmd_p));
 685
 686	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
 687	cmd_p->capset_index = cpu_to_le32(idx);
 688	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 689	return 0;
 690}
 691
 692int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 693			      int idx, int version,
 694			      struct virtio_gpu_drv_cap_cache **cache_p)
 695{
 696	struct virtio_gpu_get_capset *cmd_p;
 697	struct virtio_gpu_vbuffer *vbuf;
 698	int max_size;
 699	struct virtio_gpu_drv_cap_cache *cache_ent;
 700	struct virtio_gpu_drv_cap_cache *search_ent;
 701	void *resp_buf;
 702
 703	*cache_p = NULL;
 704
 705	if (idx >= vgdev->num_capsets)
 706		return -EINVAL;
 707
 708	if (version > vgdev->capsets[idx].max_version)
 709		return -EINVAL;
 710
 711	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
 712	if (!cache_ent)
 713		return -ENOMEM;
 714
 715	max_size = vgdev->capsets[idx].max_size;
 716	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 717	if (!cache_ent->caps_cache) {
 718		kfree(cache_ent);
 719		return -ENOMEM;
 720	}
 721
 722	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
 723			   GFP_KERNEL);
 724	if (!resp_buf) {
 725		kfree(cache_ent->caps_cache);
 726		kfree(cache_ent);
 727		return -ENOMEM;
 728	}
 729
 730	cache_ent->version = version;
 731	cache_ent->id = vgdev->capsets[idx].id;
 732	atomic_set(&cache_ent->is_valid, 0);
 733	cache_ent->size = max_size;
 734	spin_lock(&vgdev->display_info_lock);
 735	/* Search while under lock in case it was added by another task. */
 736	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
 737		if (search_ent->id == vgdev->capsets[idx].id &&
 738		    search_ent->version == version) {
 739			*cache_p = search_ent;
 740			break;
 741		}
 742	}
 743	if (!*cache_p)
 744		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 745	spin_unlock(&vgdev->display_info_lock);
 746
 747	if (*cache_p) {
 748		/* Entry was found, so free everything that was just created. */
 749		kfree(resp_buf);
 750		kfree(cache_ent->caps_cache);
 751		kfree(cache_ent);
 752		return 0;
 753	}
 754
 755	cmd_p = virtio_gpu_alloc_cmd_resp
 756		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
 757		 sizeof(struct virtio_gpu_resp_capset) + max_size,
 758		 resp_buf);
 759	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
 760	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
 761	cmd_p->capset_version = cpu_to_le32(version);
 762	*cache_p = cache_ent;
 763	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 764
 765	return 0;
 766}
 767
 768int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
 769{
 770	struct virtio_gpu_cmd_get_edid *cmd_p;
 771	struct virtio_gpu_vbuffer *vbuf;
 772	void *resp_buf;
 773	int scanout;
 774
 775	if (WARN_ON(!vgdev->has_edid))
 776		return -EINVAL;
 777
 778	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
 779		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
 780				   GFP_KERNEL);
 781		if (!resp_buf)
 782			return -ENOMEM;
 783
 784		cmd_p = virtio_gpu_alloc_cmd_resp
 785			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
 786			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
 787			 resp_buf);
 788		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
 789		cmd_p->scanout = cpu_to_le32(scanout);
 790		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 791	}
 792
 793	return 0;
 794}
 795
 796void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 797				   uint32_t nlen, const char *name)
 798{
 799	struct virtio_gpu_ctx_create *cmd_p;
 800	struct virtio_gpu_vbuffer *vbuf;
 801
 802	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 803	memset(cmd_p, 0, sizeof(*cmd_p));
 804
 805	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
 806	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 807	cmd_p->nlen = cpu_to_le32(nlen);
 808	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
 809	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
 810	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 811}
 812
 813void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 814				    uint32_t id)
 815{
 816	struct virtio_gpu_ctx_destroy *cmd_p;
 817	struct virtio_gpu_vbuffer *vbuf;
 818
 819	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 820	memset(cmd_p, 0, sizeof(*cmd_p));
 821
 822	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
 823	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 824	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 825}
 826
 827void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 828					    uint32_t ctx_id,
 829					    uint32_t resource_id)
 830{
 831	struct virtio_gpu_ctx_resource *cmd_p;
 832	struct virtio_gpu_vbuffer *vbuf;
 833
 834	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 835	memset(cmd_p, 0, sizeof(*cmd_p));
 836
 837	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 838	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 839	cmd_p->resource_id = cpu_to_le32(resource_id);
 840	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 841
 842}
 843
 844void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 845					    uint32_t ctx_id,
 846					    uint32_t resource_id)
 847{
 848	struct virtio_gpu_ctx_resource *cmd_p;
 849	struct virtio_gpu_vbuffer *vbuf;
 850
 851	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 852	memset(cmd_p, 0, sizeof(*cmd_p));
 853
 854	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 855	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 856	cmd_p->resource_id = cpu_to_le32(resource_id);
 857	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 858}
 859
 860void
 861virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 862				  struct virtio_gpu_object *bo,
 863				  struct virtio_gpu_object_params *params,
 864				  struct virtio_gpu_fence *fence)
 865{
 866	struct virtio_gpu_resource_create_3d *cmd_p;
 867	struct virtio_gpu_vbuffer *vbuf;
 868
 869	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 870	memset(cmd_p, 0, sizeof(*cmd_p));
 871
 872	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 873	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 874	cmd_p->format = cpu_to_le32(params->format);
 875	cmd_p->width = cpu_to_le32(params->width);
 876	cmd_p->height = cpu_to_le32(params->height);
 877
 878	cmd_p->target = cpu_to_le32(params->target);
 879	cmd_p->bind = cpu_to_le32(params->bind);
 880	cmd_p->depth = cpu_to_le32(params->depth);
 881	cmd_p->array_size = cpu_to_le32(params->array_size);
 882	cmd_p->last_level = cpu_to_le32(params->last_level);
 883	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
 884	cmd_p->flags = cpu_to_le32(params->flags);
 885
 886	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 887	bo->created = true;
 888}
 889
 890void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 891					struct virtio_gpu_object *bo,
 892					uint32_t ctx_id,
 893					uint64_t offset, uint32_t level,
 894					struct virtio_gpu_box *box,
 895					struct virtio_gpu_fence *fence)
 896{
 897	struct virtio_gpu_transfer_host_3d *cmd_p;
 898	struct virtio_gpu_vbuffer *vbuf;
 899	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 900
 901	if (use_dma_api)
 902		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 903				       bo->pages->sgl, bo->pages->nents,
 904				       DMA_TO_DEVICE);
 905
 906	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 907	memset(cmd_p, 0, sizeof(*cmd_p));
 908
 909	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 910	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 911	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 912	cmd_p->box = *box;
 913	cmd_p->offset = cpu_to_le64(offset);
 914	cmd_p->level = cpu_to_le32(level);
 915
 916	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 917}
 918
 919void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 920					  uint32_t resource_id, uint32_t ctx_id,
 921					  uint64_t offset, uint32_t level,
 922					  struct virtio_gpu_box *box,
 923					  struct virtio_gpu_fence *fence)
 924{
 925	struct virtio_gpu_transfer_host_3d *cmd_p;
 926	struct virtio_gpu_vbuffer *vbuf;
 927
 928	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 929	memset(cmd_p, 0, sizeof(*cmd_p));
 930
 931	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 932	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 933	cmd_p->resource_id = cpu_to_le32(resource_id);
 934	cmd_p->box = *box;
 935	cmd_p->offset = cpu_to_le64(offset);
 936	cmd_p->level = cpu_to_le32(level);
 937
 938	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 939}
 940
 941void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 942			   void *data, uint32_t data_size,
 943			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 944{
 945	struct virtio_gpu_cmd_submit *cmd_p;
 946	struct virtio_gpu_vbuffer *vbuf;
 947
 948	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 949	memset(cmd_p, 0, sizeof(*cmd_p));
 950
 951	vbuf->data_buf = data;
 952	vbuf->data_size = data_size;
 953
 954	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 955	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 956	cmd_p->size = cpu_to_le32(data_size);
 957
 958	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 959}
 960
 961int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 962			     struct virtio_gpu_object *obj,
 963			     struct virtio_gpu_fence *fence)
 964{
 965	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 966	struct virtio_gpu_mem_entry *ents;
 967	struct scatterlist *sg;
 968	int si, nents;
 969
 970	if (WARN_ON_ONCE(!obj->created))
 971		return -EINVAL;
 972
 973	if (!obj->pages) {
 974		int ret;
 975
 976		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
 977		if (ret)
 978			return ret;
 979	}
 980
 981	if (use_dma_api) {
 982		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
 983					 obj->pages->sgl, obj->pages->nents,
 984					 DMA_TO_DEVICE);
 985		nents = obj->mapped;
 986	} else {
 987		nents = obj->pages->nents;
 988	}
 989
 990	/* gets freed when the ring has consumed it */
 991	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
 992			     GFP_KERNEL);
 993	if (!ents) {
 994		DRM_ERROR("failed to allocate ent list\n");
 995		return -ENOMEM;
 996	}
 997
 998	for_each_sg(obj->pages->sgl, sg, nents, si) {
 999		ents[si].addr = cpu_to_le64(use_dma_api
1000					    ? sg_dma_address(sg)
1001					    : sg_phys(sg));
1002		ents[si].length = cpu_to_le32(sg->length);
1003		ents[si].padding = 0;
1004	}
1005
1006	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1007					       ents, nents,
1008					       fence);
1009	return 0;
1010}
1011
1012void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
1013			      struct virtio_gpu_object *obj)
1014{
1015	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1016
1017	if (use_dma_api && obj->mapped) {
1018		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 1019		/* detach backing and wait for the host to process it ... */
1020		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
1021		dma_fence_wait(&fence->f, true);
1022		dma_fence_put(&fence->f);
1023
1024		/* ... then tear down iommu mappings */
1025		dma_unmap_sg(vgdev->vdev->dev.parent,
1026			     obj->pages->sgl, obj->mapped,
1027			     DMA_TO_DEVICE);
1028		obj->mapped = 0;
1029	} else {
1030		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
1031	}
1032}
1033
1034void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1035			    struct virtio_gpu_output *output)
1036{
1037	struct virtio_gpu_vbuffer *vbuf;
1038	struct virtio_gpu_update_cursor *cur_p;
1039
1040	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1041	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1042	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1043	virtio_gpu_queue_cursor(vgdev, vbuf);
1044}
v6.2
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Authors:
   6 *    Dave Airlie <airlied@redhat.com>
   7 *    Gerd Hoffmann <kraxel@redhat.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the next
  17 * paragraph) shall be included in all copies or substantial portions of the
  18 * Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  26 * OTHER DEALINGS IN THE SOFTWARE.
  27 */
  28
  29#include <linux/dma-mapping.h>
  30#include <linux/virtio.h>
  31#include <linux/virtio_config.h>
  32#include <linux/virtio_ring.h>
  33
  34#include <drm/drm_edid.h>
  35
  36#include "virtgpu_drv.h"
  37#include "virtgpu_trace.h"
  38
  39#define MAX_INLINE_CMD_SIZE   96
  40#define MAX_INLINE_RESP_SIZE  24
  41#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
  42			       + MAX_INLINE_CMD_SIZE		 \
  43			       + MAX_INLINE_RESP_SIZE)
  44
  45static void convert_to_hw_box(struct virtio_gpu_box *dst,
  46			      const struct drm_virtgpu_3d_box *src)
  47{
  48	dst->x = cpu_to_le32(src->x);
  49	dst->y = cpu_to_le32(src->y);
  50	dst->z = cpu_to_le32(src->z);
  51	dst->w = cpu_to_le32(src->w);
  52	dst->h = cpu_to_le32(src->h);
  53	dst->d = cpu_to_le32(src->d);
  54}
  55
  56void virtio_gpu_ctrl_ack(struct virtqueue *vq)
  57{
  58	struct drm_device *dev = vq->vdev->priv;
  59	struct virtio_gpu_device *vgdev = dev->dev_private;
  60
  61	schedule_work(&vgdev->ctrlq.dequeue_work);
  62}
  63
  64void virtio_gpu_cursor_ack(struct virtqueue *vq)
  65{
  66	struct drm_device *dev = vq->vdev->priv;
  67	struct virtio_gpu_device *vgdev = dev->dev_private;
  68
  69	schedule_work(&vgdev->cursorq.dequeue_work);
  70}
  71
  72int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
  73{
  74	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
  75					 VBUFFER_SIZE,
  76					 __alignof__(struct virtio_gpu_vbuffer),
  77					 0, NULL);
  78	if (!vgdev->vbufs)
  79		return -ENOMEM;
  80	return 0;
  81}
  82
  83void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
  84{
  85	kmem_cache_destroy(vgdev->vbufs);
  86	vgdev->vbufs = NULL;
  87}
  88
  89static struct virtio_gpu_vbuffer*
  90virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
  91		    int size, int resp_size, void *resp_buf,
  92		    virtio_gpu_resp_cb resp_cb)
  93{
  94	struct virtio_gpu_vbuffer *vbuf;
  95
  96	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
  97
  98	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
  99	       size < sizeof(struct virtio_gpu_ctrl_hdr));
 100	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
 101	vbuf->size = size;
 102
 103	vbuf->resp_cb = resp_cb;
 104	vbuf->resp_size = resp_size;
 105	if (resp_size <= MAX_INLINE_RESP_SIZE)
 106		vbuf->resp_buf = (void *)vbuf->buf + size;
 107	else
 108		vbuf->resp_buf = resp_buf;
 109	BUG_ON(!vbuf->resp_buf);
 110	return vbuf;
 111}
 112
 113static struct virtio_gpu_ctrl_hdr *
 114virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 115{
 116	/* this assumes a vbuf contains a command that starts with a
 117	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
 118	 * virtqueues.
 119	 */
 120	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 121}
 122
 123static struct virtio_gpu_update_cursor*
 124virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 125			struct virtio_gpu_vbuffer **vbuffer_p)
 126{
 127	struct virtio_gpu_vbuffer *vbuf;
 128
 129	vbuf = virtio_gpu_get_vbuf
 130		(vgdev, sizeof(struct virtio_gpu_update_cursor),
 131		 0, NULL, NULL);
 132	if (IS_ERR(vbuf)) {
 133		*vbuffer_p = NULL;
 134		return ERR_CAST(vbuf);
 135	}
 136	*vbuffer_p = vbuf;
 137	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 138}
 139
 140static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 141				       virtio_gpu_resp_cb cb,
 142				       struct virtio_gpu_vbuffer **vbuffer_p,
 143				       int cmd_size, int resp_size,
 144				       void *resp_buf)
 145{
 146	struct virtio_gpu_vbuffer *vbuf;
 147
 148	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
 149				   resp_size, resp_buf, cb);
 150	*vbuffer_p = vbuf;
 151	return (struct virtio_gpu_command *)vbuf->buf;
 152}
 153
 154static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
 155				  struct virtio_gpu_vbuffer **vbuffer_p,
 156				  int size)
 157{
 158	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
 159					 sizeof(struct virtio_gpu_ctrl_hdr),
 160					 NULL);
 161}
 162
 163static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
 164				     struct virtio_gpu_vbuffer **vbuffer_p,
 165				     int size,
 166				     virtio_gpu_resp_cb cb)
 167{
 168	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
 169					 sizeof(struct virtio_gpu_ctrl_hdr),
 170					 NULL);
 171}
 172
 173static void free_vbuf(struct virtio_gpu_device *vgdev,
 174		      struct virtio_gpu_vbuffer *vbuf)
 175{
 176	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 177		kfree(vbuf->resp_buf);
 178	kvfree(vbuf->data_buf);
 179	kmem_cache_free(vgdev->vbufs, vbuf);
 180}
 181
 182static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
 183{
 184	struct virtio_gpu_vbuffer *vbuf;
 185	unsigned int len;
 186	int freed = 0;
 187
 188	while ((vbuf = virtqueue_get_buf(vq, &len))) {
 189		list_add_tail(&vbuf->list, reclaim_list);
 190		freed++;
 191	}
 192	if (freed == 0)
 193		DRM_DEBUG("Huh? zero vbufs reclaimed");
 194}
 195
 196void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 197{
 198	struct virtio_gpu_device *vgdev =
 199		container_of(work, struct virtio_gpu_device,
 200			     ctrlq.dequeue_work);
 201	struct list_head reclaim_list;
 202	struct virtio_gpu_vbuffer *entry, *tmp;
 203	struct virtio_gpu_ctrl_hdr *resp;
 204	u64 fence_id;
 205
 206	INIT_LIST_HEAD(&reclaim_list);
 207	spin_lock(&vgdev->ctrlq.qlock);
 208	do {
 209		virtqueue_disable_cb(vgdev->ctrlq.vq);
 210		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
 211
 212	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 213	spin_unlock(&vgdev->ctrlq.qlock);
 214
 215	list_for_each_entry(entry, &reclaim_list, list) {
 216		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 217
 218		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 219
 220		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 221			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 222				struct virtio_gpu_ctrl_hdr *cmd;
 223				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
 224				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
 225						      le32_to_cpu(resp->type),
 226						      le32_to_cpu(cmd->type));
 227			} else
 228				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 229		}
 230		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 231			fence_id = le64_to_cpu(resp->fence_id);
 232			virtio_gpu_fence_event_process(vgdev, fence_id);
 233		}
 234		if (entry->resp_cb)
 235			entry->resp_cb(vgdev, entry);
 236	}
 237	wake_up(&vgdev->ctrlq.ack_queue);
 238
 239	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 240		if (entry->objs)
 241			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
 242		list_del(&entry->list);
 243		free_vbuf(vgdev, entry);
 244	}
 245}
 246
 247void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 248{
 249	struct virtio_gpu_device *vgdev =
 250		container_of(work, struct virtio_gpu_device,
 251			     cursorq.dequeue_work);
 252	struct list_head reclaim_list;
 253	struct virtio_gpu_vbuffer *entry, *tmp;
 254
 255	INIT_LIST_HEAD(&reclaim_list);
 256	spin_lock(&vgdev->cursorq.qlock);
 257	do {
 258		virtqueue_disable_cb(vgdev->cursorq.vq);
 259		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
 260	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
 261	spin_unlock(&vgdev->cursorq.qlock);
 262
 263	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 264		list_del(&entry->list);
 265		free_vbuf(vgdev, entry);
 266	}
 267	wake_up(&vgdev->cursorq.ack_queue);
 268}
 269
 270/* Create sg_table from a vmalloc'd buffer. */
 271static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 272{
 273	int ret, s, i;
 274	struct sg_table *sgt;
 275	struct scatterlist *sg;
 276	struct page *pg;
 277
 278	if (WARN_ON(!PAGE_ALIGNED(data)))
 279		return NULL;
 280
 281	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 282	if (!sgt)
 283		return NULL;
 284
 285	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
 286	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
 287	if (ret) {
 288		kfree(sgt);
 289		return NULL;
 290	}
 291
 292	for_each_sgtable_sg(sgt, sg, i) {
 293		pg = vmalloc_to_page(data);
 294		if (!pg) {
 295			sg_free_table(sgt);
 296			kfree(sgt);
 297			return NULL;
 298		}
 299
 300		s = min_t(int, PAGE_SIZE, size);
 301		sg_set_page(sg, pg, s, 0);
 302
 303		size -= s;
 304		data += s;
 305	}
 306
 307	return sgt;
 308}
 309
 310static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 311				     struct virtio_gpu_vbuffer *vbuf,
 312				     struct virtio_gpu_fence *fence,
 313				     int elemcnt,
 314				     struct scatterlist **sgs,
 315				     int outcnt,
 316				     int incnt)
 317{
 318	struct virtqueue *vq = vgdev->ctrlq.vq;
 319	int ret, idx;
 320
 321	if (!drm_dev_enter(vgdev->ddev, &idx)) {
 322		if (fence && vbuf->objs)
 323			virtio_gpu_array_unlock_resv(vbuf->objs);
 324		free_vbuf(vgdev, vbuf);
 325		return -ENODEV;
 326	}
 327
 328	if (vgdev->has_indirect)
 329		elemcnt = 1;
 330
 331again:
 332	spin_lock(&vgdev->ctrlq.qlock);
 333
 334	if (vq->num_free < elemcnt) {
 335		spin_unlock(&vgdev->ctrlq.qlock);
 336		virtio_gpu_notify(vgdev);
 337		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
 338		goto again;
 339	}
 340
 341	/* now that the position of the vbuf in the virtqueue is known, we can
 342	 * finally set the fence id
 343	 */
 344	if (fence) {
 345		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
 346				      fence);
 347		if (vbuf->objs) {
 348			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
 349			virtio_gpu_array_unlock_resv(vbuf->objs);
 350		}
 351	}
 352
 353	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 354	WARN_ON(ret);
 355
 356	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
 357
 358	atomic_inc(&vgdev->pending_commands);
 359
 360	spin_unlock(&vgdev->ctrlq.qlock);
 361
 362	drm_dev_exit(idx);
 363	return 0;
 364}
 365
 366static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 367					       struct virtio_gpu_vbuffer *vbuf,
 368					       struct virtio_gpu_fence *fence)
 369{
 370	struct scatterlist *sgs[3], vcmd, vout, vresp;
 371	struct sg_table *sgt = NULL;
 372	int elemcnt = 0, outcnt = 0, incnt = 0, ret;
 373
 374	/* set up vcmd */
 375	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 376	elemcnt++;
 377	sgs[outcnt] = &vcmd;
 378	outcnt++;
 379
 380	/* set up vout */
 381	if (vbuf->data_size) {
 382		if (is_vmalloc_addr(vbuf->data_buf)) {
 383			int sg_ents;
 384			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
 385					     &sg_ents);
 386			if (!sgt) {
 387				if (fence && vbuf->objs)
 388					virtio_gpu_array_unlock_resv(vbuf->objs);
 389				return -ENOMEM;
 390			}
 391
 392			elemcnt += sg_ents;
 393			sgs[outcnt] = sgt->sgl;
 394		} else {
 395			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
 396			elemcnt++;
 397			sgs[outcnt] = &vout;
 398		}
 399		outcnt++;
 400	}
 401
 402	/* set up vresp */
 403	if (vbuf->resp_size) {
 404		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 405		elemcnt++;
 406		sgs[outcnt + incnt] = &vresp;
 407		incnt++;
 408	}
 409
 410	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
 411					incnt);
 412
 413	if (sgt) {
 414		sg_free_table(sgt);
 415		kfree(sgt);
 416	}
 417	return ret;
 418}
 419
 420void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 421{
 422	bool notify;
 423
 424	if (!atomic_read(&vgdev->pending_commands))
 425		return;
 426
 427	spin_lock(&vgdev->ctrlq.qlock);
 428	atomic_set(&vgdev->pending_commands, 0);
 429	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
 430	spin_unlock(&vgdev->ctrlq.qlock);
 431
 432	if (notify)
 433		virtqueue_notify(vgdev->ctrlq.vq);
 434}
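/*
 * Illustrative usage, not part of the original file: with the explicit
 * notify split out, callers can batch several control commands and kick
 * the queue once, e.g. (argument lists abbreviated):
 *
 *	virtio_gpu_cmd_set_scanout(vgdev, ...);
 *	virtio_gpu_cmd_resource_flush(vgdev, ...);
 *	virtio_gpu_notify(vgdev);
 */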
 435
 436static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 437					struct virtio_gpu_vbuffer *vbuf)
 438{
 439	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
 440}
 441
 442static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 443				    struct virtio_gpu_vbuffer *vbuf)
 444{
 445	struct virtqueue *vq = vgdev->cursorq.vq;
 446	struct scatterlist *sgs[1], ccmd;
 447	int idx, ret, outcnt;
 448	bool notify;
 449
 450	if (!drm_dev_enter(vgdev->ddev, &idx)) {
 451		free_vbuf(vgdev, vbuf);
 452		return;
 453	}
 454
 455	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 456	sgs[0] = &ccmd;
 457	outcnt = 1;
 458
 459	spin_lock(&vgdev->cursorq.qlock);
 460retry:
 461	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 462	if (ret == -ENOSPC) {
 463		spin_unlock(&vgdev->cursorq.qlock);
 464		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 465		spin_lock(&vgdev->cursorq.qlock);
 466		goto retry;
 467	} else {
 468		trace_virtio_gpu_cmd_queue(vq,
 469			virtio_gpu_vbuf_ctrl_hdr(vbuf));
 470
 471		notify = virtqueue_kick_prepare(vq);
 472	}
 473
 474	spin_unlock(&vgdev->cursorq.qlock);
 475
 476	if (notify)
 477		virtqueue_notify(vq);
 478
 479	drm_dev_exit(idx);
 480}
 481
 482/* just create gem objects for userspace and long lived objects,
 483 * just use dma_alloced pages for the queue objects?
 484 */
 485
 486/* create a basic resource */
 487void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 488				    struct virtio_gpu_object *bo,
 489				    struct virtio_gpu_object_params *params,
 490				    struct virtio_gpu_object_array *objs,
 491				    struct virtio_gpu_fence *fence)
 492{
 493	struct virtio_gpu_resource_create_2d *cmd_p;
 494	struct virtio_gpu_vbuffer *vbuf;
 495
 496	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 497	memset(cmd_p, 0, sizeof(*cmd_p));
 498	vbuf->objs = objs;
 499
 500	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
 501	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 502	cmd_p->format = cpu_to_le32(params->format);
 503	cmd_p->width = cpu_to_le32(params->width);
 504	cmd_p->height = cpu_to_le32(params->height);
 505
 506	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 507	bo->created = true;
 508}
 509
 510static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
 511				    struct virtio_gpu_vbuffer *vbuf)
 512{
 513	struct virtio_gpu_object *bo;
 514
 515	bo = vbuf->resp_cb_data;
 516	vbuf->resp_cb_data = NULL;
 517
 518	virtio_gpu_cleanup_object(bo);
 519}
 520
 521void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 522				   struct virtio_gpu_object *bo)
 523{
 524	struct virtio_gpu_resource_unref *cmd_p;
 525	struct virtio_gpu_vbuffer *vbuf;
 526	int ret;
 527
 528	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
 529					virtio_gpu_cmd_unref_cb);
 530	memset(cmd_p, 0, sizeof(*cmd_p));
 531
 532	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
 533	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 534
 535	vbuf->resp_cb_data = bo;
 536	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 537	if (ret < 0)
 538		virtio_gpu_cleanup_object(bo);
 539}
 540
 541void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 542				uint32_t scanout_id, uint32_t resource_id,
 543				uint32_t width, uint32_t height,
 544				uint32_t x, uint32_t y)
 545{
 546	struct virtio_gpu_set_scanout *cmd_p;
 547	struct virtio_gpu_vbuffer *vbuf;
 548
 549	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 550	memset(cmd_p, 0, sizeof(*cmd_p));
 551
 552	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
 553	cmd_p->resource_id = cpu_to_le32(resource_id);
 554	cmd_p->scanout_id = cpu_to_le32(scanout_id);
 555	cmd_p->r.width = cpu_to_le32(width);
 556	cmd_p->r.height = cpu_to_le32(height);
 557	cmd_p->r.x = cpu_to_le32(x);
 558	cmd_p->r.y = cpu_to_le32(y);
 559
 560	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 561}
 562
 563void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 564				   uint32_t resource_id,
 565				   uint32_t x, uint32_t y,
 566				   uint32_t width, uint32_t height,
 567				   struct virtio_gpu_object_array *objs,
 568				   struct virtio_gpu_fence *fence)
 569{
 570	struct virtio_gpu_resource_flush *cmd_p;
 571	struct virtio_gpu_vbuffer *vbuf;
 572
 573	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 574	memset(cmd_p, 0, sizeof(*cmd_p));
 575	vbuf->objs = objs;
 576
 577	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
 578	cmd_p->resource_id = cpu_to_le32(resource_id);
 579	cmd_p->r.width = cpu_to_le32(width);
 580	cmd_p->r.height = cpu_to_le32(height);
 581	cmd_p->r.x = cpu_to_le32(x);
 582	cmd_p->r.y = cpu_to_le32(y);
 583
 584	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 585}
 586
 587void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 588					uint64_t offset,
 589					uint32_t width, uint32_t height,
 590					uint32_t x, uint32_t y,
 591					struct virtio_gpu_object_array *objs,
 592					struct virtio_gpu_fence *fence)
 593{
 594	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 595	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 596	struct virtio_gpu_vbuffer *vbuf;
 597	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 598
 599	if (virtio_gpu_is_shmem(bo) && use_dma_api)
 600		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
 601					    bo->base.sgt, DMA_TO_DEVICE);
 602
 603	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 604	memset(cmd_p, 0, sizeof(*cmd_p));
 605	vbuf->objs = objs;
 606
 607	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 608	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 609	cmd_p->offset = cpu_to_le64(offset);
 610	cmd_p->r.width = cpu_to_le32(width);
 611	cmd_p->r.height = cpu_to_le32(height);
 612	cmd_p->r.x = cpu_to_le32(x);
 613	cmd_p->r.y = cpu_to_le32(y);
 614
 615	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 616}
 617
 618static void
 619virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 620				       uint32_t resource_id,
 621				       struct virtio_gpu_mem_entry *ents,
 622				       uint32_t nents,
 623				       struct virtio_gpu_fence *fence)
 624{
 625	struct virtio_gpu_resource_attach_backing *cmd_p;
 626	struct virtio_gpu_vbuffer *vbuf;
 627
 628	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 629	memset(cmd_p, 0, sizeof(*cmd_p));
 630
 631	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
 632	cmd_p->resource_id = cpu_to_le32(resource_id);
 633	cmd_p->nr_entries = cpu_to_le32(nents);
 634
 635	vbuf->data_buf = ents;
 636	vbuf->data_size = sizeof(*ents) * nents;
 637
 638	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 639}
 640
 641static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 642					       struct virtio_gpu_vbuffer *vbuf)
 643{
 644	struct virtio_gpu_resp_display_info *resp =
 645		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
 646	int i;
 647
 648	spin_lock(&vgdev->display_info_lock);
 649	for (i = 0; i < vgdev->num_scanouts; i++) {
 650		vgdev->outputs[i].info = resp->pmodes[i];
 651		if (resp->pmodes[i].enabled) {
 652			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
 653				  le32_to_cpu(resp->pmodes[i].r.width),
 654				  le32_to_cpu(resp->pmodes[i].r.height),
 655				  le32_to_cpu(resp->pmodes[i].r.x),
 656				  le32_to_cpu(resp->pmodes[i].r.y));
 657		} else {
 658			DRM_DEBUG("output %d: disabled", i);
 659		}
 660	}
 661
 662	vgdev->display_info_pending = false;
 663	spin_unlock(&vgdev->display_info_lock);
 664	wake_up(&vgdev->resp_wq);
 665
 666	if (!drm_helper_hpd_irq_event(vgdev->ddev))
 667		drm_kms_helper_hotplug_event(vgdev->ddev);
 668}
 669
 670static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
 671					      struct virtio_gpu_vbuffer *vbuf)
 672{
 673	struct virtio_gpu_get_capset_info *cmd =
 674		(struct virtio_gpu_get_capset_info *)vbuf->buf;
 675	struct virtio_gpu_resp_capset_info *resp =
 676		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
 677	int i = le32_to_cpu(cmd->capset_index);
 678
 679	spin_lock(&vgdev->display_info_lock);
 680	if (vgdev->capsets) {
 681		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
 682		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
 683		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
 684	} else {
 685		DRM_ERROR("invalid capset memory.");
 686	}
 687	spin_unlock(&vgdev->display_info_lock);
 688	wake_up(&vgdev->resp_wq);
 689}
 690
 691static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 692				     struct virtio_gpu_vbuffer *vbuf)
 693{
 694	struct virtio_gpu_get_capset *cmd =
 695		(struct virtio_gpu_get_capset *)vbuf->buf;
 696	struct virtio_gpu_resp_capset *resp =
 697		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
 698	struct virtio_gpu_drv_cap_cache *cache_ent;
 699
 700	spin_lock(&vgdev->display_info_lock);
 701	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 702		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
 703		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 704			memcpy(cache_ent->caps_cache, resp->capset_data,
 705			       cache_ent->size);
 706			/* Copy must occur before is_valid is signalled. */
 707			smp_wmb();
 708			atomic_set(&cache_ent->is_valid, 1);
 709			break;
 710		}
 711	}
 712	spin_unlock(&vgdev->display_info_lock);
 713	wake_up_all(&vgdev->resp_wq);
 714}
 715
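/*
 * Block-read helper for drm_do_get_edid(): copies one EDID block out of
 * the response buffer returned by the host, bounds-checked against
 * resp->size.
 */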
 716static int virtio_get_edid_block(void *data, u8 *buf,
 717				 unsigned int block, size_t len)
 718{
 719	struct virtio_gpu_resp_edid *resp = data;
 720	size_t start = block * EDID_LENGTH;
 721
 722	if (start + len > le32_to_cpu(resp->size))
 723		return -EINVAL;
 724	memcpy(buf, resp->edid + start, len);
 725	return 0;
 726}
 727
 728static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 729				       struct virtio_gpu_vbuffer *vbuf)
 730{
 731	struct virtio_gpu_cmd_get_edid *cmd =
 732		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
 733	struct virtio_gpu_resp_edid *resp =
 734		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 735	uint32_t scanout = le32_to_cpu(cmd->scanout);
 736	struct virtio_gpu_output *output;
 737	struct edid *new_edid, *old_edid;
 738
 739	if (scanout >= vgdev->num_scanouts)
 740		return;
 741	output = vgdev->outputs + scanout;
 742
 743	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
 744	drm_connector_update_edid_property(&output->conn, new_edid);
 745
 746	spin_lock(&vgdev->display_info_lock);
 747	old_edid = output->edid;
 748	output->edid = new_edid;
 749	spin_unlock(&vgdev->display_info_lock);
 750
 751	kfree(old_edid);
 752	wake_up(&vgdev->resp_wq);
 753}
 754
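/*
 * Ask the host for the current scanout configuration.  The reply is
 * handled by virtio_gpu_cmd_get_display_info_cb(), which clears
 * display_info_pending and wakes vgdev->resp_wq, so callers can wait for
 * the flag to drop.
 */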
 755int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 756{
 757	struct virtio_gpu_ctrl_hdr *cmd_p;
 758	struct virtio_gpu_vbuffer *vbuf;
 759	void *resp_buf;
 760
 761	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
 762			   GFP_KERNEL);
 763	if (!resp_buf)
 764		return -ENOMEM;
 765
 766	cmd_p = virtio_gpu_alloc_cmd_resp
 767		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
 768		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
 769		 resp_buf);
 770	memset(cmd_p, 0, sizeof(*cmd_p));
 771
 772	vgdev->display_info_pending = true;
 773	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 774	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 775	return 0;
 776}
 777
 778int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
 779{
 780	struct virtio_gpu_get_capset_info *cmd_p;
 781	struct virtio_gpu_vbuffer *vbuf;
 782	void *resp_buf;
 783
 784	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
 785			   GFP_KERNEL);
 786	if (!resp_buf)
 787		return -ENOMEM;
 788
 789	cmd_p = virtio_gpu_alloc_cmd_resp
 790		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
 791		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
 792		 resp_buf);
 793	memset(cmd_p, 0, sizeof(*cmd_p));
 794
 795	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
 796	cmd_p->capset_index = cpu_to_le32(idx);
 797	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 798	return 0;
 799}
 800
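/*
 * Fetch (or reuse) a cached capability set.  The returned cache entry is
 * filled in asynchronously by virtio_gpu_cmd_capset_cb(); a caller is
 * expected to wait for is_valid and pair the callback's smp_wmb() with a
 * read barrier, roughly (sketch of a hypothetical caller, not taken from
 * this file):
 *
 *	virtio_gpu_cmd_get_capset(vgdev, idx, version, &cache_ent);
 *	wait_event(vgdev->resp_wq, atomic_read(&cache_ent->is_valid));
 *	smp_rmb();
 *	memcpy(dst, cache_ent->caps_cache, cache_ent->size);
 */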
 801int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 802			      int idx, int version,
 803			      struct virtio_gpu_drv_cap_cache **cache_p)
 804{
 805	struct virtio_gpu_get_capset *cmd_p;
 806	struct virtio_gpu_vbuffer *vbuf;
 807	int max_size;
 808	struct virtio_gpu_drv_cap_cache *cache_ent;
 809	struct virtio_gpu_drv_cap_cache *search_ent;
 810	void *resp_buf;
 811
 812	*cache_p = NULL;
 813
 814	if (idx >= vgdev->num_capsets)
 815		return -EINVAL;
 816
 817	if (version > vgdev->capsets[idx].max_version)
 818		return -EINVAL;
 819
 820	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
 821	if (!cache_ent)
 822		return -ENOMEM;
 823
 824	max_size = vgdev->capsets[idx].max_size;
 825	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 826	if (!cache_ent->caps_cache) {
 827		kfree(cache_ent);
 828		return -ENOMEM;
 829	}
 830
 831	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
 832			   GFP_KERNEL);
 833	if (!resp_buf) {
 834		kfree(cache_ent->caps_cache);
 835		kfree(cache_ent);
 836		return -ENOMEM;
 837	}
 838
 839	cache_ent->version = version;
 840	cache_ent->id = vgdev->capsets[idx].id;
 841	atomic_set(&cache_ent->is_valid, 0);
 842	cache_ent->size = max_size;
 843	spin_lock(&vgdev->display_info_lock);
 844	/* Search while under lock in case it was added by another task. */
 845	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
 846		if (search_ent->id == vgdev->capsets[idx].id &&
 847		    search_ent->version == version) {
 848			*cache_p = search_ent;
 849			break;
 850		}
 851	}
 852	if (!*cache_p)
 853		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 854	spin_unlock(&vgdev->display_info_lock);
 855
 856	if (*cache_p) {
 857		/* Entry was found, so free everything that was just created. */
 858		kfree(resp_buf);
 859		kfree(cache_ent->caps_cache);
 860		kfree(cache_ent);
 861		return 0;
 862	}
 863
 864	cmd_p = virtio_gpu_alloc_cmd_resp
 865		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
 866		 sizeof(struct virtio_gpu_resp_capset) + max_size,
 867		 resp_buf);
 868	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
 869	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
 870	cmd_p->capset_version = cpu_to_le32(version);
 871	*cache_p = cache_ent;
 872	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 873
 874	return 0;
 875}
 876
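/*
 * Issue one VIRTIO_GPU_CMD_GET_EDID per scanout.  Each reply is handled
 * by virtio_gpu_cmd_get_edid_cb(), which updates the connector's EDID
 * property and wakes vgdev->resp_wq.
 */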
 877int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
 878{
 879	struct virtio_gpu_cmd_get_edid *cmd_p;
 880	struct virtio_gpu_vbuffer *vbuf;
 881	void *resp_buf;
 882	int scanout;
 883
 884	if (WARN_ON(!vgdev->has_edid))
 885		return -EINVAL;
 886
 887	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
 888		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
 889				   GFP_KERNEL);
 890		if (!resp_buf)
 891			return -ENOMEM;
 892
 893		cmd_p = virtio_gpu_alloc_cmd_resp
 894			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
 895			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
 896			 resp_buf);
 897		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
 898		cmd_p->scanout = cpu_to_le32(scanout);
 899		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 900	}
 901
 902	return 0;
 903}
 904
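/*
 * Create a host rendering context.  The debug name is truncated to the
 * size of cmd_p->debug_name and always NUL terminated.
 */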
 905void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 906				   uint32_t context_init, uint32_t nlen,
 907				   const char *name)
 908{
 909	struct virtio_gpu_ctx_create *cmd_p;
 910	struct virtio_gpu_vbuffer *vbuf;
 911
 912	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 913	memset(cmd_p, 0, sizeof(*cmd_p));
 914
 915	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
 916	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 917	cmd_p->nlen = cpu_to_le32(nlen);
 918	cmd_p->context_init = cpu_to_le32(context_init);
 919	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
 920	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
 921	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 922}
 923
 924void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 925				    uint32_t id)
 926{
 927	struct virtio_gpu_ctx_destroy *cmd_p;
 928	struct virtio_gpu_vbuffer *vbuf;
 929
 930	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 931	memset(cmd_p, 0, sizeof(*cmd_p));
 932
 933	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
 934	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 935	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 936}
 937
 938void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 939					    uint32_t ctx_id,
 940					    struct virtio_gpu_object_array *objs)
 941{
 942	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 943	struct virtio_gpu_ctx_resource *cmd_p;
 944	struct virtio_gpu_vbuffer *vbuf;
 945
 946	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 947	memset(cmd_p, 0, sizeof(*cmd_p));
 948	vbuf->objs = objs;
 949
 950	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 951	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 952	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 953	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 954}
 955
 956void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 957					    uint32_t ctx_id,
 958					    struct virtio_gpu_object_array *objs)
 959{
 960	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 961	struct virtio_gpu_ctx_resource *cmd_p;
 962	struct virtio_gpu_vbuffer *vbuf;
 963
 964	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 965	memset(cmd_p, 0, sizeof(*cmd_p));
 966	vbuf->objs = objs;
 967
 968	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 969	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 970	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 971	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 972}
 973
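/*
 * Create a host 3D resource described by @params.  The command is fenced
 * and bo->created is set once it has been queued.
 */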
 974void
 975virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 976				  struct virtio_gpu_object *bo,
 977				  struct virtio_gpu_object_params *params,
 978				  struct virtio_gpu_object_array *objs,
 979				  struct virtio_gpu_fence *fence)
 980{
 981	struct virtio_gpu_resource_create_3d *cmd_p;
 982	struct virtio_gpu_vbuffer *vbuf;
 983
 984	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 985	memset(cmd_p, 0, sizeof(*cmd_p));
 986	vbuf->objs = objs;
 987
 988	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 989	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 990	cmd_p->format = cpu_to_le32(params->format);
 991	cmd_p->width = cpu_to_le32(params->width);
 992	cmd_p->height = cpu_to_le32(params->height);
 993
 994	cmd_p->target = cpu_to_le32(params->target);
 995	cmd_p->bind = cpu_to_le32(params->bind);
 996	cmd_p->depth = cpu_to_le32(params->depth);
 997	cmd_p->array_size = cpu_to_le32(params->array_size);
 998	cmd_p->last_level = cpu_to_le32(params->last_level);
 999	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
1000	cmd_p->flags = cpu_to_le32(params->flags);
1001
1002	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1003
1004	bo->created = true;
1005}
1006
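/*
 * Copy guest data into a host 3D resource.  For shmem objects going
 * through the DMA API the sg table is synced for the device before the
 * command is queued.
 */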
1007void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1008					uint32_t ctx_id,
1009					uint64_t offset, uint32_t level,
1010					uint32_t stride,
1011					uint32_t layer_stride,
1012					struct drm_virtgpu_3d_box *box,
1013					struct virtio_gpu_object_array *objs,
1014					struct virtio_gpu_fence *fence)
1015{
1016	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1017	struct virtio_gpu_transfer_host_3d *cmd_p;
1018	struct virtio_gpu_vbuffer *vbuf;
1019	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1020
1021	if (virtio_gpu_is_shmem(bo) && use_dma_api)
1022		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
1023					    bo->base.sgt, DMA_TO_DEVICE);
1024
1025	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1026	memset(cmd_p, 0, sizeof(*cmd_p));
1027
1028	vbuf->objs = objs;
1029
1030	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
1031	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1032	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1033	convert_to_hw_box(&cmd_p->box, box);
1034	cmd_p->offset = cpu_to_le64(offset);
1035	cmd_p->level = cpu_to_le32(level);
1036	cmd_p->stride = cpu_to_le32(stride);
1037	cmd_p->layer_stride = cpu_to_le32(layer_stride);
1038
1039	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1040}
1041
1042void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1043					  uint32_t ctx_id,
1044					  uint64_t offset, uint32_t level,
1045					  uint32_t stride,
1046					  uint32_t layer_stride,
1047					  struct drm_virtgpu_3d_box *box,
1048					  struct virtio_gpu_object_array *objs,
1049					  struct virtio_gpu_fence *fence)
1050{
1051	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1052	struct virtio_gpu_transfer_host_3d *cmd_p;
1053	struct virtio_gpu_vbuffer *vbuf;
1054
1055	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1056	memset(cmd_p, 0, sizeof(*cmd_p));
1057
1058	vbuf->objs = objs;
1059
1060	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
1061	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1062	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1063	convert_to_hw_box(&cmd_p->box, box);
1064	cmd_p->offset = cpu_to_le64(offset);
1065	cmd_p->level = cpu_to_le32(level);
1066	cmd_p->stride = cpu_to_le32(stride);
1067	cmd_p->layer_stride = cpu_to_le32(layer_stride);
1068
1069	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1070}
1071
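/*
 * Submit a 3D command stream for @ctx_id.  The @data buffer is attached
 * to the vbuffer (vbuf->data_buf / vbuf->data_size) and the submission is
 * fenced.
 */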
1072void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1073			   void *data, uint32_t data_size,
1074			   uint32_t ctx_id,
1075			   struct virtio_gpu_object_array *objs,
1076			   struct virtio_gpu_fence *fence)
1077{
1078	struct virtio_gpu_cmd_submit *cmd_p;
1079	struct virtio_gpu_vbuffer *vbuf;
1080
1081	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1082	memset(cmd_p, 0, sizeof(*cmd_p));
1083
1084	vbuf->data_buf = data;
1085	vbuf->data_size = data_size;
1086	vbuf->objs = objs;
1087
1088	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
1089	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1090	cmd_p->size = cpu_to_le32(data_size);
1091
1092	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1093}
1094
1095void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1096			      struct virtio_gpu_object *obj,
1097			      struct virtio_gpu_mem_entry *ents,
1098			      unsigned int nents)
1099{
1100	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1101					       ents, nents, NULL);
1102}
1103
1104void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1105			    struct virtio_gpu_output *output)
1106{
1107	struct virtio_gpu_vbuffer *vbuf;
1108	struct virtio_gpu_update_cursor *cur_p;
1109
1110	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1111	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1112	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1113	virtio_gpu_queue_cursor(vgdev, vbuf);
1114}
1115
1116static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1117					    struct virtio_gpu_vbuffer *vbuf)
1118{
1119	struct virtio_gpu_object *obj =
1120		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1121	struct virtio_gpu_resp_resource_uuid *resp =
1122		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
1123	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1124
1125	spin_lock(&vgdev->resource_export_lock);
1126	WARN_ON(obj->uuid_state != STATE_INITIALIZING);
1127
1128	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
1129	    obj->uuid_state == STATE_INITIALIZING) {
1130		import_uuid(&obj->uuid, resp->uuid);
1131		obj->uuid_state = STATE_OK;
1132	} else {
1133		obj->uuid_state = STATE_ERR;
1134	}
1135	spin_unlock(&vgdev->resource_export_lock);
1136
1137	wake_up_all(&vgdev->resp_wq);
1138}
1139
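/*
 * Request a UUID for an exported resource.  The object is expected to be
 * in STATE_INITIALIZING; virtio_gpu_cmd_resource_uuid_cb() moves it to
 * STATE_OK or STATE_ERR and wakes vgdev->resp_wq.
 */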
1140int
1141virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1142				    struct virtio_gpu_object_array *objs)
1143{
1144	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1145	struct virtio_gpu_resource_assign_uuid *cmd_p;
1146	struct virtio_gpu_vbuffer *vbuf;
1147	struct virtio_gpu_resp_resource_uuid *resp_buf;
1148
1149	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1150	if (!resp_buf) {
1151		spin_lock(&vgdev->resource_export_lock);
1152		bo->uuid_state = STATE_ERR;
1153		spin_unlock(&vgdev->resource_export_lock);
1154		virtio_gpu_array_put_free(objs);
1155		return -ENOMEM;
1156	}
1157
1158	cmd_p = virtio_gpu_alloc_cmd_resp
1159		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1160		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
1161	memset(cmd_p, 0, sizeof(*cmd_p));
1162
1163	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
1164	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1165
1166	vbuf->objs = objs;
1167	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1168	return 0;
1169}
1170
1171static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
1172					   struct virtio_gpu_vbuffer *vbuf)
1173{
1174	struct virtio_gpu_object *bo =
1175		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1176	struct virtio_gpu_resp_map_info *resp =
1177		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
1178	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
1179	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1180
1181	spin_lock(&vgdev->host_visible_lock);
1182
1183	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
1184		vram->map_info = resp->map_info;
1185		vram->map_state = STATE_OK;
1186	} else {
1187		vram->map_state = STATE_ERR;
1188	}
1189
1190	spin_unlock(&vgdev->host_visible_lock);
1191	wake_up_all(&vgdev->resp_wq);
1192}
1193
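/*
 * Map a blob resource into the host-visible region at @offset.  The
 * map_info/map_state fields of the vram object are updated by
 * virtio_gpu_cmd_resource_map_cb(); callers can wait on vgdev->resp_wq
 * for map_state to change.
 */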
1194int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
1195		       struct virtio_gpu_object_array *objs, uint64_t offset)
1196{
1197	struct virtio_gpu_resource_map_blob *cmd_p;
1198	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1199	struct virtio_gpu_vbuffer *vbuf;
1200	struct virtio_gpu_resp_map_info *resp_buf;
1201
1202	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1203	if (!resp_buf)
1204		return -ENOMEM;
1205
1206	cmd_p = virtio_gpu_alloc_cmd_resp
1207		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
1208		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
1209	memset(cmd_p, 0, sizeof(*cmd_p));
1210
1211	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
1212	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1213	cmd_p->offset = cpu_to_le64(offset);
1214	vbuf->objs = objs;
1215
1216	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1217	return 0;
1218}
1219
1220void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
1221			  struct virtio_gpu_object *bo)
1222{
1223	struct virtio_gpu_resource_unmap_blob *cmd_p;
1224	struct virtio_gpu_vbuffer *vbuf;
1225
1226	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1227	memset(cmd_p, 0, sizeof(*cmd_p));
1228
1229	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
1230	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1231
1232	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1233}
1234
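/*
 * Create a blob resource.  Unlike the 3D path this command is not fenced;
 * ownership of the @ents array passes to the vbuffer and bo->created is
 * set once the command is queued.
 */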
1235void
1236virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
1237				    struct virtio_gpu_object *bo,
1238				    struct virtio_gpu_object_params *params,
1239				    struct virtio_gpu_mem_entry *ents,
1240				    uint32_t nents)
1241{
1242	struct virtio_gpu_resource_create_blob *cmd_p;
1243	struct virtio_gpu_vbuffer *vbuf;
1244
1245	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1246	memset(cmd_p, 0, sizeof(*cmd_p));
1247
1248	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
1249	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
1250	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1251	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
1252	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
1253	cmd_p->blob_id = cpu_to_le64(params->blob_id);
1254	cmd_p->size = cpu_to_le64(params->size);
1255	cmd_p->nr_entries = cpu_to_le32(nents);
1256
1257	vbuf->data_buf = ents;
1258	vbuf->data_size = sizeof(*ents) * nents;
1259
1260	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1261	bo->created = true;
1262}
1263
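/*
 * Point a scanout at a blob resource.  Format, per-plane pitches and
 * offsets come from the framebuffer; the visible rectangle is
 * @width x @height at (@x, @y).
 */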
1264void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
1265				     uint32_t scanout_id,
1266				     struct virtio_gpu_object *bo,
1267				     struct drm_framebuffer *fb,
1268				     uint32_t width, uint32_t height,
1269				     uint32_t x, uint32_t y)
1270{
1271	uint32_t i;
1272	struct virtio_gpu_set_scanout_blob *cmd_p;
1273	struct virtio_gpu_vbuffer *vbuf;
1274	uint32_t format = virtio_gpu_translate_format(fb->format->format);
1275
1276	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1277	memset(cmd_p, 0, sizeof(*cmd_p));
1278
1279	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
1280	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1281	cmd_p->scanout_id = cpu_to_le32(scanout_id);
1282
1283	cmd_p->format = cpu_to_le32(format);
1284	cmd_p->width  = cpu_to_le32(fb->width);
1285	cmd_p->height = cpu_to_le32(fb->height);
1286
1287	for (i = 0; i < 4; i++) {
1288		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
1289		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
1290	}
1291
1292	cmd_p->r.width = cpu_to_le32(width);
1293	cmd_p->r.height = cpu_to_le32(height);
1294	cmd_p->r.x = cpu_to_le32(x);
1295	cmd_p->r.y = cpu_to_le32(y);
1296
1297	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1298}