/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

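/*
 * Each vbuffer is a single contiguous slab object: the struct
 * virtio_gpu_vbuffer itself, immediately followed by room for an
 * inline command of up to MAX_INLINE_CMD_SIZE bytes and an inline
 * response of up to MAX_INLINE_RESP_SIZE bytes, so small commands
 * need no extra allocations.
 */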
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

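/*
 * Virtqueue callbacks for the two queues.  Both defer the actual
 * buffer reclaim to a workqueue, so the heavy lifting runs in process
 * context rather than in the notification path.
 */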
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

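/*
 * Allocate a vbuffer from the slab cache.  The command always lives
 * in the inline area; the response uses the inline area when it fits,
 * otherwise the caller must supply a separate resp_buf.
 */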
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

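/*
 * The disable_cb/enable_cb loop below is the usual virtio pattern:
 * callbacks are suppressed while the queue is drained, and the drain
 * is retried until virtqueue_enable_cb() confirms no new buffers
 * arrived in the meantime, so no completion is missed.
 */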
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

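/*
 * Queue the prepared scatterlists on the control virtqueue.  The
 * fence is emitted while the queue lock is held so fence ids reach
 * the ring in submission order; with indirect descriptors the whole
 * request occupies a single ring slot, hence elemcnt is forced to 1.
 */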
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
}

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;

			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

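/*
 * Queuing a command only bumps pending_commands; the host is not
 * kicked until virtio_gpu_notify() runs, which lets callers batch
 * several commands per notification.
 */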
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

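/*
 * The cursor queue, unlike the control queue, is kicked for every
 * command rather than batched.
 */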
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

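/*
 * The virtio_gpu_cmd_* helpers below all follow the same pattern:
 * allocate a vbuffer, zero and fill the command, then queue it on
 * the control virtqueue, with a fence attached where the caller
 * needs completion signalled.
 */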
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

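/*
 * Response callback for RESOURCE_UNREF: the guest-side object is
 * only cleaned up once the host has acknowledged dropping the
 * resource.
 */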
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

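/*
 * If the transport goes through the DMA API (no legacy DMA-bypass
 * quirk), the backing pages must be synced for device access before
 * the host is asked to read them.
 */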
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

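/*
 * The response callbacks below run from the control queue dequeue
 * worker, with the response payload sitting in vbuf->resp_buf.
 */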
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

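/*
 * Block-fetch callback handed to drm_do_get_edid(): EDID blocks are
 * copied straight out of the response buffer.
 */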
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

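/*
 * Capset contents are cached per (id, version).  A new cache entry
 * is allocated up front and only inserted under display_info_lock if
 * no other task raced in with the same capset; losers free their
 * copy and reuse the winner's entry.
 */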
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}
v4.17
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie <airlied@redhat.com>
  7 *    Gerd Hoffmann <kraxel@redhat.com>
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the next
 17 * paragraph) shall be included in all copies or substantial portions of the
 18 * Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 26 * OTHER DEALINGS IN THE SOFTWARE.
 27 */
 28
 29#include <drm/drmP.h>
 30#include "virtgpu_drv.h"
 31#include <linux/virtio.h>
 32#include <linux/virtio_config.h>
 33#include <linux/virtio_ring.h>
 34
 
 
 
 35#define MAX_INLINE_CMD_SIZE   96
 36#define MAX_INLINE_RESP_SIZE  24
 37#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
 38			       + MAX_INLINE_CMD_SIZE		 \
 39			       + MAX_INLINE_RESP_SIZE)
 40
 41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 42				uint32_t *resid)
 43{
 44	int handle;
 45
 46	idr_preload(GFP_KERNEL);
 47	spin_lock(&vgdev->resource_idr_lock);
 48	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
 49	spin_unlock(&vgdev->resource_idr_lock);
 50	idr_preload_end();
 51	*resid = handle;
 52}
 53
 54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 55{
 56	spin_lock(&vgdev->resource_idr_lock);
 57	idr_remove(&vgdev->resource_idr, id);
 58	spin_unlock(&vgdev->resource_idr_lock);
 
 
 
 59}
 60
 61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 62{
 63	struct drm_device *dev = vq->vdev->priv;
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 65
 66	schedule_work(&vgdev->ctrlq.dequeue_work);
 67}
 68
 69void virtio_gpu_cursor_ack(struct virtqueue *vq)
 70{
 71	struct drm_device *dev = vq->vdev->priv;
 72	struct virtio_gpu_device *vgdev = dev->dev_private;
 73
 74	schedule_work(&vgdev->cursorq.dequeue_work);
 75}
 76
 77int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 78{
 79	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
 80					 VBUFFER_SIZE,
 81					 __alignof__(struct virtio_gpu_vbuffer),
 82					 0, NULL);
 83	if (!vgdev->vbufs)
 84		return -ENOMEM;
 85	return 0;
 86}
 87
 88void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 89{
 90	kmem_cache_destroy(vgdev->vbufs);
 91	vgdev->vbufs = NULL;
 92}
 93
 94static struct virtio_gpu_vbuffer*
 95virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 96		    int size, int resp_size, void *resp_buf,
 97		    virtio_gpu_resp_cb resp_cb)
 98{
 99	struct virtio_gpu_vbuffer *vbuf;
100
101	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
102	if (!vbuf)
103		return ERR_PTR(-ENOMEM);
104	memset(vbuf, 0, VBUFFER_SIZE);
105
106	BUG_ON(size > MAX_INLINE_CMD_SIZE);
 
107	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
108	vbuf->size = size;
109
110	vbuf->resp_cb = resp_cb;
111	vbuf->resp_size = resp_size;
112	if (resp_size <= MAX_INLINE_RESP_SIZE)
113		vbuf->resp_buf = (void *)vbuf->buf + size;
114	else
115		vbuf->resp_buf = resp_buf;
116	BUG_ON(!vbuf->resp_buf);
117	return vbuf;
118}
119
120static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
121				  struct virtio_gpu_vbuffer **vbuffer_p,
122				  int size)
123{
124	struct virtio_gpu_vbuffer *vbuf;
125
126	vbuf = virtio_gpu_get_vbuf(vgdev, size,
127				   sizeof(struct virtio_gpu_ctrl_hdr),
128				   NULL, NULL);
129	if (IS_ERR(vbuf)) {
130		*vbuffer_p = NULL;
131		return ERR_CAST(vbuf);
132	}
133	*vbuffer_p = vbuf;
134	return vbuf->buf;
135}
136
137static struct virtio_gpu_update_cursor*
138virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
139			struct virtio_gpu_vbuffer **vbuffer_p)
140{
141	struct virtio_gpu_vbuffer *vbuf;
142
143	vbuf = virtio_gpu_get_vbuf
144		(vgdev, sizeof(struct virtio_gpu_update_cursor),
145		 0, NULL, NULL);
146	if (IS_ERR(vbuf)) {
147		*vbuffer_p = NULL;
148		return ERR_CAST(vbuf);
149	}
150	*vbuffer_p = vbuf;
151	return (struct virtio_gpu_update_cursor *)vbuf->buf;
152}
153
154static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
155				       virtio_gpu_resp_cb cb,
156				       struct virtio_gpu_vbuffer **vbuffer_p,
157				       int cmd_size, int resp_size,
158				       void *resp_buf)
159{
160	struct virtio_gpu_vbuffer *vbuf;
161
162	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
163				   resp_size, resp_buf, cb);
164	if (IS_ERR(vbuf)) {
165		*vbuffer_p = NULL;
166		return ERR_CAST(vbuf);
167	}
168	*vbuffer_p = vbuf;
169	return (struct virtio_gpu_command *)vbuf->buf;
170}
171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172static void free_vbuf(struct virtio_gpu_device *vgdev,
173		      struct virtio_gpu_vbuffer *vbuf)
174{
175	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
176		kfree(vbuf->resp_buf);
177	kfree(vbuf->data_buf);
178	kmem_cache_free(vgdev->vbufs, vbuf);
179}
180
181static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
182{
183	struct virtio_gpu_vbuffer *vbuf;
184	unsigned int len;
185	int freed = 0;
186
187	while ((vbuf = virtqueue_get_buf(vq, &len))) {
188		list_add_tail(&vbuf->list, reclaim_list);
189		freed++;
190	}
191	if (freed == 0)
192		DRM_DEBUG("Huh? zero vbufs reclaimed");
193}
194
195void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
196{
197	struct virtio_gpu_device *vgdev =
198		container_of(work, struct virtio_gpu_device,
199			     ctrlq.dequeue_work);
200	struct list_head reclaim_list;
201	struct virtio_gpu_vbuffer *entry, *tmp;
202	struct virtio_gpu_ctrl_hdr *resp;
203	u64 fence_id = 0;
204
205	INIT_LIST_HEAD(&reclaim_list);
206	spin_lock(&vgdev->ctrlq.qlock);
207	do {
208		virtqueue_disable_cb(vgdev->ctrlq.vq);
209		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
210
211	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
212	spin_unlock(&vgdev->ctrlq.qlock);
213
214	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
215		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
216		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
217			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 
 
 
 
 
 
 
 
 
 
 
218		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
219			u64 f = le64_to_cpu(resp->fence_id);
220
221			if (fence_id > f) {
222				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
223					  __func__, fence_id, f);
224			} else {
225				fence_id = f;
226			}
227		}
228		if (entry->resp_cb)
229			entry->resp_cb(vgdev, entry);
230
231		list_del(&entry->list);
232		free_vbuf(vgdev, entry);
233	}
234	wake_up(&vgdev->ctrlq.ack_queue);
235
236	if (fence_id)
237		virtio_gpu_fence_event_process(vgdev, fence_id);
 
 
 
 
 
 
 
238}
239
240void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
241{
242	struct virtio_gpu_device *vgdev =
243		container_of(work, struct virtio_gpu_device,
244			     cursorq.dequeue_work);
245	struct list_head reclaim_list;
246	struct virtio_gpu_vbuffer *entry, *tmp;
247
248	INIT_LIST_HEAD(&reclaim_list);
249	spin_lock(&vgdev->cursorq.qlock);
250	do {
251		virtqueue_disable_cb(vgdev->cursorq.vq);
252		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
253	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
254	spin_unlock(&vgdev->cursorq.qlock);
255
256	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
257		list_del(&entry->list);
258		free_vbuf(vgdev, entry);
259	}
260	wake_up(&vgdev->cursorq.ack_queue);
261}
262
263static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
264					       struct virtio_gpu_vbuffer *vbuf)
265		__releases(&vgdev->ctrlq.qlock)
266		__acquires(&vgdev->ctrlq.qlock)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267{
268	struct virtqueue *vq = vgdev->ctrlq.vq;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269	struct scatterlist *sgs[3], vcmd, vout, vresp;
270	int outcnt = 0, incnt = 0;
271	int ret;
272
273	if (!vgdev->vqs_ready)
274		return -ENODEV;
275
 
276	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
277	sgs[outcnt + incnt] = &vcmd;
 
278	outcnt++;
279
 
280	if (vbuf->data_size) {
281		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
282		sgs[outcnt + incnt] = &vout;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
283		outcnt++;
284	}
285
 
286	if (vbuf->resp_size) {
287		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 
288		sgs[outcnt + incnt] = &vresp;
289		incnt++;
290	}
291
292retry:
293	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294	if (ret == -ENOSPC) {
295		spin_unlock(&vgdev->ctrlq.qlock);
296		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297		spin_lock(&vgdev->ctrlq.qlock);
298		goto retry;
299	} else {
300		virtqueue_kick(vq);
301	}
302
303	if (!ret)
304		ret = vq->num_free;
305	return ret;
306}
307
308static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
309					struct virtio_gpu_vbuffer *vbuf)
310{
311	int rc;
 
 
 
312
313	spin_lock(&vgdev->ctrlq.qlock);
314	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 
315	spin_unlock(&vgdev->ctrlq.qlock);
316	return rc;
 
 
317}
318
319static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
320					       struct virtio_gpu_vbuffer *vbuf,
321					       struct virtio_gpu_ctrl_hdr *hdr,
322					       struct virtio_gpu_fence **fence)
323{
324	struct virtqueue *vq = vgdev->ctrlq.vq;
325	int rc;
326
327again:
328	spin_lock(&vgdev->ctrlq.qlock);
329
330	/*
331	 * Make sure we have enouth space in the virtqueue.  If not
332	 * wait here until we have.
333	 *
334	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
335	 * to wait for free space, which can result in fence ids being
336	 * submitted out-of-order.
337	 */
338	if (vq->num_free < 3) {
339		spin_unlock(&vgdev->ctrlq.qlock);
340		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
341		goto again;
342	}
343
344	if (fence)
345		virtio_gpu_fence_emit(vgdev, hdr, fence);
346	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
347	spin_unlock(&vgdev->ctrlq.qlock);
348	return rc;
349}
350
351static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
352				   struct virtio_gpu_vbuffer *vbuf)
353{
354	struct virtqueue *vq = vgdev->cursorq.vq;
355	struct scatterlist *sgs[1], ccmd;
356	int ret;
357	int outcnt;
358
359	if (!vgdev->vqs_ready)
360		return -ENODEV;
 
 
361
362	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
363	sgs[0] = &ccmd;
364	outcnt = 1;
365
366	spin_lock(&vgdev->cursorq.qlock);
367retry:
368	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369	if (ret == -ENOSPC) {
370		spin_unlock(&vgdev->cursorq.qlock);
371		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372		spin_lock(&vgdev->cursorq.qlock);
373		goto retry;
374	} else {
375		virtqueue_kick(vq);
 
 
 
376	}
377
378	spin_unlock(&vgdev->cursorq.qlock);
379
380	if (!ret)
381		ret = vq->num_free;
382	return ret;
 
383}
384
385/* just create gem objects for userspace and long lived objects,
386 * just use dma_alloced pages for the queue objects?
387 */
388
389/* create a basic resource */
390void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
391				    uint32_t resource_id,
392				    uint32_t format,
393				    uint32_t width,
394				    uint32_t height)
395{
396	struct virtio_gpu_resource_create_2d *cmd_p;
397	struct virtio_gpu_vbuffer *vbuf;
398
399	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
400	memset(cmd_p, 0, sizeof(*cmd_p));
 
401
402	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
403	cmd_p->resource_id = cpu_to_le32(resource_id);
404	cmd_p->format = cpu_to_le32(format);
405	cmd_p->width = cpu_to_le32(width);
406	cmd_p->height = cpu_to_le32(height);
407
408	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 
409}
410
411void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
412				   uint32_t resource_id)
413{
414	struct virtio_gpu_resource_unref *cmd_p;
415	struct virtio_gpu_vbuffer *vbuf;
416
417	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
418	memset(cmd_p, 0, sizeof(*cmd_p));
419
420	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
421	cmd_p->resource_id = cpu_to_le32(resource_id);
422
423	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
424}
425
426void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
427					   uint32_t resource_id)
428{
429	struct virtio_gpu_resource_detach_backing *cmd_p;
430	struct virtio_gpu_vbuffer *vbuf;
431
432	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 
433	memset(cmd_p, 0, sizeof(*cmd_p));
434
435	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
436	cmd_p->resource_id = cpu_to_le32(resource_id);
437
 
438	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
439}
440
441void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
442				uint32_t scanout_id, uint32_t resource_id,
443				uint32_t width, uint32_t height,
444				uint32_t x, uint32_t y)
445{
446	struct virtio_gpu_set_scanout *cmd_p;
447	struct virtio_gpu_vbuffer *vbuf;
448
449	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
450	memset(cmd_p, 0, sizeof(*cmd_p));
451
452	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
453	cmd_p->resource_id = cpu_to_le32(resource_id);
454	cmd_p->scanout_id = cpu_to_le32(scanout_id);
455	cmd_p->r.width = cpu_to_le32(width);
456	cmd_p->r.height = cpu_to_le32(height);
457	cmd_p->r.x = cpu_to_le32(x);
458	cmd_p->r.y = cpu_to_le32(y);
459
460	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
461}
462
463void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
464				   uint32_t resource_id,
465				   uint32_t x, uint32_t y,
466				   uint32_t width, uint32_t height)
467{
468	struct virtio_gpu_resource_flush *cmd_p;
469	struct virtio_gpu_vbuffer *vbuf;
470
471	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
472	memset(cmd_p, 0, sizeof(*cmd_p));
473
474	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
475	cmd_p->resource_id = cpu_to_le32(resource_id);
476	cmd_p->r.width = cpu_to_le32(width);
477	cmd_p->r.height = cpu_to_le32(height);
478	cmd_p->r.x = cpu_to_le32(x);
479	cmd_p->r.y = cpu_to_le32(y);
480
481	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
482}
483
484void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
485					uint32_t resource_id, uint64_t offset,
486					__le32 width, __le32 height,
487					__le32 x, __le32 y,
488					struct virtio_gpu_fence **fence)
 
489{
 
490	struct virtio_gpu_transfer_to_host_2d *cmd_p;
491	struct virtio_gpu_vbuffer *vbuf;
 
 
 
 
 
 
 
492
493	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
494	memset(cmd_p, 0, sizeof(*cmd_p));
 
495
496	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
497	cmd_p->resource_id = cpu_to_le32(resource_id);
498	cmd_p->offset = cpu_to_le64(offset);
499	cmd_p->r.width = width;
500	cmd_p->r.height = height;
501	cmd_p->r.x = x;
502	cmd_p->r.y = y;
503
504	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
505}
506
507static void
508virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
509				       uint32_t resource_id,
510				       struct virtio_gpu_mem_entry *ents,
511				       uint32_t nents,
512				       struct virtio_gpu_fence **fence)
513{
514	struct virtio_gpu_resource_attach_backing *cmd_p;
515	struct virtio_gpu_vbuffer *vbuf;
516
517	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
518	memset(cmd_p, 0, sizeof(*cmd_p));
519
520	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
521	cmd_p->resource_id = cpu_to_le32(resource_id);
522	cmd_p->nr_entries = cpu_to_le32(nents);
523
524	vbuf->data_buf = ents;
525	vbuf->data_size = sizeof(*ents) * nents;
526
527	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
528}
529
530static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
531					       struct virtio_gpu_vbuffer *vbuf)
532{
533	struct virtio_gpu_resp_display_info *resp =
534		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
535	int i;
536
537	spin_lock(&vgdev->display_info_lock);
538	for (i = 0; i < vgdev->num_scanouts; i++) {
539		vgdev->outputs[i].info = resp->pmodes[i];
540		if (resp->pmodes[i].enabled) {
541			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
542				  le32_to_cpu(resp->pmodes[i].r.width),
543				  le32_to_cpu(resp->pmodes[i].r.height),
544				  le32_to_cpu(resp->pmodes[i].r.x),
545				  le32_to_cpu(resp->pmodes[i].r.y));
546		} else {
547			DRM_DEBUG("output %d: disabled", i);
548		}
549	}
550
551	vgdev->display_info_pending = false;
552	spin_unlock(&vgdev->display_info_lock);
553	wake_up(&vgdev->resp_wq);
554
555	if (!drm_helper_hpd_irq_event(vgdev->ddev))
556		drm_kms_helper_hotplug_event(vgdev->ddev);
557}
558
559static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
560					      struct virtio_gpu_vbuffer *vbuf)
561{
562	struct virtio_gpu_get_capset_info *cmd =
563		(struct virtio_gpu_get_capset_info *)vbuf->buf;
564	struct virtio_gpu_resp_capset_info *resp =
565		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
566	int i = le32_to_cpu(cmd->capset_index);
567
568	spin_lock(&vgdev->display_info_lock);
569	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
570	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
571	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
572	spin_unlock(&vgdev->display_info_lock);
573	wake_up(&vgdev->resp_wq);
574}
575
576static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
577				     struct virtio_gpu_vbuffer *vbuf)
578{
579	struct virtio_gpu_get_capset *cmd =
580		(struct virtio_gpu_get_capset *)vbuf->buf;
581	struct virtio_gpu_resp_capset *resp =
582		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
583	struct virtio_gpu_drv_cap_cache *cache_ent;
584
585	spin_lock(&vgdev->display_info_lock);
586	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
587		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
588		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
589			memcpy(cache_ent->caps_cache, resp->capset_data,
590			       cache_ent->size);
 
 
591			atomic_set(&cache_ent->is_valid, 1);
592			break;
593		}
594	}
595	spin_unlock(&vgdev->display_info_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
596	wake_up(&vgdev->resp_wq);
597}
598
599int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
600{
601	struct virtio_gpu_ctrl_hdr *cmd_p;
602	struct virtio_gpu_vbuffer *vbuf;
603	void *resp_buf;
604
605	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
606			   GFP_KERNEL);
607	if (!resp_buf)
608		return -ENOMEM;
609
610	cmd_p = virtio_gpu_alloc_cmd_resp
611		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
612		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
613		 resp_buf);
614	memset(cmd_p, 0, sizeof(*cmd_p));
615
616	vgdev->display_info_pending = true;
617	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
618	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
619	return 0;
620}
621
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

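/*
 * Fetch the contents of capability set @idx at @version.  A cache entry
 * is allocated and put on vgdev->cap_cache before the command is queued;
 * virtio_gpu_cmd_capset_cb() fills it and flips is_valid when the
 * response arrives, so callers must wait for is_valid before reading
 * caps_cache.
 */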
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

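/*
 * Typical caller pattern, as a minimal sketch only (the waiting code
 * lives in the ioctl layer, not in this file, and the 5 second timeout
 * below is an illustrative assumption):
 *
 *	ret = virtio_gpu_cmd_get_capset(vgdev, idx, version, &cache_ent);
 *	if (!ret)
 *		wait_event_timeout(vgdev->resp_wq,
 *				   atomic_read(&cache_ent->is_valid),
 *				   5 * HZ);
 */

/*
 * Create a host rendering context; @name is only a debug label and is
 * truncated to fit the fixed-size debug_name field.
 */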
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

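/* Destroy the host rendering context previously created for @id. */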
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

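/*
 * Make @resource_id visible to context @ctx_id on the host so that
 * subsequent command submissions in that context may reference it.
 */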
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

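/* Undo virtio_gpu_cmd_context_attach_resource() for @resource_id. */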
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

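/*
 * Create a 3D resource on the host.  The caller passes a fully filled
 * request; only the header type and flags are (re)set here.  Completion
 * is signalled through the optional @fence.
 */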
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

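/*
 * Copy the @box region of a resource from guest memory to the host copy,
 * starting at @offset within the backing store at mipmap @level.  The
 * command is fenced so the caller can wait for the data to land.
 */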
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

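/*
 * The inverse transfer: copy a region of the host resource back into
 * the guest backing store.
 */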
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

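/*
 * Submit a command stream (SUBMIT_3D) for context @ctx_id.  Ownership
 * of @data moves to the vbuf (vbuf->data_buf) and it is freed together
 * with the vbuf once the host has consumed the buffer.
 */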
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

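/*
 * Attach guest backing pages to a resource: lazily build the object's
 * sg table, translate each scatterlist entry into a virtio_gpu_mem_entry
 * (physical address plus length), and hand the array to the host via
 * RESOURCE_ATTACH_BACKING.  The entry array is freed once the ring has
 * consumed it.
 */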
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

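/*
 * Push the cached cursor state for @output to the host via the
 * dedicated cursor virtqueue.
 */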
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}