v5.4: drivers/gpu/drm/virtio/virtgpu_vq.c
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Authors:
   6 *    Dave Airlie <airlied@redhat.com>
   7 *    Gerd Hoffmann <kraxel@redhat.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the next
  17 * paragraph) shall be included in all copies or substantial portions of the
  18 * Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  26 * OTHER DEALINGS IN THE SOFTWARE.
  27 */
  28
  29#include <linux/dma-mapping.h>
  30#include <linux/virtio.h>
  31#include <linux/virtio_config.h>
  32#include <linux/virtio_ring.h>
  33
  34#include "virtgpu_drv.h"
  35#include "virtgpu_trace.h"
  36
  37#define MAX_INLINE_CMD_SIZE   96
  38#define MAX_INLINE_RESP_SIZE  24
  39#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
  40			       + MAX_INLINE_CMD_SIZE		 \
  41			       + MAX_INLINE_RESP_SIZE)
  42
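/*
 * Virtqueue event callbacks.  Ring processing is deferred to the
 * per-queue dequeue worker rather than being done in the notification
 * path itself.
 */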
  43void virtio_gpu_ctrl_ack(struct virtqueue *vq)
  44{
  45	struct drm_device *dev = vq->vdev->priv;
  46	struct virtio_gpu_device *vgdev = dev->dev_private;
  47
  48	schedule_work(&vgdev->ctrlq.dequeue_work);
  49}
  50
  51void virtio_gpu_cursor_ack(struct virtqueue *vq)
  52{
  53	struct drm_device *dev = vq->vdev->priv;
  54	struct virtio_gpu_device *vgdev = dev->dev_private;
  55
  56	schedule_work(&vgdev->cursorq.dequeue_work);
  57}
  58
  59int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
  60{
  61	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
  62					 VBUFFER_SIZE,
  63					 __alignof__(struct virtio_gpu_vbuffer),
  64					 0, NULL);
  65	if (!vgdev->vbufs)
  66		return -ENOMEM;
  67	return 0;
  68}
  69
  70void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
  71{
  72	kmem_cache_destroy(vgdev->vbufs);
  73	vgdev->vbufs = NULL;
  74}
  75
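/*
 * Allocate a vbuffer from the slab cache.  The command is always stored
 * inline after the vbuffer itself (hence the BUG_ON against
 * MAX_INLINE_CMD_SIZE); responses up to MAX_INLINE_RESP_SIZE use the
 * inline tail of the allocation, larger ones the caller-supplied
 * resp_buf.
 */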
  76static struct virtio_gpu_vbuffer*
  77virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
  78		    int size, int resp_size, void *resp_buf,
  79		    virtio_gpu_resp_cb resp_cb)
  80{
  81	struct virtio_gpu_vbuffer *vbuf;
  82
  83	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
  84	if (!vbuf)
  85		return ERR_PTR(-ENOMEM);
  86
  87	BUG_ON(size > MAX_INLINE_CMD_SIZE);
  88	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
  89	vbuf->size = size;
  90
  91	vbuf->resp_cb = resp_cb;
  92	vbuf->resp_size = resp_size;
  93	if (resp_size <= MAX_INLINE_RESP_SIZE)
  94		vbuf->resp_buf = (void *)vbuf->buf + size;
  95	else
  96		vbuf->resp_buf = resp_buf;
  97	BUG_ON(!vbuf->resp_buf);
  98	return vbuf;
  99}
 100
 101static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
 102				  struct virtio_gpu_vbuffer **vbuffer_p,
 103				  int size)
 104{
 105	struct virtio_gpu_vbuffer *vbuf;
 106
 107	vbuf = virtio_gpu_get_vbuf(vgdev, size,
 108				   sizeof(struct virtio_gpu_ctrl_hdr),
 109				   NULL, NULL);
 110	if (IS_ERR(vbuf)) {
 111		*vbuffer_p = NULL;
 112		return ERR_CAST(vbuf);
 113	}
 114	*vbuffer_p = vbuf;
 115	return vbuf->buf;
 116}
 117
 118static struct virtio_gpu_update_cursor*
 119virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 120			struct virtio_gpu_vbuffer **vbuffer_p)
 121{
 122	struct virtio_gpu_vbuffer *vbuf;
 123
 124	vbuf = virtio_gpu_get_vbuf
 125		(vgdev, sizeof(struct virtio_gpu_update_cursor),
 126		 0, NULL, NULL);
 127	if (IS_ERR(vbuf)) {
 128		*vbuffer_p = NULL;
 129		return ERR_CAST(vbuf);
 130	}
 131	*vbuffer_p = vbuf;
 132	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 133}
 134
 135static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 136				       virtio_gpu_resp_cb cb,
 137				       struct virtio_gpu_vbuffer **vbuffer_p,
 138				       int cmd_size, int resp_size,
 139				       void *resp_buf)
 140{
 141	struct virtio_gpu_vbuffer *vbuf;
 142
 143	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
 144				   resp_size, resp_buf, cb);
 145	if (IS_ERR(vbuf)) {
 146		*vbuffer_p = NULL;
 147		return ERR_CAST(vbuf);
 148	}
 149	*vbuffer_p = vbuf;
 150	return (struct virtio_gpu_command *)vbuf->buf;
 151}
 152
 153static void free_vbuf(struct virtio_gpu_device *vgdev,
 154		      struct virtio_gpu_vbuffer *vbuf)
 155{
 156	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 157		kfree(vbuf->resp_buf);
 158	kfree(vbuf->data_buf);
 159	kmem_cache_free(vgdev->vbufs, vbuf);
 160}
 161
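/* Collect completed vbuffers from the virtqueue; called with qlock held. */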
 162static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
 163{
 164	struct virtio_gpu_vbuffer *vbuf;
 165	unsigned int len;
 166	int freed = 0;
 167
 168	while ((vbuf = virtqueue_get_buf(vq, &len))) {
 169		list_add_tail(&vbuf->list, reclaim_list);
 170		freed++;
 171	}
 172	if (freed == 0)
 173		DRM_DEBUG("Huh? zero vbufs reclaimed");
 174}
 175
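/*
 * Control-queue dequeue worker: drain completed buffers with callbacks
 * disabled, then, outside the lock, log error responses, track the
 * highest completed fence id, run response callbacks and free the
 * buffers.
 */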
 176void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 177{
 178	struct virtio_gpu_device *vgdev =
 179		container_of(work, struct virtio_gpu_device,
 180			     ctrlq.dequeue_work);
 181	struct list_head reclaim_list;
 182	struct virtio_gpu_vbuffer *entry, *tmp;
 183	struct virtio_gpu_ctrl_hdr *resp;
 184	u64 fence_id = 0;
 185
 186	INIT_LIST_HEAD(&reclaim_list);
 187	spin_lock(&vgdev->ctrlq.qlock);
 188	do {
 189		virtqueue_disable_cb(vgdev->ctrlq.vq);
 190		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
 191
 192	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 193	spin_unlock(&vgdev->ctrlq.qlock);
 194
 195	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 196		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 197
 198		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 199
 200		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 201			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
 202				struct virtio_gpu_ctrl_hdr *cmd;
 203				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
 204				DRM_ERROR("response 0x%x (command 0x%x)\n",
 205					  le32_to_cpu(resp->type),
 206					  le32_to_cpu(cmd->type));
 207			} else
 208				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 209		}
 210		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 211			u64 f = le64_to_cpu(resp->fence_id);
 212
 213			if (fence_id > f) {
 214				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
 215					  __func__, fence_id, f);
 216			} else {
 217				fence_id = f;
 218			}
 219		}
 220		if (entry->resp_cb)
 221			entry->resp_cb(vgdev, entry);
 222
 223		list_del(&entry->list);
 224		free_vbuf(vgdev, entry);
 225	}
 226	wake_up(&vgdev->ctrlq.ack_queue);
 227
 228	if (fence_id)
 229		virtio_gpu_fence_event_process(vgdev, fence_id);
 230}
 231
 232void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 233{
 234	struct virtio_gpu_device *vgdev =
 235		container_of(work, struct virtio_gpu_device,
 236			     cursorq.dequeue_work);
 237	struct list_head reclaim_list;
 238	struct virtio_gpu_vbuffer *entry, *tmp;
 239
 240	INIT_LIST_HEAD(&reclaim_list);
 241	spin_lock(&vgdev->cursorq.qlock);
 242	do {
 243		virtqueue_disable_cb(vgdev->cursorq.vq);
 244		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
 245	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
 246	spin_unlock(&vgdev->cursorq.qlock);
 247
 248	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 249		list_del(&entry->list);
 250		free_vbuf(vgdev, entry);
 251	}
 252	wake_up(&vgdev->cursorq.ack_queue);
 253}
 254
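/*
 * Queue a vbuffer on the control ring using up to three sg entries:
 * the command, an optional outgoing data payload and an optional
 * response buffer.  On -ENOSPC the qlock is dropped while waiting for
 * the host to free descriptors, then the add is retried.
 */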
 255static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 256					       struct virtio_gpu_vbuffer *vbuf)
 257		__releases(&vgdev->ctrlq.qlock)
 258		__acquires(&vgdev->ctrlq.qlock)
 259{
 260	struct virtqueue *vq = vgdev->ctrlq.vq;
 261	struct scatterlist *sgs[3], vcmd, vout, vresp;
 262	int outcnt = 0, incnt = 0;
 263	int ret;
 264
 265	if (!vgdev->vqs_ready)
 266		return -ENODEV;
 267
 268	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 269	sgs[outcnt + incnt] = &vcmd;
 270	outcnt++;
 271
 272	if (vbuf->data_size) {
 273		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
 274		sgs[outcnt + incnt] = &vout;
 275		outcnt++;
 276	}
 277
 278	if (vbuf->resp_size) {
 279		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
 280		sgs[outcnt + incnt] = &vresp;
 281		incnt++;
 282	}
 283
 284retry:
 285	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 286	if (ret == -ENOSPC) {
 287		spin_unlock(&vgdev->ctrlq.qlock);
 288		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
 289		spin_lock(&vgdev->ctrlq.qlock);
 290		goto retry;
 291	} else {
 292		trace_virtio_gpu_cmd_queue(vq,
 293			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 294
 295		virtqueue_kick(vq);
 296	}
 297
 298	if (!ret)
 299		ret = vq->num_free;
 300	return ret;
 301}
 302
 303static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 304					struct virtio_gpu_vbuffer *vbuf)
 305{
 306	int rc;
 307
 308	spin_lock(&vgdev->ctrlq.qlock);
 309	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 310	spin_unlock(&vgdev->ctrlq.qlock);
 311	return rc;
 312}
 313
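/*
 * Fenced submission: wait until at least three descriptors are free
 * before emitting the fence, so the fence id is generated and queued
 * without dropping qlock and fence ids reach the ring in order.
 */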
 314static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 315					       struct virtio_gpu_vbuffer *vbuf,
 316					       struct virtio_gpu_ctrl_hdr *hdr,
 317					       struct virtio_gpu_fence *fence)
 318{
 319	struct virtqueue *vq = vgdev->ctrlq.vq;
 320	int rc;
 321
 322again:
 323	spin_lock(&vgdev->ctrlq.qlock);
 324
 325	/*
  326	 * Make sure we have enough space in the virtqueue.  If not,
  327	 * wait here until we have.
 328	 *
  329	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
 330	 * to wait for free space, which can result in fence ids being
 331	 * submitted out-of-order.
 332	 */
 333	if (vq->num_free < 3) {
 334		spin_unlock(&vgdev->ctrlq.qlock);
 335		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
 336		goto again;
 337	}
 338
 339	if (fence)
 340		virtio_gpu_fence_emit(vgdev, hdr, fence);
 341	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 342	spin_unlock(&vgdev->ctrlq.qlock);
 343	return rc;
 344}
 345
 346static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 347				   struct virtio_gpu_vbuffer *vbuf)
 348{
 349	struct virtqueue *vq = vgdev->cursorq.vq;
 350	struct scatterlist *sgs[1], ccmd;
 351	int ret;
 352	int outcnt;
 353
 354	if (!vgdev->vqs_ready)
 355		return -ENODEV;
 356
 357	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 358	sgs[0] = &ccmd;
 359	outcnt = 1;
 360
 361	spin_lock(&vgdev->cursorq.qlock);
 362retry:
 363	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 364	if (ret == -ENOSPC) {
 365		spin_unlock(&vgdev->cursorq.qlock);
 366		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 367		spin_lock(&vgdev->cursorq.qlock);
 368		goto retry;
 369	} else {
 370		trace_virtio_gpu_cmd_queue(vq,
 371			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 372
 373		virtqueue_kick(vq);
 374	}
 375
 376	spin_unlock(&vgdev->cursorq.qlock);
 377
 378	if (!ret)
 379		ret = vq->num_free;
 380	return ret;
 381}
 382
 383/* just create gem objects for userspace and long lived objects,
 384 * just use dma_alloced pages for the queue objects?
 385 */
 386
 387/* create a basic resource */
 388void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 389				    struct virtio_gpu_object *bo,
 390				    struct virtio_gpu_object_params *params,
 391				    struct virtio_gpu_fence *fence)
 392{
 393	struct virtio_gpu_resource_create_2d *cmd_p;
 394	struct virtio_gpu_vbuffer *vbuf;
 395
 396	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 397	memset(cmd_p, 0, sizeof(*cmd_p));
 398
 399	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
 400	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 401	cmd_p->format = cpu_to_le32(params->format);
 402	cmd_p->width = cpu_to_le32(params->width);
 403	cmd_p->height = cpu_to_le32(params->height);
 404
 405	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 406	bo->created = true;
 407}
 408
 409void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 410				   uint32_t resource_id)
 411{
 412	struct virtio_gpu_resource_unref *cmd_p;
 413	struct virtio_gpu_vbuffer *vbuf;
 414
 415	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 416	memset(cmd_p, 0, sizeof(*cmd_p));
 417
 418	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
 419	cmd_p->resource_id = cpu_to_le32(resource_id);
 420
 421	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 422}
 423
 424static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 425						  uint32_t resource_id,
 426						  struct virtio_gpu_fence *fence)
 427{
 428	struct virtio_gpu_resource_detach_backing *cmd_p;
 429	struct virtio_gpu_vbuffer *vbuf;
 430
 431	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 432	memset(cmd_p, 0, sizeof(*cmd_p));
 433
 434	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
 435	cmd_p->resource_id = cpu_to_le32(resource_id);
 436
 437	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 438}
 439
 440void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 441				uint32_t scanout_id, uint32_t resource_id,
 442				uint32_t width, uint32_t height,
 443				uint32_t x, uint32_t y)
 444{
 445	struct virtio_gpu_set_scanout *cmd_p;
 446	struct virtio_gpu_vbuffer *vbuf;
 447
 448	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 449	memset(cmd_p, 0, sizeof(*cmd_p));
 450
 451	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
 452	cmd_p->resource_id = cpu_to_le32(resource_id);
 453	cmd_p->scanout_id = cpu_to_le32(scanout_id);
 454	cmd_p->r.width = cpu_to_le32(width);
 455	cmd_p->r.height = cpu_to_le32(height);
 456	cmd_p->r.x = cpu_to_le32(x);
 457	cmd_p->r.y = cpu_to_le32(y);
 458
 459	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 460}
 461
 462void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 463				   uint32_t resource_id,
 464				   uint32_t x, uint32_t y,
 465				   uint32_t width, uint32_t height)
 466{
 467	struct virtio_gpu_resource_flush *cmd_p;
 468	struct virtio_gpu_vbuffer *vbuf;
 469
 470	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 471	memset(cmd_p, 0, sizeof(*cmd_p));
 472
 473	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
 474	cmd_p->resource_id = cpu_to_le32(resource_id);
 475	cmd_p->r.width = cpu_to_le32(width);
 476	cmd_p->r.height = cpu_to_le32(height);
 477	cmd_p->r.x = cpu_to_le32(x);
 478	cmd_p->r.y = cpu_to_le32(y);
 479
 480	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 481}
 482
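/*
 * When the DMA API is in use, the object's pages must be synced for
 * the device before the host is asked to read them.
 */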
 483void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 484					struct virtio_gpu_object *bo,
 485					uint64_t offset,
 486					__le32 width, __le32 height,
 487					__le32 x, __le32 y,
 488					struct virtio_gpu_fence *fence)
 489{
 490	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 491	struct virtio_gpu_vbuffer *vbuf;
 492	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 493
 494	if (use_dma_api)
 495		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 496				       bo->pages->sgl, bo->pages->nents,
 497				       DMA_TO_DEVICE);
 498
 499	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 500	memset(cmd_p, 0, sizeof(*cmd_p));
 501
 502	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 503	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 504	cmd_p->offset = cpu_to_le64(offset);
 505	cmd_p->r.width = width;
 506	cmd_p->r.height = height;
 507	cmd_p->r.x = x;
 508	cmd_p->r.y = y;
 509
 510	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 511}
 512
 513static void
 514virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 515				       uint32_t resource_id,
 516				       struct virtio_gpu_mem_entry *ents,
 517				       uint32_t nents,
 518				       struct virtio_gpu_fence *fence)
 519{
 520	struct virtio_gpu_resource_attach_backing *cmd_p;
 521	struct virtio_gpu_vbuffer *vbuf;
 522
 523	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 524	memset(cmd_p, 0, sizeof(*cmd_p));
 525
 526	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
 527	cmd_p->resource_id = cpu_to_le32(resource_id);
 528	cmd_p->nr_entries = cpu_to_le32(nents);
 529
 530	vbuf->data_buf = ents;
 531	vbuf->data_size = sizeof(*ents) * nents;
 532
 533	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 534}
 535
 536static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 537					       struct virtio_gpu_vbuffer *vbuf)
 538{
 539	struct virtio_gpu_resp_display_info *resp =
 540		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
 541	int i;
 542
 543	spin_lock(&vgdev->display_info_lock);
 544	for (i = 0; i < vgdev->num_scanouts; i++) {
 545		vgdev->outputs[i].info = resp->pmodes[i];
 546		if (resp->pmodes[i].enabled) {
 547			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
 548				  le32_to_cpu(resp->pmodes[i].r.width),
 549				  le32_to_cpu(resp->pmodes[i].r.height),
 550				  le32_to_cpu(resp->pmodes[i].r.x),
 551				  le32_to_cpu(resp->pmodes[i].r.y));
 552		} else {
 553			DRM_DEBUG("output %d: disabled", i);
 554		}
 555	}
 556
 557	vgdev->display_info_pending = false;
 558	spin_unlock(&vgdev->display_info_lock);
 559	wake_up(&vgdev->resp_wq);
 560
 561	if (!drm_helper_hpd_irq_event(vgdev->ddev))
 562		drm_kms_helper_hotplug_event(vgdev->ddev);
 563}
 564
 565static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
 566					      struct virtio_gpu_vbuffer *vbuf)
 567{
 568	struct virtio_gpu_get_capset_info *cmd =
 569		(struct virtio_gpu_get_capset_info *)vbuf->buf;
 570	struct virtio_gpu_resp_capset_info *resp =
 571		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
 572	int i = le32_to_cpu(cmd->capset_index);
 573
 574	spin_lock(&vgdev->display_info_lock);
 575	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
 576	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
 577	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
 578	spin_unlock(&vgdev->display_info_lock);
 579	wake_up(&vgdev->resp_wq);
 580}
 581
 582static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 583				     struct virtio_gpu_vbuffer *vbuf)
 584{
 585	struct virtio_gpu_get_capset *cmd =
 586		(struct virtio_gpu_get_capset *)vbuf->buf;
 587	struct virtio_gpu_resp_capset *resp =
 588		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
 589	struct virtio_gpu_drv_cap_cache *cache_ent;
 590
 591	spin_lock(&vgdev->display_info_lock);
 592	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 593		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
 594		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 595			memcpy(cache_ent->caps_cache, resp->capset_data,
 596			       cache_ent->size);
 597			/* Copy must occur before is_valid is signalled. */
 598			smp_wmb();
 599			atomic_set(&cache_ent->is_valid, 1);
 600			break;
 601		}
 602	}
 603	spin_unlock(&vgdev->display_info_lock);
 604	wake_up_all(&vgdev->resp_wq);
 605}
 606
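/* Block-fetch callback for drm_do_get_edid(), reading out of the response buffer. */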
 607static int virtio_get_edid_block(void *data, u8 *buf,
 608				 unsigned int block, size_t len)
 609{
 610	struct virtio_gpu_resp_edid *resp = data;
 611	size_t start = block * EDID_LENGTH;
 612
 613	if (start + len > le32_to_cpu(resp->size))
 614		return -1;
 615	memcpy(buf, resp->edid + start, len);
 616	return 0;
 617}
 618
 619static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 620				       struct virtio_gpu_vbuffer *vbuf)
 621{
 622	struct virtio_gpu_cmd_get_edid *cmd =
 623		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
 624	struct virtio_gpu_resp_edid *resp =
 625		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 626	uint32_t scanout = le32_to_cpu(cmd->scanout);
 627	struct virtio_gpu_output *output;
 628	struct edid *new_edid, *old_edid;
 629
 630	if (scanout >= vgdev->num_scanouts)
 631		return;
 632	output = vgdev->outputs + scanout;
 633
 634	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
 635	drm_connector_update_edid_property(&output->conn, new_edid);
 636
 637	spin_lock(&vgdev->display_info_lock);
 638	old_edid = output->edid;
 639	output->edid = new_edid;
 640	spin_unlock(&vgdev->display_info_lock);
 641
 642	kfree(old_edid);
 643	wake_up(&vgdev->resp_wq);
 644}
 645
 646int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 647{
 648	struct virtio_gpu_ctrl_hdr *cmd_p;
 649	struct virtio_gpu_vbuffer *vbuf;
 650	void *resp_buf;
 651
 652	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
 653			   GFP_KERNEL);
 654	if (!resp_buf)
 655		return -ENOMEM;
 656
 657	cmd_p = virtio_gpu_alloc_cmd_resp
 658		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
 659		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
 660		 resp_buf);
 661	memset(cmd_p, 0, sizeof(*cmd_p));
 662
 663	vgdev->display_info_pending = true;
 664	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 665	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 666	return 0;
 667}
 668
 669int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
 670{
 671	struct virtio_gpu_get_capset_info *cmd_p;
 672	struct virtio_gpu_vbuffer *vbuf;
 673	void *resp_buf;
 674
 675	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
 676			   GFP_KERNEL);
 677	if (!resp_buf)
 678		return -ENOMEM;
 679
 680	cmd_p = virtio_gpu_alloc_cmd_resp
 681		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
 682		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
 683		 resp_buf);
 684	memset(cmd_p, 0, sizeof(*cmd_p));
 685
 686	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
 687	cmd_p->capset_index = cpu_to_le32(idx);
 688	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 689	return 0;
 690}
 691
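/*
 * Fetch a capability set: allocate the cache entry and response buffer
 * up front, then, under display_info_lock, either reuse an entry that
 * another task already added or insert the new one before issuing the
 * command.
 */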
 692int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 693			      int idx, int version,
 694			      struct virtio_gpu_drv_cap_cache **cache_p)
 695{
 696	struct virtio_gpu_get_capset *cmd_p;
 697	struct virtio_gpu_vbuffer *vbuf;
 698	int max_size;
 699	struct virtio_gpu_drv_cap_cache *cache_ent;
 700	struct virtio_gpu_drv_cap_cache *search_ent;
 701	void *resp_buf;
 702
 703	*cache_p = NULL;
 704
 705	if (idx >= vgdev->num_capsets)
 706		return -EINVAL;
 707
 708	if (version > vgdev->capsets[idx].max_version)
 709		return -EINVAL;
 710
 711	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
 712	if (!cache_ent)
 713		return -ENOMEM;
 714
 715	max_size = vgdev->capsets[idx].max_size;
 716	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 717	if (!cache_ent->caps_cache) {
 718		kfree(cache_ent);
 719		return -ENOMEM;
 720	}
 721
 722	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
 723			   GFP_KERNEL);
 724	if (!resp_buf) {
 725		kfree(cache_ent->caps_cache);
 726		kfree(cache_ent);
 727		return -ENOMEM;
 728	}
 729
 730	cache_ent->version = version;
 731	cache_ent->id = vgdev->capsets[idx].id;
 732	atomic_set(&cache_ent->is_valid, 0);
 733	cache_ent->size = max_size;
 734	spin_lock(&vgdev->display_info_lock);
 735	/* Search while under lock in case it was added by another task. */
 736	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
 737		if (search_ent->id == vgdev->capsets[idx].id &&
 738		    search_ent->version == version) {
 739			*cache_p = search_ent;
 740			break;
 741		}
 742	}
 743	if (!*cache_p)
 744		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 745	spin_unlock(&vgdev->display_info_lock);
 746
 747	if (*cache_p) {
 748		/* Entry was found, so free everything that was just created. */
 749		kfree(resp_buf);
 750		kfree(cache_ent->caps_cache);
 751		kfree(cache_ent);
 752		return 0;
 753	}
 754
 755	cmd_p = virtio_gpu_alloc_cmd_resp
 756		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
 757		 sizeof(struct virtio_gpu_resp_capset) + max_size,
 758		 resp_buf);
 759	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
 760	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
 761	cmd_p->capset_version = cpu_to_le32(version);
 762	*cache_p = cache_ent;
 763	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 764
 765	return 0;
 766}
 767
 768int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
 769{
 770	struct virtio_gpu_cmd_get_edid *cmd_p;
 771	struct virtio_gpu_vbuffer *vbuf;
 772	void *resp_buf;
 773	int scanout;
 774
 775	if (WARN_ON(!vgdev->has_edid))
 776		return -EINVAL;
 777
 778	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
 779		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
 780				   GFP_KERNEL);
 781		if (!resp_buf)
 782			return -ENOMEM;
 783
 784		cmd_p = virtio_gpu_alloc_cmd_resp
 785			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
 786			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
 787			 resp_buf);
 788		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
 789		cmd_p->scanout = cpu_to_le32(scanout);
 790		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 791	}
 792
 793	return 0;
 794}
 795
 796void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 797				   uint32_t nlen, const char *name)
 798{
 799	struct virtio_gpu_ctx_create *cmd_p;
 800	struct virtio_gpu_vbuffer *vbuf;
 801
 802	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 803	memset(cmd_p, 0, sizeof(*cmd_p));
 804
 805	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
 806	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 807	cmd_p->nlen = cpu_to_le32(nlen);
 808	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
 809	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
 810	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 811}
 812
 813void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 814				    uint32_t id)
 815{
 816	struct virtio_gpu_ctx_destroy *cmd_p;
 817	struct virtio_gpu_vbuffer *vbuf;
 818
 819	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 820	memset(cmd_p, 0, sizeof(*cmd_p));
 821
 822	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
 823	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 824	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 825}
 826
 827void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 828					    uint32_t ctx_id,
 829					    uint32_t resource_id)
 830{
 831	struct virtio_gpu_ctx_resource *cmd_p;
 832	struct virtio_gpu_vbuffer *vbuf;
 833
 834	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 835	memset(cmd_p, 0, sizeof(*cmd_p));
 836
 837	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 838	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 839	cmd_p->resource_id = cpu_to_le32(resource_id);
 840	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 841
 842}
 843
 844void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 845					    uint32_t ctx_id,
 846					    uint32_t resource_id)
 847{
 848	struct virtio_gpu_ctx_resource *cmd_p;
 849	struct virtio_gpu_vbuffer *vbuf;
 850
 851	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 852	memset(cmd_p, 0, sizeof(*cmd_p));
 853
 854	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 855	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 856	cmd_p->resource_id = cpu_to_le32(resource_id);
 857	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 858}
 859
 860void
 861virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 862				  struct virtio_gpu_object *bo,
 863				  struct virtio_gpu_object_params *params,
 864				  struct virtio_gpu_fence *fence)
 865{
 866	struct virtio_gpu_resource_create_3d *cmd_p;
 867	struct virtio_gpu_vbuffer *vbuf;
 868
 869	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 870	memset(cmd_p, 0, sizeof(*cmd_p));
 871
 872	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 873	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 874	cmd_p->format = cpu_to_le32(params->format);
 875	cmd_p->width = cpu_to_le32(params->width);
 876	cmd_p->height = cpu_to_le32(params->height);
 877
 878	cmd_p->target = cpu_to_le32(params->target);
 879	cmd_p->bind = cpu_to_le32(params->bind);
 880	cmd_p->depth = cpu_to_le32(params->depth);
 881	cmd_p->array_size = cpu_to_le32(params->array_size);
 882	cmd_p->last_level = cpu_to_le32(params->last_level);
 883	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
 884	cmd_p->flags = cpu_to_le32(params->flags);
 885
 886	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 887	bo->created = true;
 888}
 889
 890void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 891					struct virtio_gpu_object *bo,
 892					uint32_t ctx_id,
 893					uint64_t offset, uint32_t level,
 894					struct virtio_gpu_box *box,
 895					struct virtio_gpu_fence *fence)
 896{
 897	struct virtio_gpu_transfer_host_3d *cmd_p;
 898	struct virtio_gpu_vbuffer *vbuf;
 899	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 900
 901	if (use_dma_api)
 902		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 903				       bo->pages->sgl, bo->pages->nents,
 904				       DMA_TO_DEVICE);
 905
 906	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 907	memset(cmd_p, 0, sizeof(*cmd_p));
 908
 909	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 910	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 911	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 912	cmd_p->box = *box;
 913	cmd_p->offset = cpu_to_le64(offset);
 914	cmd_p->level = cpu_to_le32(level);
 915
 916	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 917}
 918
 919void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 920					  uint32_t resource_id, uint32_t ctx_id,
 921					  uint64_t offset, uint32_t level,
 922					  struct virtio_gpu_box *box,
 923					  struct virtio_gpu_fence *fence)
 924{
 925	struct virtio_gpu_transfer_host_3d *cmd_p;
 926	struct virtio_gpu_vbuffer *vbuf;
 927
 928	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 929	memset(cmd_p, 0, sizeof(*cmd_p));
 930
 931	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 932	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 933	cmd_p->resource_id = cpu_to_le32(resource_id);
 934	cmd_p->box = *box;
 935	cmd_p->offset = cpu_to_le64(offset);
 936	cmd_p->level = cpu_to_le32(level);
 937
 938	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 939}
 940
 941void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 942			   void *data, uint32_t data_size,
 943			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 944{
 945	struct virtio_gpu_cmd_submit *cmd_p;
 946	struct virtio_gpu_vbuffer *vbuf;
 947
 948	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 949	memset(cmd_p, 0, sizeof(*cmd_p));
 950
 951	vbuf->data_buf = data;
 952	vbuf->data_size = data_size;
 953
 954	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 955	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 956	cmd_p->size = cpu_to_le32(data_size);
 957
 958	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 959}
 960
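/*
 * Attach backing storage to a resource: build the object's sg table if
 * needed, map it for DMA when the DMA API is in use, and hand the host
 * an entry list (freed once the ring has consumed it) via
 * RESOURCE_ATTACH_BACKING.
 */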
 961int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 962			     struct virtio_gpu_object *obj,
 963			     struct virtio_gpu_fence *fence)
 964{
 965	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 966	struct virtio_gpu_mem_entry *ents;
 967	struct scatterlist *sg;
 968	int si, nents;
 969
 970	if (WARN_ON_ONCE(!obj->created))
 971		return -EINVAL;
 972
 973	if (!obj->pages) {
 974		int ret;
 975
 976		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
 977		if (ret)
 978			return ret;
 979	}
 980
 981	if (use_dma_api) {
 982		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
 983					 obj->pages->sgl, obj->pages->nents,
 984					 DMA_TO_DEVICE);
 985		nents = obj->mapped;
 986	} else {
 987		nents = obj->pages->nents;
 988	}
 989
 990	/* gets freed when the ring has consumed it */
 991	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
 992			     GFP_KERNEL);
 993	if (!ents) {
 994		DRM_ERROR("failed to allocate ent list\n");
 995		return -ENOMEM;
 996	}
 997
 998	for_each_sg(obj->pages->sgl, sg, nents, si) {
 999		ents[si].addr = cpu_to_le64(use_dma_api
1000					    ? sg_dma_address(sg)
1001					    : sg_phys(sg));
1002		ents[si].length = cpu_to_le32(sg->length);
1003		ents[si].padding = 0;
1004	}
1005
1006	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1007					       ents, nents,
1008					       fence);
1009	return 0;
1010}
1011
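/*
 * Detach backing storage.  With the DMA API in use the detach command
 * is fenced and waited on, so the host has stopped using the pages
 * before the DMA mappings are torn down.
 */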
1012void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
1013			      struct virtio_gpu_object *obj)
1014{
1015	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1016
1017	if (use_dma_api && obj->mapped) {
1018		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 1019	/* detach backing and wait for the host to process it ... */
1020		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
1021		dma_fence_wait(&fence->f, true);
1022		dma_fence_put(&fence->f);
1023
1024		/* ... then tear down iommu mappings */
1025		dma_unmap_sg(vgdev->vdev->dev.parent,
1026			     obj->pages->sgl, obj->mapped,
1027			     DMA_TO_DEVICE);
1028		obj->mapped = 0;
1029	} else {
1030		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
1031	}
1032}
1033
1034void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1035			    struct virtio_gpu_output *output)
1036{
1037	struct virtio_gpu_vbuffer *vbuf;
1038	struct virtio_gpu_update_cursor *cur_p;
1039
1040	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1041	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1042	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1043	virtio_gpu_queue_cursor(vgdev, vbuf);
1044}
v4.17: drivers/gpu/drm/virtio/virtgpu_vq.c
  1/*
  2 * Copyright (C) 2015 Red Hat, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Authors:
  6 *    Dave Airlie <airlied@redhat.com>
  7 *    Gerd Hoffmann <kraxel@redhat.com>
  8 *
  9 * Permission is hereby granted, free of charge, to any person obtaining a
 10 * copy of this software and associated documentation files (the "Software"),
 11 * to deal in the Software without restriction, including without limitation
 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 13 * and/or sell copies of the Software, and to permit persons to whom the
 14 * Software is furnished to do so, subject to the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the next
 17 * paragraph) shall be included in all copies or substantial portions of the
 18 * Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 26 * OTHER DEALINGS IN THE SOFTWARE.
 27 */
 28
 29#include <drm/drmP.h>
 30#include "virtgpu_drv.h"
 31#include <linux/virtio.h>
 32#include <linux/virtio_config.h>
 33#include <linux/virtio_ring.h>
 34
 35#define MAX_INLINE_CMD_SIZE   96
 36#define MAX_INLINE_RESP_SIZE  24
 37#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
 38			       + MAX_INLINE_CMD_SIZE		 \
 39			       + MAX_INLINE_RESP_SIZE)
 40
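/*
 * Host resource ids are handed out from an IDR protected by
 * resource_idr_lock.
 */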
 41void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 42				uint32_t *resid)
 43{
 44	int handle;
 45
 46	idr_preload(GFP_KERNEL);
 47	spin_lock(&vgdev->resource_idr_lock);
 48	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
 49	spin_unlock(&vgdev->resource_idr_lock);
 50	idr_preload_end();
 51	*resid = handle;
 52}
 53
 54void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 55{
 56	spin_lock(&vgdev->resource_idr_lock);
 57	idr_remove(&vgdev->resource_idr, id);
 58	spin_unlock(&vgdev->resource_idr_lock);
 59}
 60
 61void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 62{
 63	struct drm_device *dev = vq->vdev->priv;
 64	struct virtio_gpu_device *vgdev = dev->dev_private;
 65
 66	schedule_work(&vgdev->ctrlq.dequeue_work);
 67}
 68
 69void virtio_gpu_cursor_ack(struct virtqueue *vq)
 70{
 71	struct drm_device *dev = vq->vdev->priv;
 72	struct virtio_gpu_device *vgdev = dev->dev_private;
 73
 74	schedule_work(&vgdev->cursorq.dequeue_work);
 75}
 76
 77int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 78{
 79	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
 80					 VBUFFER_SIZE,
 81					 __alignof__(struct virtio_gpu_vbuffer),
 82					 0, NULL);
 83	if (!vgdev->vbufs)
 84		return -ENOMEM;
 85	return 0;
 86}
 87
 88void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 89{
 90	kmem_cache_destroy(vgdev->vbufs);
 91	vgdev->vbufs = NULL;
 92}
 93
 94static struct virtio_gpu_vbuffer*
 95virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 96		    int size, int resp_size, void *resp_buf,
 97		    virtio_gpu_resp_cb resp_cb)
 98{
 99	struct virtio_gpu_vbuffer *vbuf;
100
101	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
102	if (!vbuf)
103		return ERR_PTR(-ENOMEM);
104	memset(vbuf, 0, VBUFFER_SIZE);
105
106	BUG_ON(size > MAX_INLINE_CMD_SIZE);
107	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
108	vbuf->size = size;
109
110	vbuf->resp_cb = resp_cb;
111	vbuf->resp_size = resp_size;
112	if (resp_size <= MAX_INLINE_RESP_SIZE)
113		vbuf->resp_buf = (void *)vbuf->buf + size;
114	else
115		vbuf->resp_buf = resp_buf;
116	BUG_ON(!vbuf->resp_buf);
117	return vbuf;
118}
119
120static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
121				  struct virtio_gpu_vbuffer **vbuffer_p,
122				  int size)
123{
124	struct virtio_gpu_vbuffer *vbuf;
125
126	vbuf = virtio_gpu_get_vbuf(vgdev, size,
127				   sizeof(struct virtio_gpu_ctrl_hdr),
128				   NULL, NULL);
129	if (IS_ERR(vbuf)) {
130		*vbuffer_p = NULL;
131		return ERR_CAST(vbuf);
132	}
133	*vbuffer_p = vbuf;
134	return vbuf->buf;
135}
136
137static struct virtio_gpu_update_cursor*
138virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
139			struct virtio_gpu_vbuffer **vbuffer_p)
140{
141	struct virtio_gpu_vbuffer *vbuf;
142
143	vbuf = virtio_gpu_get_vbuf
144		(vgdev, sizeof(struct virtio_gpu_update_cursor),
145		 0, NULL, NULL);
146	if (IS_ERR(vbuf)) {
147		*vbuffer_p = NULL;
148		return ERR_CAST(vbuf);
149	}
150	*vbuffer_p = vbuf;
151	return (struct virtio_gpu_update_cursor *)vbuf->buf;
152}
153
154static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
155				       virtio_gpu_resp_cb cb,
156				       struct virtio_gpu_vbuffer **vbuffer_p,
157				       int cmd_size, int resp_size,
158				       void *resp_buf)
159{
160	struct virtio_gpu_vbuffer *vbuf;
161
162	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
163				   resp_size, resp_buf, cb);
164	if (IS_ERR(vbuf)) {
165		*vbuffer_p = NULL;
166		return ERR_CAST(vbuf);
167	}
168	*vbuffer_p = vbuf;
169	return (struct virtio_gpu_command *)vbuf->buf;
170}
171
172static void free_vbuf(struct virtio_gpu_device *vgdev,
173		      struct virtio_gpu_vbuffer *vbuf)
174{
175	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
176		kfree(vbuf->resp_buf);
177	kfree(vbuf->data_buf);
178	kmem_cache_free(vgdev->vbufs, vbuf);
179}
180
181static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
182{
183	struct virtio_gpu_vbuffer *vbuf;
184	unsigned int len;
185	int freed = 0;
186
187	while ((vbuf = virtqueue_get_buf(vq, &len))) {
188		list_add_tail(&vbuf->list, reclaim_list);
189		freed++;
190	}
191	if (freed == 0)
192		DRM_DEBUG("Huh? zero vbufs reclaimed");
193}
194
195void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
196{
197	struct virtio_gpu_device *vgdev =
198		container_of(work, struct virtio_gpu_device,
199			     ctrlq.dequeue_work);
200	struct list_head reclaim_list;
201	struct virtio_gpu_vbuffer *entry, *tmp;
202	struct virtio_gpu_ctrl_hdr *resp;
203	u64 fence_id = 0;
204
205	INIT_LIST_HEAD(&reclaim_list);
206	spin_lock(&vgdev->ctrlq.qlock);
207	do {
208		virtqueue_disable_cb(vgdev->ctrlq.vq);
209		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
210
211	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
212	spin_unlock(&vgdev->ctrlq.qlock);
213
214	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
215		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
216		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
217			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
218		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
219			u64 f = le64_to_cpu(resp->fence_id);
220
221			if (fence_id > f) {
222				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
223					  __func__, fence_id, f);
224			} else {
225				fence_id = f;
226			}
227		}
228		if (entry->resp_cb)
229			entry->resp_cb(vgdev, entry);
230
231		list_del(&entry->list);
232		free_vbuf(vgdev, entry);
233	}
234	wake_up(&vgdev->ctrlq.ack_queue);
235
236	if (fence_id)
237		virtio_gpu_fence_event_process(vgdev, fence_id);
238}
239
240void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
241{
242	struct virtio_gpu_device *vgdev =
243		container_of(work, struct virtio_gpu_device,
244			     cursorq.dequeue_work);
245	struct list_head reclaim_list;
246	struct virtio_gpu_vbuffer *entry, *tmp;
247
248	INIT_LIST_HEAD(&reclaim_list);
249	spin_lock(&vgdev->cursorq.qlock);
250	do {
251		virtqueue_disable_cb(vgdev->cursorq.vq);
252		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
253	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
254	spin_unlock(&vgdev->cursorq.qlock);
255
256	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
257		list_del(&entry->list);
258		free_vbuf(vgdev, entry);
259	}
260	wake_up(&vgdev->cursorq.ack_queue);
261}
262
263static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
264					       struct virtio_gpu_vbuffer *vbuf)
265		__releases(&vgdev->ctrlq.qlock)
266		__acquires(&vgdev->ctrlq.qlock)
267{
268	struct virtqueue *vq = vgdev->ctrlq.vq;
269	struct scatterlist *sgs[3], vcmd, vout, vresp;
270	int outcnt = 0, incnt = 0;
271	int ret;
272
273	if (!vgdev->vqs_ready)
274		return -ENODEV;
275
276	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
277	sgs[outcnt + incnt] = &vcmd;
278	outcnt++;
279
280	if (vbuf->data_size) {
281		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
282		sgs[outcnt + incnt] = &vout;
283		outcnt++;
284	}
285
286	if (vbuf->resp_size) {
287		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
288		sgs[outcnt + incnt] = &vresp;
289		incnt++;
290	}
291
292retry:
293	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294	if (ret == -ENOSPC) {
295		spin_unlock(&vgdev->ctrlq.qlock);
296		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297		spin_lock(&vgdev->ctrlq.qlock);
298		goto retry;
299	} else {
300		virtqueue_kick(vq);
301	}
302
303	if (!ret)
304		ret = vq->num_free;
305	return ret;
306}
307
308static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
309					struct virtio_gpu_vbuffer *vbuf)
310{
311	int rc;
312
313	spin_lock(&vgdev->ctrlq.qlock);
314	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
315	spin_unlock(&vgdev->ctrlq.qlock);
316	return rc;
317}
318
319static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
320					       struct virtio_gpu_vbuffer *vbuf,
321					       struct virtio_gpu_ctrl_hdr *hdr,
322					       struct virtio_gpu_fence **fence)
323{
324	struct virtqueue *vq = vgdev->ctrlq.vq;
325	int rc;
326
327again:
328	spin_lock(&vgdev->ctrlq.qlock);
329
330	/*
331	 * Make sure we have enough space in the virtqueue.  If not,
332	 * wait here until we have.
333	 *
334	 * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
335	 * to wait for free space, which can result in fence ids being
336	 * submitted out-of-order.
337	 */
338	if (vq->num_free < 3) {
339		spin_unlock(&vgdev->ctrlq.qlock);
340		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
341		goto again;
342	}
343
344	if (fence)
345		virtio_gpu_fence_emit(vgdev, hdr, fence);
346	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
347	spin_unlock(&vgdev->ctrlq.qlock);
348	return rc;
349}
350
351static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
352				   struct virtio_gpu_vbuffer *vbuf)
353{
354	struct virtqueue *vq = vgdev->cursorq.vq;
355	struct scatterlist *sgs[1], ccmd;
356	int ret;
357	int outcnt;
358
359	if (!vgdev->vqs_ready)
360		return -ENODEV;
361
362	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
363	sgs[0] = &ccmd;
364	outcnt = 1;
365
366	spin_lock(&vgdev->cursorq.qlock);
367retry:
368	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369	if (ret == -ENOSPC) {
370		spin_unlock(&vgdev->cursorq.qlock);
371		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372		spin_lock(&vgdev->cursorq.qlock);
373		goto retry;
374	} else {
375		virtqueue_kick(vq);
376	}
377
378	spin_unlock(&vgdev->cursorq.qlock);
379
380	if (!ret)
381		ret = vq->num_free;
382	return ret;
383}
384
385/* just create gem objects for userspace and long lived objects,
386 * just use dma_alloced pages for the queue objects?
387 */
388
389/* create a basic resource */
390void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
391				    uint32_t resource_id,
392				    uint32_t format,
393				    uint32_t width,
394				    uint32_t height)
395{
396	struct virtio_gpu_resource_create_2d *cmd_p;
397	struct virtio_gpu_vbuffer *vbuf;
398
399	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
400	memset(cmd_p, 0, sizeof(*cmd_p));
401
402	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
403	cmd_p->resource_id = cpu_to_le32(resource_id);
404	cmd_p->format = cpu_to_le32(format);
405	cmd_p->width = cpu_to_le32(width);
406	cmd_p->height = cpu_to_le32(height);
407
408	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
409}
410
411void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
412				   uint32_t resource_id)
413{
414	struct virtio_gpu_resource_unref *cmd_p;
415	struct virtio_gpu_vbuffer *vbuf;
416
417	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
418	memset(cmd_p, 0, sizeof(*cmd_p));
419
420	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
421	cmd_p->resource_id = cpu_to_le32(resource_id);
422
423	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
424}
425
426void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
427					   uint32_t resource_id)
428{
429	struct virtio_gpu_resource_detach_backing *cmd_p;
430	struct virtio_gpu_vbuffer *vbuf;
431
432	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
433	memset(cmd_p, 0, sizeof(*cmd_p));
434
435	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
436	cmd_p->resource_id = cpu_to_le32(resource_id);
437
438	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
439}
440
441void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
442				uint32_t scanout_id, uint32_t resource_id,
443				uint32_t width, uint32_t height,
444				uint32_t x, uint32_t y)
445{
446	struct virtio_gpu_set_scanout *cmd_p;
447	struct virtio_gpu_vbuffer *vbuf;
448
449	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
450	memset(cmd_p, 0, sizeof(*cmd_p));
451
452	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
453	cmd_p->resource_id = cpu_to_le32(resource_id);
454	cmd_p->scanout_id = cpu_to_le32(scanout_id);
455	cmd_p->r.width = cpu_to_le32(width);
456	cmd_p->r.height = cpu_to_le32(height);
457	cmd_p->r.x = cpu_to_le32(x);
458	cmd_p->r.y = cpu_to_le32(y);
459
460	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
461}
462
463void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
464				   uint32_t resource_id,
465				   uint32_t x, uint32_t y,
466				   uint32_t width, uint32_t height)
467{
468	struct virtio_gpu_resource_flush *cmd_p;
469	struct virtio_gpu_vbuffer *vbuf;
470
471	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
472	memset(cmd_p, 0, sizeof(*cmd_p));
473
474	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
475	cmd_p->resource_id = cpu_to_le32(resource_id);
476	cmd_p->r.width = cpu_to_le32(width);
477	cmd_p->r.height = cpu_to_le32(height);
478	cmd_p->r.x = cpu_to_le32(x);
479	cmd_p->r.y = cpu_to_le32(y);
480
481	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
482}
483
484void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
485					uint32_t resource_id, uint64_t offset,
486					__le32 width, __le32 height,
487					__le32 x, __le32 y,
488					struct virtio_gpu_fence **fence)
489{
490	struct virtio_gpu_transfer_to_host_2d *cmd_p;
491	struct virtio_gpu_vbuffer *vbuf;
492
493	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
494	memset(cmd_p, 0, sizeof(*cmd_p));
495
496	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
497	cmd_p->resource_id = cpu_to_le32(resource_id);
498	cmd_p->offset = cpu_to_le64(offset);
499	cmd_p->r.width = width;
500	cmd_p->r.height = height;
501	cmd_p->r.x = x;
502	cmd_p->r.y = y;
503
504	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
505}
506
507static void
508virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
509				       uint32_t resource_id,
510				       struct virtio_gpu_mem_entry *ents,
511				       uint32_t nents,
512				       struct virtio_gpu_fence **fence)
513{
514	struct virtio_gpu_resource_attach_backing *cmd_p;
515	struct virtio_gpu_vbuffer *vbuf;
516
517	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
518	memset(cmd_p, 0, sizeof(*cmd_p));
519
520	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
521	cmd_p->resource_id = cpu_to_le32(resource_id);
522	cmd_p->nr_entries = cpu_to_le32(nents);
523
524	vbuf->data_buf = ents;
525	vbuf->data_size = sizeof(*ents) * nents;
526
527	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
528}
529
530static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
531					       struct virtio_gpu_vbuffer *vbuf)
532{
533	struct virtio_gpu_resp_display_info *resp =
534		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
535	int i;
536
537	spin_lock(&vgdev->display_info_lock);
538	for (i = 0; i < vgdev->num_scanouts; i++) {
539		vgdev->outputs[i].info = resp->pmodes[i];
540		if (resp->pmodes[i].enabled) {
541			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
542				  le32_to_cpu(resp->pmodes[i].r.width),
543				  le32_to_cpu(resp->pmodes[i].r.height),
544				  le32_to_cpu(resp->pmodes[i].r.x),
545				  le32_to_cpu(resp->pmodes[i].r.y));
546		} else {
547			DRM_DEBUG("output %d: disabled", i);
548		}
549	}
550
551	vgdev->display_info_pending = false;
552	spin_unlock(&vgdev->display_info_lock);
553	wake_up(&vgdev->resp_wq);
554
555	if (!drm_helper_hpd_irq_event(vgdev->ddev))
556		drm_kms_helper_hotplug_event(vgdev->ddev);
557}
558
559static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
560					      struct virtio_gpu_vbuffer *vbuf)
561{
562	struct virtio_gpu_get_capset_info *cmd =
563		(struct virtio_gpu_get_capset_info *)vbuf->buf;
564	struct virtio_gpu_resp_capset_info *resp =
565		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
566	int i = le32_to_cpu(cmd->capset_index);
567
568	spin_lock(&vgdev->display_info_lock);
569	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
570	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
571	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
572	spin_unlock(&vgdev->display_info_lock);
573	wake_up(&vgdev->resp_wq);
574}
575
576static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
577				     struct virtio_gpu_vbuffer *vbuf)
578{
579	struct virtio_gpu_get_capset *cmd =
580		(struct virtio_gpu_get_capset *)vbuf->buf;
581	struct virtio_gpu_resp_capset *resp =
582		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
583	struct virtio_gpu_drv_cap_cache *cache_ent;
584
585	spin_lock(&vgdev->display_info_lock);
586	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
587		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
588		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
589			memcpy(cache_ent->caps_cache, resp->capset_data,
590			       cache_ent->size);
591			atomic_set(&cache_ent->is_valid, 1);
592			break;
593		}
594	}
595	spin_unlock(&vgdev->display_info_lock);
596	wake_up(&vgdev->resp_wq);
597}
598
599int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
600{
601	struct virtio_gpu_ctrl_hdr *cmd_p;
602	struct virtio_gpu_vbuffer *vbuf;
603	void *resp_buf;
604
605	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
606			   GFP_KERNEL);
607	if (!resp_buf)
608		return -ENOMEM;
609
610	cmd_p = virtio_gpu_alloc_cmd_resp
611		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
612		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
613		 resp_buf);
614	memset(cmd_p, 0, sizeof(*cmd_p));
615
616	vgdev->display_info_pending = true;
617	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
618	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
619	return 0;
620}
621
622int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
623{
624	struct virtio_gpu_get_capset_info *cmd_p;
625	struct virtio_gpu_vbuffer *vbuf;
626	void *resp_buf;
627
628	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
629			   GFP_KERNEL);
630	if (!resp_buf)
631		return -ENOMEM;
632
633	cmd_p = virtio_gpu_alloc_cmd_resp
634		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
635		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
636		 resp_buf);
637	memset(cmd_p, 0, sizeof(*cmd_p));
638
639	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
640	cmd_p->capset_index = cpu_to_le32(idx);
641	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
642	return 0;
643}
644
645int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
646			      int idx, int version,
647			      struct virtio_gpu_drv_cap_cache **cache_p)
648{
649	struct virtio_gpu_get_capset *cmd_p;
650	struct virtio_gpu_vbuffer *vbuf;
651	int max_size = vgdev->capsets[idx].max_size;
652	struct virtio_gpu_drv_cap_cache *cache_ent;
653	void *resp_buf;
654
655	if (idx > vgdev->num_capsets)
656		return -EINVAL;
657
658	if (version > vgdev->capsets[idx].max_version)
659		return -EINVAL;
660
661	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
662	if (!cache_ent)
663		return -ENOMEM;
664
665	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
666	if (!cache_ent->caps_cache) {
667		kfree(cache_ent);
668		return -ENOMEM;
669	}
670
671	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
672			   GFP_KERNEL);
673	if (!resp_buf) {
674		kfree(cache_ent->caps_cache);
675		kfree(cache_ent);
676		return -ENOMEM;
677	}
678
679	cache_ent->version = version;
680	cache_ent->id = vgdev->capsets[idx].id;
681	atomic_set(&cache_ent->is_valid, 0);
682	cache_ent->size = max_size;
683	spin_lock(&vgdev->display_info_lock);
684	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
685	spin_unlock(&vgdev->display_info_lock);
686
687	cmd_p = virtio_gpu_alloc_cmd_resp
688		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
689		 sizeof(struct virtio_gpu_resp_capset) + max_size,
690		 resp_buf);
691	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
692	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
693	cmd_p->capset_version = cpu_to_le32(version);
694	*cache_p = cache_ent;
695	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
696
697	return 0;
698}
699
700void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
701				   uint32_t nlen, const char *name)
702{
703	struct virtio_gpu_ctx_create *cmd_p;
704	struct virtio_gpu_vbuffer *vbuf;
705
706	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
707	memset(cmd_p, 0, sizeof(*cmd_p));
708
709	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
710	cmd_p->hdr.ctx_id = cpu_to_le32(id);
711	cmd_p->nlen = cpu_to_le32(nlen);
712	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
713	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
714	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
715}
716
717void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
718				    uint32_t id)
719{
720	struct virtio_gpu_ctx_destroy *cmd_p;
721	struct virtio_gpu_vbuffer *vbuf;
722
723	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
724	memset(cmd_p, 0, sizeof(*cmd_p));
725
726	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
727	cmd_p->hdr.ctx_id = cpu_to_le32(id);
728	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
729}
730
731void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
732					    uint32_t ctx_id,
733					    uint32_t resource_id)
734{
735	struct virtio_gpu_ctx_resource *cmd_p;
736	struct virtio_gpu_vbuffer *vbuf;
737
738	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
739	memset(cmd_p, 0, sizeof(*cmd_p));
740
741	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
742	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
743	cmd_p->resource_id = cpu_to_le32(resource_id);
744	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
745
746}
747
748void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
749					    uint32_t ctx_id,
750					    uint32_t resource_id)
751{
752	struct virtio_gpu_ctx_resource *cmd_p;
753	struct virtio_gpu_vbuffer *vbuf;
754
755	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
756	memset(cmd_p, 0, sizeof(*cmd_p));
757
758	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
759	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
760	cmd_p->resource_id = cpu_to_le32(resource_id);
761	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
762}
763
764void
765virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
766				  struct virtio_gpu_resource_create_3d *rc_3d,
767				  struct virtio_gpu_fence **fence)
768{
769	struct virtio_gpu_resource_create_3d *cmd_p;
770	struct virtio_gpu_vbuffer *vbuf;
771
772	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
773	memset(cmd_p, 0, sizeof(*cmd_p));
774
775	*cmd_p = *rc_3d;
776	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
777	cmd_p->hdr.flags = 0;
778
779	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
780}
781
782void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
783					uint32_t resource_id, uint32_t ctx_id,
784					uint64_t offset, uint32_t level,
785					struct virtio_gpu_box *box,
786					struct virtio_gpu_fence **fence)
787{
788	struct virtio_gpu_transfer_host_3d *cmd_p;
789	struct virtio_gpu_vbuffer *vbuf;
790
791	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
792	memset(cmd_p, 0, sizeof(*cmd_p));
793
794	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
795	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
796	cmd_p->resource_id = cpu_to_le32(resource_id);
797	cmd_p->box = *box;
798	cmd_p->offset = cpu_to_le64(offset);
799	cmd_p->level = cpu_to_le32(level);
800
801	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
802}
803
804void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
805					  uint32_t resource_id, uint32_t ctx_id,
806					  uint64_t offset, uint32_t level,
807					  struct virtio_gpu_box *box,
808					  struct virtio_gpu_fence **fence)
809{
810	struct virtio_gpu_transfer_host_3d *cmd_p;
811	struct virtio_gpu_vbuffer *vbuf;
812
813	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
814	memset(cmd_p, 0, sizeof(*cmd_p));
815
816	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
817	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
818	cmd_p->resource_id = cpu_to_le32(resource_id);
819	cmd_p->box = *box;
820	cmd_p->offset = cpu_to_le64(offset);
821	cmd_p->level = cpu_to_le32(level);
822
823	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
824}
825
826void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
827			   void *data, uint32_t data_size,
828			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
829{
830	struct virtio_gpu_cmd_submit *cmd_p;
831	struct virtio_gpu_vbuffer *vbuf;
832
833	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
834	memset(cmd_p, 0, sizeof(*cmd_p));
835
836	vbuf->data_buf = data;
837	vbuf->data_size = data_size;
838
839	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
840	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
841	cmd_p->size = cpu_to_le32(data_size);
842
843	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
844}
845
846int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
847			     struct virtio_gpu_object *obj,
848			     uint32_t resource_id,
849			     struct virtio_gpu_fence **fence)
850{
851	struct virtio_gpu_mem_entry *ents;
852	struct scatterlist *sg;
853	int si;
854
855	if (!obj->pages) {
856		int ret;
857
858		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
859		if (ret)
860			return ret;
861	}
862
863	/* gets freed when the ring has consumed it */
864	ents = kmalloc_array(obj->pages->nents,
865			     sizeof(struct virtio_gpu_mem_entry),
866			     GFP_KERNEL);
867	if (!ents) {
868		DRM_ERROR("failed to allocate ent list\n");
869		return -ENOMEM;
870	}
871
872	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
873		ents[si].addr = cpu_to_le64(sg_phys(sg));
874		ents[si].length = cpu_to_le32(sg->length);
875		ents[si].padding = 0;
876	}
877
878	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
879					       ents, obj->pages->nents,
880					       fence);
881	obj->hw_res_handle = resource_id;
882	return 0;
883}
884
885void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
886			    struct virtio_gpu_output *output)
887{
888	struct virtio_gpu_vbuffer *vbuf;
889	struct virtio_gpu_update_cursor *cur_p;
890
891	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
892	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
893	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
894	virtio_gpu_queue_cursor(vgdev, vbuf);
895}