v3.1
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include "vmwgfx_drm.h"
  30#include "ttm/ttm_object.h"
  31#include "ttm/ttm_placement.h"
  32#include "drmP.h"
  33
  34#define VMW_RES_CONTEXT ttm_driver_type0
  35#define VMW_RES_SURFACE ttm_driver_type1
  36#define VMW_RES_STREAM ttm_driver_type2
  37
  38struct vmw_user_context {
  39	struct ttm_base_object base;
  40	struct vmw_resource res;
  41};
  42
  43struct vmw_user_surface {
  44	struct ttm_base_object base;
  45	struct vmw_surface srf;
  46};
  47
  48struct vmw_user_dma_buffer {
  49	struct ttm_base_object base;
  50	struct vmw_dma_buffer dma;
  51};
  52
  53struct vmw_bo_user_rep {
  54	uint32_t handle;
  55	uint64_t map_handle;
  56};
  57
  58struct vmw_stream {
  59	struct vmw_resource res;
  60	uint32_t stream_id;
  61};
  62
  63struct vmw_user_stream {
  64	struct ttm_base_object base;
  65	struct vmw_stream stream;
  66};
   67
  68static inline struct vmw_dma_buffer *
  69vmw_dma_buffer(struct ttm_buffer_object *bo)
  70{
  71	return container_of(bo, struct vmw_dma_buffer, base);
  72}
  73
  74static inline struct vmw_user_dma_buffer *
  75vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  76{
  77	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  78	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  79}
  80
  81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  82{
  83	kref_get(&res->kref);
  84	return res;
  85}
   86
  87static void vmw_resource_release(struct kref *kref)
  88{
  89	struct vmw_resource *res =
  90	    container_of(kref, struct vmw_resource, kref);
   91	struct vmw_private *dev_priv = res->dev_priv;
   92
   93	idr_remove(res->idr, res->id);
   94	write_unlock(&dev_priv->resource_lock);
   95
   96	if (likely(res->hw_destroy != NULL))
   97		res->hw_destroy(res);
   98
   99	if (res->res_free != NULL)
  100		res->res_free(res);
  101	else
  102		kfree(res);
  103
  104	write_lock(&dev_priv->resource_lock);
 105}
 106
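     /**
      * vmw_resource_unreference - drop a reference on a resource
      *
      * @p_res: Pointer to the caller's resource pointer; cleared on return.
      *
      * The reference is dropped with the resource lock held, so that
      * vmw_resource_release() can remove the id from the idr before the
      * final free.
      */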
 107void vmw_resource_unreference(struct vmw_resource **p_res)
 108{
 109	struct vmw_resource *res = *p_res;
 110	struct vmw_private *dev_priv = res->dev_priv;
 111
 112	*p_res = NULL;
 113	write_lock(&dev_priv->resource_lock);
 114	kref_put(&res->kref, vmw_resource_release);
 115	write_unlock(&dev_priv->resource_lock);
 116}
 117
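     /**
      * vmw_resource_init - initialize a struct vmw_resource
      *
      * @dev_priv: Pointer to a device private struct.
      * @res:      The resource to initialize.
      * @idr:      The idr in which to allocate the resource id.
      * @obj_type: Resource object type.
      * @res_free: Resource destructor, or NULL to use kfree().
      *
      * Initializes the refcount and bookkeeping fields, then allocates a
      * resource id > 0 from @idr, retrying while idr_get_new_above()
      * returns -EAGAIN.
      */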
 118static int vmw_resource_init(struct vmw_private *dev_priv,
 119			     struct vmw_resource *res,
 120			     struct idr *idr,
 121			     enum ttm_object_type obj_type,
  122			     void (*res_free) (struct vmw_resource *res))
  123{
  124	int ret;
  125
 126	kref_init(&res->kref);
 127	res->hw_destroy = NULL;
 128	res->res_free = res_free;
 129	res->res_type = obj_type;
 130	res->idr = idr;
 131	res->avail = false;
 132	res->dev_priv = dev_priv;
 133
 134	do {
 135		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
 136			return -ENOMEM;
 137
 138		write_lock(&dev_priv->resource_lock);
 139		ret = idr_get_new_above(idr, res, 1, &res->id);
 140		write_unlock(&dev_priv->resource_lock);
 141
 142	} while (ret == -EAGAIN);
 143
  144	return ret;
 145}
 146
 147/**
 148 * vmw_resource_activate
 149 *
 150 * @res:        Pointer to the newly created resource
 151 * @hw_destroy: Destroy function. NULL if none.
 152 *
 153 * Activate a resource after the hardware has been made aware of it.
  154 * Set the destroy function to @hw_destroy. Typically this frees the
 155 * resource and destroys the hardware resources associated with it.
 156 * Activate basically means that the function vmw_resource_lookup will
 157 * find it.
 158 */
 159
 160static void vmw_resource_activate(struct vmw_resource *res,
 161				  void (*hw_destroy) (struct vmw_resource *))
 162{
 163	struct vmw_private *dev_priv = res->dev_priv;
 164
 165	write_lock(&dev_priv->resource_lock);
 166	res->avail = true;
 167	res->hw_destroy = hw_destroy;
 168	write_unlock(&dev_priv->resource_lock);
 169}
 170
 171struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 172					 struct idr *idr, int id)
 173{
 174	struct vmw_resource *res;
 175
 176	read_lock(&dev_priv->resource_lock);
 177	res = idr_find(idr, id);
 178	if (res && res->avail)
 179		kref_get(&res->kref);
 180	else
  181		res = NULL;
 182	read_unlock(&dev_priv->resource_lock);
 183
 184	if (unlikely(res == NULL))
 185		return NULL;
 186
 187	return res;
 188}
 189
 190/**
  191 * Context management:
 192 */
 193
  194static void vmw_hw_context_destroy(struct vmw_resource *res)
  195{
 196
 197	struct vmw_private *dev_priv = res->dev_priv;
 198	struct {
 199		SVGA3dCmdHeader header;
 200		SVGA3dCmdDestroyContext body;
 201	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 202
 203	if (unlikely(cmd == NULL)) {
  204		DRM_ERROR("Failed reserving FIFO space for context "
  205			  "destruction.\n");
 206		return;
 207	}
 208
 209	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
 210	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
 211	cmd->body.cid = cpu_to_le32(res->id);
 212
 213	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 214	vmw_3d_resource_dec(dev_priv);
 215}
 216
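     /**
      * vmw_context_init - register a new context with the device
      *
      * On success, a SVGA_3D_CMD_CONTEXT_DEFINE command carrying the new
      * resource id is written to the FIFO and the resource is activated
      * with vmw_hw_context_destroy as its hardware destructor. On failure
      * the resource is freed through @res_free (or kfree()).
      */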
 217static int vmw_context_init(struct vmw_private *dev_priv,
 218			    struct vmw_resource *res,
 219			    void (*res_free) (struct vmw_resource *res))
 220{
 221	int ret;
 222
 223	struct {
 224		SVGA3dCmdHeader header;
 225		SVGA3dCmdDefineContext body;
 226	} *cmd;
 227
 228	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
 229				VMW_RES_CONTEXT, res_free);
 230
 231	if (unlikely(ret != 0)) {
 232		if (res_free == NULL)
 233			kfree(res);
 234		else
 235			res_free(res);
 236		return ret;
 237	}
 238
 239	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 240	if (unlikely(cmd == NULL)) {
 241		DRM_ERROR("Fifo reserve failed.\n");
 242		vmw_resource_unreference(&res);
 243		return -ENOMEM;
 244	}
 245
 246	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
 247	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
 248	cmd->body.cid = cpu_to_le32(res->id);
 249
 250	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 251	(void) vmw_3d_resource_inc(dev_priv);
 252	vmw_resource_activate(res, vmw_hw_context_destroy);
 253	return 0;
 254}
 255
 256struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
 257{
 258	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
 259	int ret;
 260
 261	if (unlikely(res == NULL))
 262		return NULL;
 263
 264	ret = vmw_context_init(dev_priv, res, NULL);
 265	return (ret == 0) ? res : NULL;
 266}
 267
 268/**
 269 * User-space context management:
 270 */
 271
 272static void vmw_user_context_free(struct vmw_resource *res)
 273{
 274	struct vmw_user_context *ctx =
 275	    container_of(res, struct vmw_user_context, res);
 276
 277	kfree(ctx);
 278}
 279
 280/**
 281 * This function is called when user space has no more references on the
  282 * base object. It releases the base-object's reference on the resource object.
 283 */
 284
 285static void vmw_user_context_base_release(struct ttm_base_object **p_base)
 286{
 287	struct ttm_base_object *base = *p_base;
 288	struct vmw_user_context *ctx =
 289	    container_of(base, struct vmw_user_context, base);
 290	struct vmw_resource *res = &ctx->res;
 291
 292	*p_base = NULL;
 293	vmw_resource_unreference(&res);
 294}
 295
 296int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 297			      struct drm_file *file_priv)
 298{
 299	struct vmw_private *dev_priv = vmw_priv(dev);
 300	struct vmw_resource *res;
 301	struct vmw_user_context *ctx;
 302	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 303	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 304	int ret = 0;
 305
 306	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
 307	if (unlikely(res == NULL))
 308		return -EINVAL;
 309
 310	if (res->res_free != &vmw_user_context_free) {
 311		ret = -EINVAL;
 312		goto out;
 313	}
 314
 315	ctx = container_of(res, struct vmw_user_context, res);
 316	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
 317		ret = -EPERM;
 318		goto out;
 319	}
 320
 321	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
 322out:
 323	vmw_resource_unreference(&res);
 324	return ret;
 325}
 326
 327int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 328			     struct drm_file *file_priv)
 329{
 330	struct vmw_private *dev_priv = vmw_priv(dev);
 331	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 332	struct vmw_resource *res;
 333	struct vmw_resource *tmp;
 334	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 335	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 336	int ret;
 337
 338	if (unlikely(ctx == NULL))
 339		return -ENOMEM;
 340
 341	res = &ctx->res;
 342	ctx->base.shareable = false;
 343	ctx->base.tfile = NULL;
 344
 345	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
 346	if (unlikely(ret != 0))
 347		return ret;
 348
 349	tmp = vmw_resource_reference(&ctx->res);
 350	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
 351				   &vmw_user_context_base_release, NULL);
 352
 353	if (unlikely(ret != 0)) {
 354		vmw_resource_unreference(&tmp);
  355		goto out_err;
 356	}
 357
 358	arg->cid = res->id;
 359out_err:
 360	vmw_resource_unreference(&res);
 361	return ret;
 362
 363}
 364
 365int vmw_context_check(struct vmw_private *dev_priv,
 366		      struct ttm_object_file *tfile,
 367		      int id)
 368{
 369	struct vmw_resource *res;
 370	int ret = 0;
 371
 372	read_lock(&dev_priv->resource_lock);
 373	res = idr_find(&dev_priv->context_idr, id);
 374	if (res && res->avail) {
 375		struct vmw_user_context *ctx =
 376			container_of(res, struct vmw_user_context, res);
 377		if (ctx->base.tfile != tfile && !ctx->base.shareable)
 378			ret = -EPERM;
 379	} else
 380		ret = -EINVAL;
 381	read_unlock(&dev_priv->resource_lock);
 382
 383	return ret;
 384}
 385
 386
 387/**
 388 * Surface management.
 389 */
 390
  391static void vmw_hw_surface_destroy(struct vmw_resource *res)
  392{
 393
 394	struct vmw_private *dev_priv = res->dev_priv;
 395	struct {
 396		SVGA3dCmdHeader header;
 397		SVGA3dCmdDestroySurface body;
 398	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 399
 400	if (unlikely(cmd == NULL)) {
 401		DRM_ERROR("Failed reserving FIFO space for surface "
 402			  "destruction.\n");
 403		return;
 404	}
 405
 406	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
 407	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
 408	cmd->body.sid = cpu_to_le32(res->id);
 409
 410	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 411	vmw_3d_resource_dec(dev_priv);
 412}
 413
 414void vmw_surface_res_free(struct vmw_resource *res)
 415{
 416	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
 417
 418	kfree(srf->sizes);
 419	kfree(srf->snooper.image);
 420	kfree(srf);
 421}
 422
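     /**
      * vmw_surface_init - register a new surface with the device
      *
      * Writes a SVGA_3D_CMD_SURFACE_DEFINE command followed by one
      * SVGA3dSize per mip level to the FIFO, then activates the resource
      * with vmw_hw_surface_destroy as its hardware destructor.
      */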
 423int vmw_surface_init(struct vmw_private *dev_priv,
 424		     struct vmw_surface *srf,
 425		     void (*res_free) (struct vmw_resource *res))
 426{
 427	int ret;
 428	struct {
 429		SVGA3dCmdHeader header;
 430		SVGA3dCmdDefineSurface body;
 431	} *cmd;
 432	SVGA3dSize *cmd_size;
 433	struct vmw_resource *res = &srf->res;
 434	struct drm_vmw_size *src_size;
 435	size_t submit_size;
 436	uint32_t cmd_len;
 437	int i;
 438
 439	BUG_ON(res_free == NULL);
 440	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
 441				VMW_RES_SURFACE, res_free);
 442
 443	if (unlikely(ret != 0)) {
 444		res_free(res);
 445		return ret;
 446	}
 447
 448	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
 449	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
 450
 451	cmd = vmw_fifo_reserve(dev_priv, submit_size);
 452	if (unlikely(cmd == NULL)) {
 453		DRM_ERROR("Fifo reserve failed for create surface.\n");
 454		vmw_resource_unreference(&res);
 455		return -ENOMEM;
 456	}
 457
 458	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
 459	cmd->header.size = cpu_to_le32(cmd_len);
 460	cmd->body.sid = cpu_to_le32(res->id);
 461	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
 462	cmd->body.format = cpu_to_le32(srf->format);
 463	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
 464		cmd->body.face[i].numMipLevels =
 465		    cpu_to_le32(srf->mip_levels[i]);
 466	}
 467
 468	cmd += 1;
 469	cmd_size = (SVGA3dSize *) cmd;
 470	src_size = srf->sizes;
 471
 472	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
 473		cmd_size->width = cpu_to_le32(src_size->width);
 474		cmd_size->height = cpu_to_le32(src_size->height);
 475		cmd_size->depth = cpu_to_le32(src_size->depth);
 476	}
 477
 478	vmw_fifo_commit(dev_priv, submit_size);
 479	(void) vmw_3d_resource_inc(dev_priv);
 480	vmw_resource_activate(res, vmw_hw_surface_destroy);
 481	return 0;
 482}
 483
 484static void vmw_user_surface_free(struct vmw_resource *res)
 485{
 486	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
 487	struct vmw_user_surface *user_srf =
 488	    container_of(srf, struct vmw_user_surface, srf);
 489
 490	kfree(srf->sizes);
 491	kfree(srf->snooper.image);
 492	kfree(user_srf);
 493}
 494
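     /**
      * vmw_user_surface_lookup_handle - look up a surface by user-space handle
      *
      * Returns a referenced surface in @out on success. Fails with -EINVAL
      * if the handle does not name an available user surface.
      */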
 495int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
 496				   struct ttm_object_file *tfile,
  497				   uint32_t handle, struct vmw_surface **out)
 498{
 499	struct vmw_resource *res;
 500	struct vmw_surface *srf;
 501	struct vmw_user_surface *user_srf;
 502	struct ttm_base_object *base;
 503	int ret = -EINVAL;
 504
 505	base = ttm_base_object_lookup(tfile, handle);
 506	if (unlikely(base == NULL))
 507		return -EINVAL;
 508
 509	if (unlikely(base->object_type != VMW_RES_SURFACE))
 510		goto out_bad_resource;
 511
 512	user_srf = container_of(base, struct vmw_user_surface, base);
 513	srf = &user_srf->srf;
 514	res = &srf->res;
 515
 516	read_lock(&dev_priv->resource_lock);
 517
 518	if (!res->avail || res->res_free != &vmw_user_surface_free) {
 519		read_unlock(&dev_priv->resource_lock);
 520		goto out_bad_resource;
 521	}
 522
 523	kref_get(&res->kref);
 524	read_unlock(&dev_priv->resource_lock);
 525
 526	*out = srf;
 527	ret = 0;
 528
 529out_bad_resource:
 530	ttm_base_object_unref(&base);
  531
 532	return ret;
 533}
 534
 535static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
  536{
 537	struct ttm_base_object *base = *p_base;
 538	struct vmw_user_surface *user_srf =
 539	    container_of(base, struct vmw_user_surface, base);
 540	struct vmw_resource *res = &user_srf->srf.res;
 541
 542	*p_base = NULL;
  543	vmw_resource_unreference(&res);
 544}
 545
 546int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 547			      struct drm_file *file_priv)
 548{
 549	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
 550	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 551
  552	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
 553}
 554
 555int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
  556			     struct drm_file *file_priv)
 557{
 558	struct vmw_private *dev_priv = vmw_priv(dev);
 559	struct vmw_user_surface *user_srf =
 560	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
 561	struct vmw_surface *srf;
 562	struct vmw_resource *res;
 563	struct vmw_resource *tmp;
 564	union drm_vmw_surface_create_arg *arg =
 565	    (union drm_vmw_surface_create_arg *)data;
 566	struct drm_vmw_surface_create_req *req = &arg->req;
 567	struct drm_vmw_surface_arg *rep = &arg->rep;
 568	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 569	struct drm_vmw_size __user *user_sizes;
 570	int ret;
 571	int i;
 572
  573	if (unlikely(user_srf == NULL))
 574		return -ENOMEM;
 575
 576	srf = &user_srf->srf;
 577	res = &srf->res;
 578
 579	srf->flags = req->flags;
 580	srf->format = req->format;
 581	srf->scanout = req->scanout;
 582	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 583	srf->num_sizes = 0;
 584	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 585		srf->num_sizes += srf->mip_levels[i];
 586
 587	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
 588	    DRM_VMW_MAX_MIP_LEVELS) {
 589		ret = -EINVAL;
 590		goto out_err0;
 591	}
 592
 593	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
 594	if (unlikely(srf->sizes == NULL)) {
 595		ret = -ENOMEM;
 596		goto out_err0;
 597	}
 598
 599	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
 600	    req->size_addr;
 601
 602	ret = copy_from_user(srf->sizes, user_sizes,
 603			     srf->num_sizes * sizeof(*srf->sizes));
 604	if (unlikely(ret != 0)) {
 605		ret = -EFAULT;
 606		goto out_err1;
 607	}
 608
 609	if (srf->scanout &&
 610	    srf->num_sizes == 1 &&
 611	    srf->sizes[0].width == 64 &&
 612	    srf->sizes[0].height == 64 &&
 613	    srf->format == SVGA3D_A8R8G8B8) {
 614
 615		/* allocate image area and clear it */
 616		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
 617		if (!srf->snooper.image) {
 618			DRM_ERROR("Failed to allocate cursor_image\n");
 619			ret = -ENOMEM;
 620			goto out_err1;
 621		}
 622	} else {
 623		srf->snooper.image = NULL;
 624	}
 625	srf->snooper.crtc = NULL;
 626
 627	user_srf->base.shareable = false;
 628	user_srf->base.tfile = NULL;
 629
 630	/**
 631	 * From this point, the generic resource management functions
 632	 * destroy the object on failure.
 633	 */
 634
 635	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
 636	if (unlikely(ret != 0))
 637		return ret;
 638
 639	tmp = vmw_resource_reference(&srf->res);
 640	ret = ttm_base_object_init(tfile, &user_srf->base,
 641				   req->shareable, VMW_RES_SURFACE,
 642				   &vmw_user_surface_base_release, NULL);
 643
 644	if (unlikely(ret != 0)) {
 645		vmw_resource_unreference(&tmp);
 646		vmw_resource_unreference(&res);
 647		return ret;
 648	}
 649
 650	rep->sid = user_srf->base.hash.key;
 651	if (rep->sid == SVGA3D_INVALID_ID)
 652		DRM_ERROR("Created bad Surface ID.\n");
 653
 654	vmw_resource_unreference(&res);
 655	return 0;
 656out_err1:
 657	kfree(srf->sizes);
 658out_err0:
 659	kfree(user_srf);
 660	return ret;
 661}
 662
 663int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 664				struct drm_file *file_priv)
 665{
 666	union drm_vmw_surface_reference_arg *arg =
 667	    (union drm_vmw_surface_reference_arg *)data;
 668	struct drm_vmw_surface_arg *req = &arg->req;
 669	struct drm_vmw_surface_create_req *rep = &arg->rep;
 670	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 671	struct vmw_surface *srf;
 672	struct vmw_user_surface *user_srf;
 673	struct drm_vmw_size __user *user_sizes;
 674	struct ttm_base_object *base;
 675	int ret = -EINVAL;
 676
 677	base = ttm_base_object_lookup(tfile, req->sid);
 678	if (unlikely(base == NULL)) {
 679		DRM_ERROR("Could not find surface to reference.\n");
 680		return -EINVAL;
 681	}
 682
 683	if (unlikely(base->object_type != VMW_RES_SURFACE))
 684		goto out_bad_resource;
 685
 686	user_srf = container_of(base, struct vmw_user_surface, base);
 687	srf = &user_srf->srf;
 688
 689	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
 690	if (unlikely(ret != 0)) {
 691		DRM_ERROR("Could not add a reference to a surface.\n");
 692		goto out_no_reference;
 693	}
 694
 695	rep->flags = srf->flags;
 696	rep->format = srf->format;
 697	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
 698	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
 699	    rep->size_addr;
 700
 701	if (user_sizes)
 702		ret = copy_to_user(user_sizes, srf->sizes,
 703				   srf->num_sizes * sizeof(*srf->sizes));
 704	if (unlikely(ret != 0)) {
 705		DRM_ERROR("copy_to_user failed %p %u\n",
 706			  user_sizes, srf->num_sizes);
 707		ret = -EFAULT;
 708	}
 709out_bad_resource:
 710out_no_reference:
 711	ttm_base_object_unref(&base);
 712
 713	return ret;
 714}
 715
 716int vmw_surface_check(struct vmw_private *dev_priv,
 717		      struct ttm_object_file *tfile,
 718		      uint32_t handle, int *id)
 719{
 720	struct ttm_base_object *base;
 721	struct vmw_user_surface *user_srf;
 722
 723	int ret = -EPERM;
 724
 725	base = ttm_base_object_lookup(tfile, handle);
 726	if (unlikely(base == NULL))
 727		return -EINVAL;
 728
 729	if (unlikely(base->object_type != VMW_RES_SURFACE))
 730		goto out_bad_surface;
 731
 732	user_srf = container_of(base, struct vmw_user_surface, base);
 733	*id = user_srf->srf.res.id;
 734	ret = 0;
 735
 736out_bad_surface:
 737	/**
 738	 * FIXME: May deadlock here when called from the
 739	 * command parsing code.
 740	 */
 741
 742	ttm_base_object_unref(&base);
 743	return ret;
 744}
 745
 746/**
  747 * Buffer management.
 748 */
 749
 750static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
 751				  unsigned long num_pages)
 752{
 753	static size_t bo_user_size = ~0;
 754
 755	size_t page_array_size =
 756	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
 757
 758	if (unlikely(bo_user_size == ~0)) {
 759		bo_user_size = glob->ttm_bo_extra_size +
 760		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
 761	}
 762
 763	return bo_user_size + page_array_size;
 764}
 765
 766void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 767{
 768	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 769	struct ttm_bo_global *glob = bo->glob;
 770
 771	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 772	kfree(vmw_bo);
 773}
 774
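     /**
      * vmw_dmabuf_init - initialize a vmw_dma_buffer and its TTM buffer object
      *
      * Accounts the buffer against the TTM memory global before calling
      * ttm_bo_init(). @bo_free is responsible for freeing the structure,
      * also on error.
      */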
 775int vmw_dmabuf_init(struct vmw_private *dev_priv,
 776		    struct vmw_dma_buffer *vmw_bo,
 777		    size_t size, struct ttm_placement *placement,
 778		    bool interruptible,
  779		    void (*bo_free) (struct ttm_buffer_object *bo))
 780{
 781	struct ttm_bo_device *bdev = &dev_priv->bdev;
 782	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 783	size_t acc_size;
 784	int ret;
 785
 786	BUG_ON(!bo_free);
 787
 788	acc_size =
 789	    vmw_dmabuf_acc_size(bdev->glob,
 790				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 791
 792	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 793	if (unlikely(ret != 0)) {
  794		/* we must free the bo here as
  795		 * ttm_bo_init does so as well on error */
 796		bo_free(&vmw_bo->base);
 797		return ret;
 798	}
 799
  800	memset(vmw_bo, 0, sizeof(*vmw_bo));
  801
  802	INIT_LIST_HEAD(&vmw_bo->validate_list);
 803
 804	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 805			  ttm_bo_type_device, placement,
 806			  0, 0, interruptible,
 807			  NULL, acc_size, bo_free);
 808	return ret;
 809}
 810
 811static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 812{
 813	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
  814	struct ttm_bo_global *glob = bo->glob;
 815
 816	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 817	kfree(vmw_user_bo);
 818}
 819
  820static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
  821{
  822	struct vmw_user_dma_buffer *vmw_user_bo;
  823	struct ttm_base_object *base = *p_base;
  824	struct ttm_buffer_object *bo;
  825
  826	*p_base = NULL;
 827
 828	if (unlikely(base == NULL))
  829		return;
 830
 831	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
 832	bo = &vmw_user_bo->dma.base;
 833	ttm_bo_unref(&bo);
 834}
 835
 836int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 837			   struct drm_file *file_priv)
 838{
 839	struct vmw_private *dev_priv = vmw_priv(dev);
 840	union drm_vmw_alloc_dmabuf_arg *arg =
 841	    (union drm_vmw_alloc_dmabuf_arg *)data;
 842	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 843	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 844	struct vmw_user_dma_buffer *vmw_user_bo;
 845	struct ttm_buffer_object *tmp;
 846	struct vmw_master *vmaster = vmw_master(file_priv->master);
 847	int ret;
 848
 849	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
 850	if (unlikely(vmw_user_bo == NULL))
 851		return -ENOMEM;
 852
 853	ret = ttm_read_lock(&vmaster->lock, true);
 854	if (unlikely(ret != 0)) {
 855		kfree(vmw_user_bo);
 856		return ret;
 857	}
 858
 859	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
 860			      &vmw_vram_sys_placement, true,
 861			      &vmw_user_dmabuf_destroy);
 862	if (unlikely(ret != 0))
 863		goto out_no_dmabuf;
 864
 865	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
 866	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
 867				   &vmw_user_bo->base,
 868				   false,
 869				   ttm_buffer_type,
 870				   &vmw_user_dmabuf_release, NULL);
 871	if (unlikely(ret != 0))
 872		goto out_no_base_object;
 873	else {
 874		rep->handle = vmw_user_bo->base.hash.key;
 875		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
 876		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
 877		rep->cur_gmr_offset = 0;
 878	}
 879
 880out_no_base_object:
 881	ttm_bo_unref(&tmp);
 882out_no_dmabuf:
 883	ttm_read_unlock(&vmaster->lock);
 884
 885	return ret;
 886}
 887
 888int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 889			   struct drm_file *file_priv)
 890{
 891	struct drm_vmw_unref_dmabuf_arg *arg =
 892	    (struct drm_vmw_unref_dmabuf_arg *)data;
 893
 894	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 895					 arg->handle,
 896					 TTM_REF_USAGE);
 897}
 898
 899uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 900				  uint32_t cur_validate_node)
 901{
 902	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 903
 904	if (likely(vmw_bo->on_validate_list))
 905		return vmw_bo->cur_validate_node;
 906
 907	vmw_bo->cur_validate_node = cur_validate_node;
 908	vmw_bo->on_validate_list = true;
 909
 910	return cur_validate_node;
 911}
 912
 913void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
 914{
 915	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 916
 917	vmw_bo->on_validate_list = false;
 918}
 919
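     /**
      * vmw_user_dmabuf_lookup - look up a dma buffer by user-space handle
      *
      * On success, returns in @out a pointer to the vmw_dma_buffer with a
      * ttm_bo reference held; the temporary base-object reference taken by
      * the lookup is dropped before returning.
      */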
 920int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
  921			   uint32_t handle, struct vmw_dma_buffer **out)
 922{
 923	struct vmw_user_dma_buffer *vmw_user_bo;
 924	struct ttm_base_object *base;
 925
 926	base = ttm_base_object_lookup(tfile, handle);
 927	if (unlikely(base == NULL)) {
 928		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 929		       (unsigned long)handle);
 930		return -ESRCH;
 931	}
 932
 933	if (unlikely(base->object_type != ttm_buffer_type)) {
 934		ttm_base_object_unref(&base);
 935		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 936		       (unsigned long)handle);
 937		return -EINVAL;
 938	}
 939
  940	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
  941	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
  942	ttm_base_object_unref(&base);
 943	*out = &vmw_user_bo->dma;
 944
 945	return 0;
 946}
  947
 948/*
 949 * Stream management
 950 */
 951
 952static void vmw_stream_destroy(struct vmw_resource *res)
 953{
 954	struct vmw_private *dev_priv = res->dev_priv;
 955	struct vmw_stream *stream;
 956	int ret;
 957
 958	DRM_INFO("%s: unref\n", __func__);
 959	stream = container_of(res, struct vmw_stream, res);
 960
 961	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
 962	WARN_ON(ret != 0);
 963}
 964
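     /**
      * vmw_stream_init - initialize and claim an overlay stream
      *
      * Claims an overlay unit via vmw_overlay_claim() and activates the
      * resource with vmw_stream_destroy as its destructor.
      */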
 965static int vmw_stream_init(struct vmw_private *dev_priv,
 966			   struct vmw_stream *stream,
 967			   void (*res_free) (struct vmw_resource *res))
 968{
 969	struct vmw_resource *res = &stream->res;
 970	int ret;
 971
 972	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
 973				VMW_RES_STREAM, res_free);
 974
 975	if (unlikely(ret != 0)) {
 976		if (res_free == NULL)
 977			kfree(stream);
 978		else
 979			res_free(&stream->res);
 980		return ret;
 981	}
 982
 983	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
 984	if (ret) {
 985		vmw_resource_unreference(&res);
 986		return ret;
 987	}
 988
 989	DRM_INFO("%s: claimed\n", __func__);
 990
 991	vmw_resource_activate(&stream->res, vmw_stream_destroy);
 992	return 0;
 993}
 994
 995/**
 996 * User-space context management:
 997 */
 998
 999static void vmw_user_stream_free(struct vmw_resource *res)
1000{
1001	struct vmw_user_stream *stream =
 1002	    container_of(res, struct vmw_user_stream, stream.res);
 1003
 1004	kfree(stream);
1005}
1006
1007/**
1008 * This function is called when user space has no more references on the
1009 * base object. It releases the base-object's reference on the resource object.
1010 */
1011
1012static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1013{
1014	struct ttm_base_object *base = *p_base;
1015	struct vmw_user_stream *stream =
1016	    container_of(base, struct vmw_user_stream, base);
1017	struct vmw_resource *res = &stream->stream.res;
1018
1019	*p_base = NULL;
1020	vmw_resource_unreference(&res);
1021}
1022
1023int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1024			   struct drm_file *file_priv)
1025{
1026	struct vmw_private *dev_priv = vmw_priv(dev);
1027	struct vmw_resource *res;
1028	struct vmw_user_stream *stream;
1029	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 1030	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1031	int ret = 0;
1032
 1033	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1034	if (unlikely(res == NULL))
1035		return -EINVAL;
1036
1037	if (res->res_free != &vmw_user_stream_free) {
1038		ret = -EINVAL;
1039		goto out;
1040	}
1041
1042	stream = container_of(res, struct vmw_user_stream, stream.res);
1043	if (stream->base.tfile != tfile) {
1044		ret = -EINVAL;
1045		goto out;
1046	}
1047
1048	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1049out:
1050	vmw_resource_unreference(&res);
1051	return ret;
1052}
1053
1054int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1055			   struct drm_file *file_priv)
1056{
1057	struct vmw_private *dev_priv = vmw_priv(dev);
1058	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1059	struct vmw_resource *res;
1060	struct vmw_resource *tmp;
1061	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1062	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1063	int ret;
1064
1065	if (unlikely(stream == NULL))
 1066		return -ENOMEM;
1067
1068	res = &stream->stream.res;
1069	stream->base.shareable = false;
1070	stream->base.tfile = NULL;
 1071
1072	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1073	if (unlikely(ret != 0))
1074		return ret;
1075
1076	tmp = vmw_resource_reference(res);
1077	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1078				   &vmw_user_stream_base_release, NULL);
1079
1080	if (unlikely(ret != 0)) {
1081		vmw_resource_unreference(&tmp);
1082		goto out_err;
1083	}
1084
1085	arg->stream_id = res->id;
1086out_err:
 1087	vmw_resource_unreference(&res);
1088	return ret;
1089}
1090
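     /**
      * vmw_user_stream_lookup - look up a user stream resource
      *
      * On success, @inout_id is replaced by the claimed overlay stream id
      * and a referenced resource is returned in @out. The caller must own
      * the stream (matching @tfile), otherwise -EPERM is returned.
      */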
1091int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1092			   struct ttm_object_file *tfile,
1093			   uint32_t *inout_id, struct vmw_resource **out)
1094{
1095	struct vmw_user_stream *stream;
1096	struct vmw_resource *res;
1097	int ret;
1098
 1099	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1100	if (unlikely(res == NULL))
1101		return -EINVAL;
1102
1103	if (res->res_free != &vmw_user_stream_free) {
1104		ret = -EINVAL;
1105		goto err_ref;
1106	}
1107
1108	stream = container_of(res, struct vmw_user_stream, stream.res);
1109	if (stream->base.tfile != tfile) {
1110		ret = -EPERM;
1111		goto err_ref;
1112	}
1113
1114	*inout_id = stream->stream.stream_id;
1115	*out = res;
1116	return 0;
1117err_ref:
1118	vmw_resource_unreference(&res);
 1119	return ret;
1120}
v4.10.11
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
   34#include "vmwgfx_binding.h"
   35
   36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38struct vmw_user_dma_buffer {
  39	struct ttm_prime_object prime;
  40	struct vmw_dma_buffer dma;
  41};
  42
  43struct vmw_bo_user_rep {
  44	uint32_t handle;
  45	uint64_t map_handle;
  46};
  47
  48struct vmw_stream {
  49	struct vmw_resource res;
  50	uint32_t stream_id;
  51};
  52
  53struct vmw_user_stream {
  54	struct ttm_base_object base;
  55	struct vmw_stream stream;
  56};
  57
  58
  59static uint64_t vmw_user_stream_size;
  60
  61static const struct vmw_res_func vmw_stream_func = {
  62	.res_type = vmw_res_stream,
  63	.needs_backup = false,
  64	.may_evict = false,
  65	.type_name = "video streams",
  66	.backup_placement = NULL,
  67	.create = NULL,
  68	.destroy = NULL,
  69	.bind = NULL,
  70	.unbind = NULL
  71};
  72
  73static inline struct vmw_dma_buffer *
  74vmw_dma_buffer(struct ttm_buffer_object *bo)
  75{
  76	return container_of(bo, struct vmw_dma_buffer, base);
  77}
  78
  79static inline struct vmw_user_dma_buffer *
  80vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  81{
  82	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  83	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  84}
  85
  86struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  87{
  88	kref_get(&res->kref);
  89	return res;
  90}
  91
  92struct vmw_resource *
  93vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  94{
  95	return kref_get_unless_zero(&res->kref) ? res : NULL;
  96}
  97
  98/**
  99 * vmw_resource_release_id - release a resource id to the id manager.
 100 *
 101 * @res: Pointer to the resource.
 102 *
 103 * Release the resource id to the resource id manager and set it to -1
 104 */
 105void vmw_resource_release_id(struct vmw_resource *res)
 106{
 107	struct vmw_private *dev_priv = res->dev_priv;
 108	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 109
 110	write_lock(&dev_priv->resource_lock);
 111	if (res->id != -1)
 112		idr_remove(idr, res->id);
 113	res->id = -1;
 114	write_unlock(&dev_priv->resource_lock);
 115}
 116
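     /**
      * vmw_resource_release - final kref release function for resources
      *
      * Unbinds and releases any backup buffer, kills GPU bindings, calls
      * the hardware destructor if present, frees the resource and finally
      * removes its id from the idr.
      */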
 117static void vmw_resource_release(struct kref *kref)
 118{
 119	struct vmw_resource *res =
 120	    container_of(kref, struct vmw_resource, kref);
 121	struct vmw_private *dev_priv = res->dev_priv;
 122	int id;
 123	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 124
 125	write_lock(&dev_priv->resource_lock);
 126	res->avail = false;
 127	list_del_init(&res->lru_head);
 128	write_unlock(&dev_priv->resource_lock);
 129	if (res->backup) {
 130		struct ttm_buffer_object *bo = &res->backup->base;
 131
 132		ttm_bo_reserve(bo, false, false, NULL);
 133		if (!list_empty(&res->mob_head) &&
 134		    res->func->unbind != NULL) {
 135			struct ttm_validate_buffer val_buf;
 136
 137			val_buf.bo = bo;
 138			val_buf.shared = false;
 139			res->func->unbind(res, false, &val_buf);
 140		}
 141		res->backup_dirty = false;
 142		list_del_init(&res->mob_head);
 143		ttm_bo_unreserve(bo);
 144		vmw_dmabuf_unreference(&res->backup);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	write_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	write_unlock(&dev_priv->resource_lock);
 164}
 165
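     /**
      * vmw_resource_unreference - drop a reference on a resource
      *
      * @p_res: Pointer to the caller's resource pointer; cleared on return.
      */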
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
  168	struct vmw_resource *res = *p_res;
  169
  170	*p_res = NULL;
  171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
 174
 175/**
  176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
 180 * Allocate the lowest free resource from the resource manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	write_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	write_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @obj_type:       Resource object type.
 209 * @delay_id:       Boolean whether to defer device id allocation until
 210 *                  the first validation.
 211 * @res_free:       Resource destructor.
 212 * @func:           Resource function table.
 213 */
 214int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 215		      bool delay_id,
 216		      void (*res_free) (struct vmw_resource *res),
 217		      const struct vmw_res_func *func)
 218{
 219	kref_init(&res->kref);
 220	res->hw_destroy = NULL;
  221	res->res_free = res_free;
 222	res->avail = false;
 223	res->dev_priv = dev_priv;
 224	res->func = func;
 225	INIT_LIST_HEAD(&res->lru_head);
 226	INIT_LIST_HEAD(&res->mob_head);
 227	INIT_LIST_HEAD(&res->binding_head);
 228	res->id = -1;
 229	res->backup = NULL;
 230	res->backup_offset = 0;
 231	res->backup_dirty = false;
 232	res->res_dirty = false;
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239/**
 240 * vmw_resource_activate
 241 *
 242 * @res:        Pointer to the newly created resource
 243 * @hw_destroy: Destroy function. NULL if none.
 244 *
 245 * Activate a resource after the hardware has been made aware of it.
  246 * Set the destroy function to @hw_destroy. Typically this frees the
 247 * resource and destroys the hardware resources associated with it.
 248 * Activate basically means that the function vmw_resource_lookup will
 249 * find it.
 250 */
 251void vmw_resource_activate(struct vmw_resource *res,
  252			   void (*hw_destroy) (struct vmw_resource *))
 253{
 254	struct vmw_private *dev_priv = res->dev_priv;
 255
 256	write_lock(&dev_priv->resource_lock);
 257	res->avail = true;
 258	res->hw_destroy = hw_destroy;
 259	write_unlock(&dev_priv->resource_lock);
 260}
 261
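     /**
      * vmw_resource_lookup - find a resource by id and take a reference
      *
      * Returns NULL if the id is not present, the resource is not yet
      * available, or its refcount has already dropped to zero.
      */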
 262static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 263						struct idr *idr, int id)
 264{
 265	struct vmw_resource *res;
 266
 267	read_lock(&dev_priv->resource_lock);
 268	res = idr_find(idr, id);
  269	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 270		res = NULL;
 271
 272	read_unlock(&dev_priv->resource_lock);
 273
 274	if (unlikely(res == NULL))
 275		return NULL;
 276
 277	return res;
 278}
 279
 280/**
 281 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 282 * TTM user-space handle and perform basic type checks
 283 *
 284 * @dev_priv:     Pointer to a device private struct
 285 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 286 * @handle:       The TTM user-space handle
 287 * @converter:    Pointer to an object describing the resource type
 288 * @p_res:        On successful return the location pointed to will contain
 289 *                a pointer to a refcounted struct vmw_resource.
 290 *
 291 * If the handle can't be found or is associated with an incorrect resource
 292 * type, -EINVAL will be returned.
 293 */
 294int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 295				    struct ttm_object_file *tfile,
 296				    uint32_t handle,
 297				    const struct vmw_user_resource_conv
 298				    *converter,
 299				    struct vmw_resource **p_res)
 300{
 301	struct ttm_base_object *base;
 302	struct vmw_resource *res;
 303	int ret = -EINVAL;
 304
 305	base = ttm_base_object_lookup(tfile, handle);
 306	if (unlikely(base == NULL))
  307		return -EINVAL;
 308
 309	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 310		goto out_bad_resource;
 311
  312	res = converter->base_obj_to_res(base);
 313
 314	read_lock(&dev_priv->resource_lock);
 315	if (!res->avail || res->res_free != converter->res_free) {
 316		read_unlock(&dev_priv->resource_lock);
  317		goto out_bad_resource;
 318	}
 319
 320	kref_get(&res->kref);
  321	read_unlock(&dev_priv->resource_lock);
  322
  323	*p_res = res;
  324	ret = 0;
  325
  326out_bad_resource:
  327	ttm_base_object_unref(&base);
 328
 329	return ret;
 330}
 331
 332/**
  333 * Helper function that looks up either a surface or a dmabuf.
  334 *
  335 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 336 */
 337int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 338			   struct ttm_object_file *tfile,
 339			   uint32_t handle,
 340			   struct vmw_surface **out_surf,
  341			   struct vmw_dma_buffer **out_buf)
  342{
  343	struct vmw_resource *res;
  344	int ret;
  345
  346	BUG_ON(*out_surf || *out_buf);
 347
 348	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 349					      user_surface_converter,
 350					      &res);
 351	if (!ret) {
 352		*out_surf = vmw_res_to_srf(res);
 353		return 0;
 354	}
 355
 356	*out_surf = NULL;
  357	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
  358	return ret;
 359}
  360
 361/**
 362 * Buffer management.
 363 */
 364
 365/**
 366 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 367 *
 368 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 369 * @size: The requested buffer size.
 370 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 371 */
 372static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 373				  bool user)
 374{
 375	static size_t struct_size, user_struct_size;
 376	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 377	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 378
 379	if (unlikely(struct_size == 0)) {
  380		size_t backend_size = ttm_round_pot(vmw_tt_size);
 381
 382		struct_size = backend_size +
 383			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 384		user_struct_size = backend_size +
 385			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 386	}
 387
 388	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 389		page_array_size +=
 390			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 391
 392	return ((user) ? user_struct_size : struct_size) +
 393		page_array_size;
 394}
 395
  396void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
  397{
  398	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  399
  400	kfree(vmw_bo);
 401}
 402
 403static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 404{
  405	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
  406
  407	ttm_prime_object_kfree(vmw_user_bo, prime);
 408}
 409
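     /**
      * vmw_dmabuf_init - initialize a vmw_dma_buffer and its TTM buffer object
      *
      * The accounted size depends on whether this is a plain dma buffer or
      * a user dma buffer, which is inferred from @bo_free.
      */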
 410int vmw_dmabuf_init(struct vmw_private *dev_priv,
 411		    struct vmw_dma_buffer *vmw_bo,
 412		    size_t size, struct ttm_placement *placement,
 413		    bool interruptible,
 414		    void (*bo_free) (struct ttm_buffer_object *bo))
 415{
 416	struct ttm_bo_device *bdev = &dev_priv->bdev;
 417	size_t acc_size;
 418	int ret;
  419	bool user = (bo_free == &vmw_user_dmabuf_destroy);
  420
  421	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
  422
  423	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
  424	memset(vmw_bo, 0, sizeof(*vmw_bo));
  425
  426	INIT_LIST_HEAD(&vmw_bo->res_list);
 427
 428	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 429			  ttm_bo_type_device, placement,
 430			  0, interruptible,
 431			  NULL, acc_size, NULL, NULL, bo_free);
 432	return ret;
 433}
 434
 435static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 436{
 437	struct vmw_user_dma_buffer *vmw_user_bo;
 438	struct ttm_base_object *base = *p_base;
  439	struct ttm_buffer_object *bo;
 440
 441	*p_base = NULL;
 442
 443	if (unlikely(base == NULL))
 444		return;
 445
 446	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 447				   prime.base);
 448	bo = &vmw_user_bo->dma.base;
 449	ttm_bo_unref(&bo);
 450}
 451
 452static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 453					    enum ttm_ref_type ref_type)
 454{
 455	struct vmw_user_dma_buffer *user_bo;
 456	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 457
 458	switch (ref_type) {
 459	case TTM_REF_SYNCCPU_WRITE:
 460		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 461		break;
 462	default:
 463		BUG();
 464	}
 465}
 466
 467/**
 468 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 469 *
 470 * @dev_priv: Pointer to a struct device private.
 471 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 472 * object.
 473 * @size: Size of the dma buffer.
 474 * @shareable: Boolean whether the buffer is shareable with other open files.
 475 * @handle: Pointer to where the handle value should be assigned.
 476 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 477 * should be assigned.
 478 */
 479int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 480			  struct ttm_object_file *tfile,
 481			  uint32_t size,
 482			  bool shareable,
 483			  uint32_t *handle,
 484			  struct vmw_dma_buffer **p_dma_buf,
 485			  struct ttm_base_object **p_base)
 486{
 487	struct vmw_user_dma_buffer *user_bo;
  488	struct ttm_buffer_object *tmp;
  489	int ret;
 490
 491	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 492	if (unlikely(user_bo == NULL)) {
 493		DRM_ERROR("Failed to allocate a buffer.\n");
  494		return -ENOMEM;
 495	}
 496
 497	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 498			      (dev_priv->has_mob) ?
 499			      &vmw_sys_placement :
 500			      &vmw_vram_sys_placement, true,
  501			      &vmw_user_dmabuf_destroy);
 502	if (unlikely(ret != 0))
 503		return ret;
 504
 505	tmp = ttm_bo_reference(&user_bo->dma.base);
 506	ret = ttm_prime_object_init(tfile,
 507				    size,
 508				    &user_bo->prime,
 509				    shareable,
 510				    ttm_buffer_type,
 511				    &vmw_user_dmabuf_release,
  512				    &vmw_user_dmabuf_ref_obj_release);
 513	if (unlikely(ret != 0)) {
 514		ttm_bo_unref(&tmp);
 515		goto out_no_base_object;
 516	}
 517
 518	*p_dma_buf = &user_bo->dma;
 519	if (p_base) {
 520		*p_base = &user_bo->prime.base;
  521		kref_get(&(*p_base)->refcount);
  522	}
  523	*handle = user_bo->prime.base.hash.key;
 524
  525out_no_base_object:
 526	return ret;
 527}
 528
 529/**
 530 * vmw_user_dmabuf_verify_access - verify access permissions on this
 531 * buffer object.
 532 *
 533 * @bo: Pointer to the buffer object being accessed
 534 * @tfile: Identifying the caller.
 535 */
 536int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
  537				  struct ttm_object_file *tfile)
 538{
  539	struct vmw_user_dma_buffer *vmw_user_bo;
 540
 541	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
  542		return -EPERM;
  543
  544	vmw_user_bo = vmw_user_dma_buffer(bo);
 545
 546	/* Check that the caller has opened the object. */
 547	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
  548		return 0;
 549
 550	DRM_ERROR("Could not grant buffer access.\n");
 551	return -EPERM;
 552}
 553
 554/**
 555 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 556 * access, idling previous GPU operations on the buffer and optionally
 557 * blocking it for further command submissions.
 558 *
 559 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 560 * @tfile: Identifying the caller.
 561 * @flags: Flags indicating how the grab should be performed.
 562 *
 563 * A blocking grab will be automatically released when @tfile is closed.
 564 */
 565static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 566					struct ttm_object_file *tfile,
 567					uint32_t flags)
 568{
 569	struct ttm_buffer_object *bo = &user_bo->dma.base;
  570	bool existed;
 571	int ret;
 572
 573	if (flags & drm_vmw_synccpu_allow_cs) {
 574		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 575		long lret;
 576
 577		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
 578							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 579		if (!lret)
 580			return -EBUSY;
 581		else if (lret < 0)
 582			return lret;
  583		return 0;
 584	}
 585
 586	ret = ttm_bo_synccpu_write_grab
 587		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 588	if (unlikely(ret != 0))
 589		return ret;
 590
 591	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 592				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 593	if (ret != 0 || existed)
 594		ttm_bo_synccpu_write_release(&user_bo->dma.base);
  595
 596	return ret;
 597}
 598
 599/**
 600 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 601 * and unblock command submission on the buffer if blocked.
 602 *
 603 * @handle: Handle identifying the buffer object.
 604 * @tfile: Identifying the caller.
 605 * @flags: Flags indicating the type of release.
 606 */
 607static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 608					   struct ttm_object_file *tfile,
 609					   uint32_t flags)
 610{
 611	if (!(flags & drm_vmw_synccpu_allow_cs))
 612		return ttm_ref_object_base_unref(tfile, handle,
 613						 TTM_REF_SYNCCPU_WRITE);
 614
  615	return 0;
 616}
 617
 618/**
  619 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 620 * functionality.
 621 *
 622 * @dev: Identifies the drm device.
 623 * @data: Pointer to the ioctl argument.
 624 * @file_priv: Identifies the caller.
 625 *
 626 * This function checks the ioctl arguments for validity and calls the
 627 * relevant synccpu functions.
 628 */
 629int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 630				  struct drm_file *file_priv)
 631{
 632	struct drm_vmw_synccpu_arg *arg =
 633		(struct drm_vmw_synccpu_arg *) data;
 634	struct vmw_dma_buffer *dma_buf;
 635	struct vmw_user_dma_buffer *user_bo;
 636	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 637	struct ttm_base_object *buffer_base;
 638	int ret;
 639
 640	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 641	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 642			       drm_vmw_synccpu_dontblock |
 643			       drm_vmw_synccpu_allow_cs)) != 0) {
 644		DRM_ERROR("Illegal synccpu flags.\n");
 645		return -EINVAL;
 646	}
 647
 648	switch (arg->op) {
 649	case drm_vmw_synccpu_grab:
 650		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
 651					     &buffer_base);
 652		if (unlikely(ret != 0))
 653			return ret;
 654
 655		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 656				       dma);
 657		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 658		vmw_dmabuf_unreference(&dma_buf);
 659		ttm_base_object_unref(&buffer_base);
 660		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 661			     ret != -EBUSY)) {
 662			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 663				  (unsigned int) arg->handle);
 664			return ret;
 665		}
 666		break;
 667	case drm_vmw_synccpu_release:
 668		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 669						      arg->flags);
 670		if (unlikely(ret != 0)) {
 671			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 672				  (unsigned int) arg->handle);
 673			return ret;
 674		}
 675		break;
 676	default:
 677		DRM_ERROR("Invalid synccpu operation.\n");
 678		return -EINVAL;
 679	}
 680
 681	return 0;
 682}
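
/*
 * Illustrative sketch (not part of the driver): a caller of the two
 * static helpers above would pair grab and release like this, assuming
 * a looked-up @user_bo and its @handle:
 *
 *	ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile,
 *					   drm_vmw_synccpu_read |
 *					   drm_vmw_synccpu_write);
 *	if (ret == 0) {
 *		(CPU reads and writes of the buffer contents)
 *		vmw_user_dmabuf_synccpu_release(handle, tfile,
 *						drm_vmw_synccpu_read |
 *						drm_vmw_synccpu_write);
 *	}
 */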
 683
 684int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 685			   struct drm_file *file_priv)
 686{
 687	struct vmw_private *dev_priv = vmw_priv(dev);
 688	union drm_vmw_alloc_dmabuf_arg *arg =
 689	    (union drm_vmw_alloc_dmabuf_arg *)data;
 690	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 691	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 692	struct vmw_dma_buffer *dma_buf;
 693	uint32_t handle;
 694	int ret;
 695
 696	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 697	if (unlikely(ret != 0))
 698		return ret;
 699
 700	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 701				    req->size, false, &handle, &dma_buf,
 702				    NULL);
 703	if (unlikely(ret != 0))
 704		goto out_no_dmabuf;
 705
 706	rep->handle = handle;
 707	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 708	rep->cur_gmr_id = handle;
 709	rep->cur_gmr_offset = 0;
 710
 711	vmw_dmabuf_unreference(&dma_buf);
 712
 713out_no_dmabuf:
 714	ttm_read_unlock(&dev_priv->reservation_sem);
 715
 716	return ret;
 717}
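
/*
 * Illustrative sketch (not part of the driver): user space reaches the
 * ioctl above through libdrm, assuming the DRM_VMW_ALLOC_DMABUF command
 * index from vmwgfx_drm.h and an open device fd:
 *
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = size;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF,
 *				  &arg, sizeof(arg));
 *
 * On success, arg.rep.handle names the buffer and arg.rep.map_handle is
 * the offset to pass to mmap() on the device fd.
 */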
 718
 719int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 720			   struct drm_file *file_priv)
 721{
 722	struct drm_vmw_unref_dmabuf_arg *arg =
 723	    (struct drm_vmw_unref_dmabuf_arg *)data;
 724
 725	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 726					 arg->handle,
 727					 TTM_REF_USAGE);
 728}
 729
 730int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 731			   uint32_t handle, struct vmw_dma_buffer **out,
 732			   struct ttm_base_object **p_base)
 733{
 734	struct vmw_user_dma_buffer *vmw_user_bo;
 735	struct ttm_base_object *base;
 736
 737	base = ttm_base_object_lookup(tfile, handle);
 738	if (unlikely(base == NULL)) {
 739		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 740			  (unsigned long)handle);
 741		return -ESRCH;
 742	}
 743
 744	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 745		ttm_base_object_unref(&base);
 746		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 747			  (unsigned long)handle);
 748		return -EINVAL;
 749	}
 750
 751	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 752				   prime.base);
 753	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 754	if (p_base)
 755		*p_base = base;
 756	else
 757		ttm_base_object_unref(&base);
 758	*out = &vmw_user_bo->dma;
 759
 760	return 0;
 761}
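
/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * vmw_user_dmabuf_lookup() uses the buffer and then drops both
 * references, as the synccpu ioctl above does:
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &base);
 *	if (ret == 0) {
 *		(use dma_buf)
 *		vmw_dmabuf_unreference(&dma_buf);
 *		ttm_base_object_unref(&base);
 *	}
 *
 * Passing NULL as the last argument makes the function drop the base
 * object reference itself, leaving only the TTM buffer reference for
 * the caller to release.
 */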
 762
 763int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 764			      struct vmw_dma_buffer *dma_buf,
 765			      uint32_t *handle)
 766{
 767	struct vmw_user_dma_buffer *user_bo;
 768
 769	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 770		return -EINVAL;
 771
 772	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 773
 774	*handle = user_bo->prime.base.hash.key;
 775	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 776				  TTM_REF_USAGE, NULL, false);
 777}
 778
 779/*
 780 * Stream management
 781 */
 782
 783static void vmw_stream_destroy(struct vmw_resource *res)
 784{
 785	struct vmw_private *dev_priv = res->dev_priv;
 786	struct vmw_stream *stream;
 787	int ret;
 788
 789	DRM_INFO("%s: unref\n", __func__);
 790	stream = container_of(res, struct vmw_stream, res);
 791
 792	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
 793	WARN_ON(ret != 0);
 794}
 795
 796static int vmw_stream_init(struct vmw_private *dev_priv,
 797			   struct vmw_stream *stream,
 798			   void (*res_free) (struct vmw_resource *res))
 799{
 800	struct vmw_resource *res = &stream->res;
 801	int ret;
 802
 803	ret = vmw_resource_init(dev_priv, res, false, res_free,
 804				&vmw_stream_func);
 805
 806	if (unlikely(ret != 0)) {
 807		if (res_free == NULL)
 808			kfree(stream);
 809		else
 810			res_free(&stream->res);
 811		return ret;
 812	}
 813
 814	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
 815	if (ret) {
 816		vmw_resource_unreference(&res);
 817		return ret;
 818	}
 819
 820	DRM_INFO("%s: claimed\n", __func__);
 821
 822	vmw_resource_activate(&stream->res, vmw_stream_destroy);
 823	return 0;
 824}
 825
 826static void vmw_user_stream_free(struct vmw_resource *res)
 827{
 828	struct vmw_user_stream *stream =
 829	    container_of(res, struct vmw_user_stream, stream.res);
 830	struct vmw_private *dev_priv = res->dev_priv;
 831
 832	ttm_base_object_kfree(stream, base);
 833	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 834			    vmw_user_stream_size);
 835}
 836
 837/**
 838 * vmw_user_stream_base_release - Release the base object's reference on the
 839 * resource object once user space holds no more references on the base object.
 840 */
 841
 842static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
 843{
 844	struct ttm_base_object *base = *p_base;
 845	struct vmw_user_stream *stream =
 846	    container_of(base, struct vmw_user_stream, base);
 847	struct vmw_resource *res = &stream->stream.res;
 848
 849	*p_base = NULL;
 850	vmw_resource_unreference(&res);
 851}
 852
 853int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 854			   struct drm_file *file_priv)
 855{
 856	struct vmw_private *dev_priv = vmw_priv(dev);
 857	struct vmw_resource *res;
 858	struct vmw_user_stream *stream;
 859	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 860	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 861	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 862	int ret = 0;
 863
 864
 865	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 866	if (unlikely(res == NULL))
 867		return -EINVAL;
 868
 869	if (res->res_free != &vmw_user_stream_free) {
 870		ret = -EINVAL;
 871		goto out;
 872	}
 873
 874	stream = container_of(res, struct vmw_user_stream, stream.res);
 875	if (stream->base.tfile != tfile) {
 876		ret = -EINVAL;
 877		goto out;
 878	}
 879
 880	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
 881out:
 882	vmw_resource_unreference(&res);
 883	return ret;
 884}
 885
 886int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 887			   struct drm_file *file_priv)
 888{
 889	struct vmw_private *dev_priv = vmw_priv(dev);
 890	struct vmw_user_stream *stream;
 891	struct vmw_resource *res;
 892	struct vmw_resource *tmp;
 893	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 894	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 895	int ret;
 896
 897	/*
 898	 * Approximate idr memory usage with 128 bytes. It will be limited
 899	 * by the maximum number of streams anyway.
 900	 */
 901
 902	if (unlikely(vmw_user_stream_size == 0))
 903		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 904
 905	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 906	if (unlikely(ret != 0))
 907		return ret;
 908
 909	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 910				   vmw_user_stream_size,
 911				   false, true);
 912	ttm_read_unlock(&dev_priv->reservation_sem);
 913	if (unlikely(ret != 0)) {
 914		if (ret != -ERESTARTSYS)
 915			DRM_ERROR("Out of graphics memory for stream"
 916				  " creation.\n");
 917
 918		goto out_ret;
 919	}
 920
 921	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 922	if (unlikely(stream == NULL)) {
 923		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 924				    vmw_user_stream_size);
 925		ret = -ENOMEM;
 926		goto out_ret;
 927	}
 928
 929	res = &stream->stream.res;
 930	stream->base.shareable = false;
 931	stream->base.tfile = NULL;
 932
 933	/*
 934	 * From here on, the destructor takes over resource freeing.
 935	 */
 936
 937	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 938	if (unlikely(ret != 0))
 939		goto out_ret;
 940
 941	tmp = vmw_resource_reference(res);
 942	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
 943				   &vmw_user_stream_base_release, NULL);
 944
 945	if (unlikely(ret != 0)) {
 946		vmw_resource_unreference(&tmp);
 947		goto out_err;
 948	}
 949
 950	arg->stream_id = res->id;
 951out_err:
 952	vmw_resource_unreference(&res);
 953out_ret:
 954	return ret;
 955}
 956
 957int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 958			   struct ttm_object_file *tfile,
 959			   uint32_t *inout_id, struct vmw_resource **out)
 960{
 961	struct vmw_user_stream *stream;
 962	struct vmw_resource *res;
 963	int ret;
 964
 965	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
 966				  *inout_id);
 967	if (unlikely(res == NULL))
 968		return -EINVAL;
 969
 970	if (res->res_free != &vmw_user_stream_free) {
 971		ret = -EINVAL;
 972		goto err_ref;
 973	}
 974
 975	stream = container_of(res, struct vmw_user_stream, stream.res);
 976	if (stream->base.tfile != tfile) {
 977		ret = -EPERM;
 978		goto err_ref;
 979	}
 980
 981	*inout_id = stream->stream.stream_id;
 982	*out = res;
 983	return 0;
 984err_ref:
 985	vmw_resource_unreference(&res);
 986	return ret;
 987}
 988
 989
 990/**
 991 * vmw_dumb_create - Create a dumb kms buffer
 992 *
 993 * @file_priv: Pointer to a struct drm_file identifying the caller.
 994 * @dev: Pointer to the drm device.
 995 * @args: Pointer to a struct drm_mode_create_dumb structure
 996 *
 997 * This is a driver callback for the core drm create_dumb functionality.
 998 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 999 * that the arguments have a different format.
1000 */
1001int vmw_dumb_create(struct drm_file *file_priv,
1002		    struct drm_device *dev,
1003		    struct drm_mode_create_dumb *args)
1004{
1005	struct vmw_private *dev_priv = vmw_priv(dev);
1006	struct vmw_dma_buffer *dma_buf;
1007	int ret;
1008
1009	args->pitch = args->width * ((args->bpp + 7) / 8);
1010	args->size = args->pitch * args->height;
1011
1012	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1013	if (unlikely(ret != 0))
1014		return ret;
1015
1016	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1017				    args->size, false, &args->handle,
1018				    &dma_buf, NULL);
1019	if (unlikely(ret != 0))
1020		goto out_no_dmabuf;
1021
1022	vmw_dmabuf_unreference(&dma_buf);
1023out_no_dmabuf:
1024	ttm_read_unlock(&dev_priv->reservation_sem);
1025	return ret;
1026}
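
/*
 * Worked example of the computation above: a 1920x1080 request at
 * 32 bpp yields pitch = 1920 * 4 = 7680 bytes and size = 7680 * 1080 =
 * 8294400 bytes, which the TTM buffer-object layer then rounds up to
 * whole pages.
 */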
1027
1028/**
1029 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1030 *
1031 * @file_priv: Pointer to a struct drm_file identifying the caller.
1032 * @dev: Pointer to the drm device.
1033 * @handle: Handle identifying the dumb buffer.
1034 * @offset: The address space offset returned.
1035 *
1036 * This is a driver callback for the core drm dumb_map_offset functionality.
1037 */
1038int vmw_dumb_map_offset(struct drm_file *file_priv,
1039			struct drm_device *dev, uint32_t handle,
1040			uint64_t *offset)
1041{
1042	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1043	struct vmw_dma_buffer *out_buf;
1044	int ret;
1045
1046	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1047	if (ret != 0)
1048		return -EINVAL;
1049
1050	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1051	vmw_dmabuf_unreference(&out_buf);
1052	return 0;
1053}
1054
1055/**
1056 * vmw_dumb_destroy - Destroy a dumb buffer
1057 *
1058 * @file_priv: Pointer to a struct drm_file identifying the caller.
1059 * @dev: Pointer to the drm device.
1060 * @handle: Handle identifying the dumb buffer.
1061 *
1062 * This is a driver callback for the core drm dumb_destroy functionality.
1063 */
1064int vmw_dumb_destroy(struct drm_file *file_priv,
1065		     struct drm_device *dev,
1066		     uint32_t handle)
1067{
1068	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1069					 handle, TTM_REF_USAGE);
1070}
1071
1072/**
1073 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1074 *
1075 * @res:            The resource for which to allocate a backup buffer.
1076 * @interruptible:  Whether any sleeps during allocation should be
1077 *                  performed while interruptible.
1078 */
1079static int vmw_resource_buf_alloc(struct vmw_resource *res,
1080				  bool interruptible)
1081{
1082	unsigned long size =
1083		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
1084	struct vmw_dma_buffer *backup;
1085	int ret;
1086
1087	if (likely(res->backup)) {
1088		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1089		return 0;
1090	}
1091
1092	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1093	if (unlikely(backup == NULL))
1094		return -ENOMEM;
1095
1096	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1097			      res->func->backup_placement,
1098			      interruptible,
1099			      &vmw_dmabuf_bo_free);
1100	if (unlikely(ret != 0))
1101		goto out_no_dmabuf;
1102
1103	res->backup = backup;
1104
1105out_no_dmabuf:
1106	return ret;
1107}
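
/*
 * The size computation in vmw_resource_buf_alloc() is the standard
 * page round-up: with 4 KiB pages, for example, a backup_size of
 * 5000 bytes yields an 8192-byte (two-page) backup buffer.
 */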
1108
1109/**
1110 * vmw_resource_do_validate - Make a resource up-to-date and visible
1111 *                            to the device.
1112 *
1113 * @res:            The resource to make visible to the device.
1114 * @val_buf:        Information about a buffer possibly
1115 *                  containing backup data if a bind operation is needed.
1116 *
1117 * On hardware resource shortage, this function returns -EBUSY and
1118 * should be retried once resources have been freed up.
1119 */
1120static int vmw_resource_do_validate(struct vmw_resource *res,
1121				    struct ttm_validate_buffer *val_buf)
1122{
1123	int ret = 0;
1124	const struct vmw_res_func *func = res->func;
1125
1126	if (unlikely(res->id == -1)) {
1127		ret = func->create(res);
1128		if (unlikely(ret != 0))
1129			return ret;
1130	}
1131
1132	if (func->bind &&
1133	    ((func->needs_backup && list_empty(&res->mob_head) &&
1134	      val_buf->bo != NULL) ||
1135	     (!func->needs_backup && val_buf->bo != NULL))) {
1136		ret = func->bind(res, val_buf);
1137		if (unlikely(ret != 0))
1138			goto out_bind_failed;
1139		if (func->needs_backup)
1140			list_add_tail(&res->mob_head, &res->backup->res_list);
1141	}
1142
1143	/*
1144	 * Only do this on write operations, and move to
1145	 * vmw_resource_unreserve if it can be called after
1146	 * backup buffers have been unreserved. Otherwise
1147	 * sort out locking.
1148	 */
1149	res->res_dirty = true;
1150
1151	return 0;
1152
1153out_bind_failed:
1154	func->destroy(res);
1155
1156	return ret;
1157}
1158
1159/**
1160 * vmw_resource_unreserve - Unreserve a resource previously reserved for
1161 * command submission.
1162 *
1163 * @res:               Pointer to the struct vmw_resource to unreserve.
1164 * @switch_backup:     Backup buffer has been switched.
1165 * @new_backup:        Pointer to new backup buffer if command submission
1166 *                     switched. May be NULL.
1167 * @new_backup_offset: New backup offset if @switch_backup is true.
1168 *
1169 * Currently unreserving a resource means putting it back on the device's
1170 * resource lru list, so that it can be evicted if necessary.
1171 */
1172void vmw_resource_unreserve(struct vmw_resource *res,
1173			    bool switch_backup,
1174			    struct vmw_dma_buffer *new_backup,
1175			    unsigned long new_backup_offset)
1176{
1177	struct vmw_private *dev_priv = res->dev_priv;
1178
1179	if (!list_empty(&res->lru_head))
1180		return;
1181
1182	if (switch_backup && new_backup != res->backup) {
1183		if (res->backup) {
1184			lockdep_assert_held(&res->backup->base.resv->lock.base);
1185			list_del_init(&res->mob_head);
1186			vmw_dmabuf_unreference(&res->backup);
1187		}
1188
1189		if (new_backup) {
1190			res->backup = vmw_dmabuf_reference(new_backup);
1191			lockdep_assert_held(&new_backup->base.resv->lock.base);
1192			list_add_tail(&res->mob_head, &new_backup->res_list);
1193		} else {
1194			res->backup = NULL;
1195		}
1196	}
1197	if (switch_backup)
1198		res->backup_offset = new_backup_offset;
1199
1200	if (!res->func->may_evict || res->id == -1 || res->pin_count)
1201		return;
1202
1203	write_lock(&dev_priv->resource_lock);
1204	list_add_tail(&res->lru_head,
1205		      &res->dev_priv->res_lru[res->func->res_type]);
1206	write_unlock(&dev_priv->resource_lock);
1207}
1208
1209/**
1210 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1211 *                             for a resource and in that case, allocate
1212 *                             one, reserve and validate it.
1213 *
1214 * @res:            The resource for which to allocate a backup buffer.
1215 * @interruptible:  Whether any sleeps during allocation should be
1216 *                  performed while interruptible.
1217 * @val_buf:        On successful return contains data about the
1218 *                  reserved and validated backup buffer.
1219 */
1220static int
1221vmw_resource_check_buffer(struct vmw_resource *res,
1222			  bool interruptible,
1223			  struct ttm_validate_buffer *val_buf)
1224{
1225	struct list_head val_list;
1226	bool backup_dirty = false;
1227	int ret;
1228
1229	if (unlikely(res->backup == NULL)) {
1230		ret = vmw_resource_buf_alloc(res, interruptible);
1231		if (unlikely(ret != 0))
1232			return ret;
1233	}
1234
1235	INIT_LIST_HEAD(&val_list);
1236	val_buf->bo = ttm_bo_reference(&res->backup->base);
1237	val_buf->shared = false;
1238	list_add_tail(&val_buf->head, &val_list);
1239	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
1240	if (unlikely(ret != 0))
1241		goto out_no_reserve;
1242
1243	if (res->func->needs_backup && list_empty(&res->mob_head))
1244		return 0;
1245
1246	backup_dirty = res->backup_dirty;
1247	ret = ttm_bo_validate(&res->backup->base,
1248			      res->func->backup_placement,
1249			      true, false);
1250
1251	if (unlikely(ret != 0))
1252		goto out_no_validate;
1253
1254	return 0;
1255
1256out_no_validate:
1257	ttm_eu_backoff_reservation(NULL, &val_list);
1258out_no_reserve:
1259	ttm_bo_unref(&val_buf->bo);
1260	if (backup_dirty)
1261		vmw_dmabuf_unreference(&res->backup);
1262
1263	return ret;
1264}
1265
1266/**
1267 * vmw_resource_reserve - Reserve a resource for command submission
1268 *
1269 * @res:            The resource to reserve.
1270 *
1271 * This function takes the resource off the LRU list and makes sure
1272 * a backup buffer is present for guest-backed resources. However,
1273 * the buffer may not be bound to the resource at this point.
1274 *
1275 */
1276int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1277			 bool no_backup)
1278{
1279	struct vmw_private *dev_priv = res->dev_priv;
1280	int ret;
1281
1282	write_lock(&dev_priv->resource_lock);
1283	list_del_init(&res->lru_head);
1284	write_unlock(&dev_priv->resource_lock);
1285
1286	if (res->func->needs_backup && res->backup == NULL &&
1287	    !no_backup) {
1288		ret = vmw_resource_buf_alloc(res, interruptible);
1289		if (unlikely(ret != 0)) {
1290			DRM_ERROR("Failed to allocate a backup buffer "
1291				  "of size %lu. bytes\n",
1292				  (unsigned long) res->backup_size);
1293			return ret;
1294		}
1295	}
1296
1297	return 0;
1298}
1299
1300/**
1301 * vmw_resource_backoff_reservation - Unreserve and unreference a
1302 *                                    backup buffer
1303 *
1304 * @val_buf:        Backup buffer information.
1305 */
1306static void
1307vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1308{
1309	struct list_head val_list;
1310
1311	if (likely(val_buf->bo == NULL))
1312		return;
1313
1314	INIT_LIST_HEAD(&val_list);
1315	list_add_tail(&val_buf->head, &val_list);
1316	ttm_eu_backoff_reservation(NULL, &val_list);
1317	ttm_bo_unref(&val_buf->bo);
1318}
1319
1320/**
1321 * vmw_resource_do_evict - Evict a resource, and transfer its data
1322 *                         to a backup buffer.
1323 *
1324 * @res:            The resource to evict.
1325 * @interruptible:  Whether to wait interruptible.
1326 */
1327static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1328{
1329	struct ttm_validate_buffer val_buf;
1330	const struct vmw_res_func *func = res->func;
1331	int ret;
1332
1333	BUG_ON(!func->may_evict);
1334
1335	val_buf.bo = NULL;
1336	val_buf.shared = false;
1337	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1338	if (unlikely(ret != 0))
1339		return ret;
1340
1341	if (unlikely(func->unbind != NULL &&
1342		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1343		ret = func->unbind(res, res->res_dirty, &val_buf);
1344		if (unlikely(ret != 0))
1345			goto out_no_unbind;
1346		list_del_init(&res->mob_head);
1347	}
1348	ret = func->destroy(res);
1349	res->backup_dirty = true;
1350	res->res_dirty = false;
1351out_no_unbind:
1352	vmw_resource_backoff_reservation(&val_buf);
1353
1354	return ret;
1355}
1356
1357
1358/**
1359 * vmw_resource_validate - Make a resource up-to-date and visible
1360 *                         to the device.
1361 *
1362 * @res:            The resource to make visible to the device.
1363 *
1364 * On successful return, any backup DMA buffer pointed to by @res->backup will
1365 * be reserved and validated.
1366 * On hardware resource shortage, this function will repeatedly evict
1367 * resources of the same type until the validation succeeds.
1368 */
1369int vmw_resource_validate(struct vmw_resource *res)
1370{
1371	int ret;
1372	struct vmw_resource *evict_res;
1373	struct vmw_private *dev_priv = res->dev_priv;
1374	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1375	struct ttm_validate_buffer val_buf;
1376	unsigned err_count = 0;
1377
1378	if (!res->func->create)
1379		return 0;
1380
1381	val_buf.bo = NULL;
1382	val_buf.shared = false;
1383	if (res->backup)
1384		val_buf.bo = &res->backup->base;
1385	do {
1386		ret = vmw_resource_do_validate(res, &val_buf);
1387		if (likely(ret != -EBUSY))
1388			break;
1389
1390		write_lock(&dev_priv->resource_lock);
1391		if (list_empty(lru_list) || !res->func->may_evict) {
1392			DRM_ERROR("Out of device resources "
1393				  "for %s.\n", res->func->type_name);
1394			ret = -EBUSY;
1395			write_unlock(&dev_priv->resource_lock);
1396			break;
1397		}
1398
1399		evict_res = vmw_resource_reference
1400			(list_first_entry(lru_list, struct vmw_resource,
1401					  lru_head));
1402		list_del_init(&evict_res->lru_head);
1403
1404		write_unlock(&dev_priv->resource_lock);
1405
1406		ret = vmw_resource_do_evict(evict_res, true);
1407		if (unlikely(ret != 0)) {
1408			write_lock(&dev_priv->resource_lock);
1409			list_add_tail(&evict_res->lru_head, lru_list);
1410			write_unlock(&dev_priv->resource_lock);
1411			if (ret == -ERESTARTSYS ||
1412			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1413				vmw_resource_unreference(&evict_res);
1414				goto out_no_validate;
1415			}
1416		}
1417
1418		vmw_resource_unreference(&evict_res);
1419	} while (1);
1420
1421	if (unlikely(ret != 0))
1422		goto out_no_validate;
1423	else if (!res->func->needs_backup && res->backup) {
1424		list_del_init(&res->mob_head);
1425		vmw_dmabuf_unreference(&res->backup);
1426	}
1427
1428	return 0;
1429
1430out_no_validate:
1431	return ret;
1432}
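
/*
 * Illustrative sketch (not part of the driver): validation is normally
 * bracketed by the reserve/unreserve pair defined in this file:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret == 0) {
 *		ret = vmw_resource_validate(res);
 *		(submit commands referencing res, fence the backup)
 *		vmw_resource_unreserve(res, false, NULL, 0);
 *	}
 */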
1433
1434/**
1435 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1436 *                       object without unreserving it.
1437 *
1438 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1439 * @fence:          Pointer to the fence. If NULL, this function will
1440 *                  insert a fence into the command stream.
1441 *
1442 * Contrary to the ttm_eu version of this function, it takes only
1443 * a single buffer object instead of a list, and it also doesn't
1444 * unreserve the buffer object, which needs to be done separately.
1445 */
1446void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1447			 struct vmw_fence_obj *fence)
1448{
1449	struct ttm_bo_device *bdev = bo->bdev;
1450
1451	struct vmw_private *dev_priv =
1452		container_of(bdev, struct vmw_private, bdev);
1453
1454	if (fence == NULL) {
1455		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1456		reservation_object_add_excl_fence(bo->resv, &fence->base);
1457		dma_fence_put(&fence->base);
1458	} else
1459		reservation_object_add_excl_fence(bo->resv, &fence->base);
1460}
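
/*
 * Illustrative sketch (not part of the driver): after submitting
 * commands that touch a reserved buffer, a caller typically fences it
 * and only then unreserves, since this function leaves the buffer
 * reserved:
 *
 *	vmw_fence_single_bo(bo, fence);	(or fence == NULL to insert one)
 *	ttm_bo_unreserve(bo);
 */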
1461
1462/**
1463 * vmw_resource_move_notify - TTM move_notify_callback
1464 *
1465 * @bo: The TTM buffer object about to move.
1466 * @mem: The struct ttm_mem_reg indicating to what memory
1467 *       region the move is taking place.
1468 *
1469 * Evicts the Guest Backed hardware resource if the backup
1470 * buffer is being moved out of MOB memory.
1471 * Note that this function should not race with the resource
1472 * validation code as long as it accesses only members of struct
1473 * resource that remain static while bo::res is !NULL and
1474 * while we have @bo reserved. struct resource::backup is *not* a
1475 * static member. The resource validation code will take care
1476 * to set @bo::res to NULL, while having @bo reserved when the
1477 * buffer is no longer bound to the resource, so @bo::res can be
1478 * used to determine whether there is a need to unbind and whether
1479 * it is safe to unbind.
1480 */
1481void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1482			      struct ttm_mem_reg *mem)
1483{
1484	struct vmw_dma_buffer *dma_buf;
1485
1486	if (mem == NULL)
1487		return;
1488
1489	if (bo->destroy != vmw_dmabuf_bo_free &&
1490	    bo->destroy != vmw_user_dmabuf_destroy)
1491		return;
1492
1493	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1494
1495	if (mem->mem_type != VMW_PL_MOB) {
1496		struct vmw_resource *res, *n;
1497		struct ttm_validate_buffer val_buf;
1498
1499		val_buf.bo = bo;
1500		val_buf.shared = false;
1501
1502		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1503
1504			if (unlikely(res->func->unbind == NULL))
1505				continue;
1506
1507			(void) res->func->unbind(res, true, &val_buf);
1508			res->backup_dirty = true;
1509			res->res_dirty = false;
1510			list_del_init(&res->mob_head);
1511		}
1512
1513		(void) ttm_bo_wait(bo, false, false);
1514	}
1515}
1516
1517
1518
1519/**
1520 * vmw_query_readback_all - Read back cached query states
1521 *
1522 * @dx_query_mob: Buffer containing the DX query MOB
1523 *
1524 * Read back cached states from the device if they exist. This function
1525 * assumes binding_mutex is held.
1526 */
1527int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1528{
1529	struct vmw_resource *dx_query_ctx;
1530	struct vmw_private *dev_priv;
1531	struct {
1532		SVGA3dCmdHeader header;
1533		SVGA3dCmdDXReadbackAllQuery body;
1534	} *cmd;
1535
1536
1537	/* No query bound, so do nothing */
1538	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1539		return 0;
1540
1541	dx_query_ctx = dx_query_mob->dx_query_ctx;
1542	dev_priv     = dx_query_ctx->dev_priv;
1543
1544	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1545	if (unlikely(cmd == NULL)) {
1546		DRM_ERROR("Failed reserving FIFO space for "
1547			  "query MOB read back.\n");
1548		return -ENOMEM;
1549	}
1550
1551	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1552	cmd->header.size = sizeof(cmd->body);
1553	cmd->body.cid    = dx_query_ctx->id;
1554
1555	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1556
1557	/* Triggers a rebind the next time the affected context is bound */
1558	dx_query_mob->dx_query_ctx = NULL;
1559
1560	return 0;
1561}
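
/*
 * The command built above follows the FIFO pattern used throughout the
 * driver: reserve sizeof(*cmd) bytes, set header.id to the command id,
 * set header.size to the size of the body alone, fill in the body, and
 * commit the full reserved size.
 */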
1562
1563
1564
1565/**
1566 * vmw_query_move_notify - Read back cached query states
1567 *
1568 * @bo: The TTM buffer object about to move.
1569 * @mem: The memory region @bo is moving to.
1570 *
1571 * Called before the query MOB is swapped out to read back cached query
1572 * states from the device.
1573 */
1574void vmw_query_move_notify(struct ttm_buffer_object *bo,
1575			   struct ttm_mem_reg *mem)
1576{
1577	struct vmw_dma_buffer *dx_query_mob;
1578	struct ttm_bo_device *bdev = bo->bdev;
1579	struct vmw_private *dev_priv;
1580
1581
1582	dev_priv = container_of(bdev, struct vmw_private, bdev);
1583
1584	mutex_lock(&dev_priv->binding_mutex);
1585
1586	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1587	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1588		mutex_unlock(&dev_priv->binding_mutex);
1589		return;
1590	}
1591
1592	/* If BO is being moved from MOB to system memory */
1593	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1594		struct vmw_fence_obj *fence;
1595
1596		(void) vmw_query_readback_all(dx_query_mob);
1597		mutex_unlock(&dev_priv->binding_mutex);
1598
1599		/* Create a fence and attach the BO to it */
1600		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1601		vmw_fence_single_bo(bo, fence);
1602
1603		if (fence != NULL)
1604			vmw_fence_obj_unreference(&fence);
1605
1606		(void) ttm_bo_wait(bo, false, false);
1607	} else
1608		mutex_unlock(&dev_priv->binding_mutex);
1609
1610}
1611
1612/**
1613 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1614 *
1615 * @res:            The resource being queried.
1616 */
1617bool vmw_resource_needs_backup(const struct vmw_resource *res)
1618{
1619	return res->func->needs_backup;
1620}
1621
1622/**
1623 * vmw_resource_evict_type - Evict all resources of a specific type
1624 *
1625 * @dev_priv:       Pointer to a device private struct
1626 * @type:           The resource type to evict
1627 *
1628 * To avoid thrashing starvation or as part of the hibernation sequence,
1629 * try to evict all evictable resources of a specific type.
1630 */
1631static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1632				    enum vmw_res_type type)
1633{
1634	struct list_head *lru_list = &dev_priv->res_lru[type];
1635	struct vmw_resource *evict_res;
1636	unsigned err_count = 0;
1637	int ret;
1638
1639	do {
1640		write_lock(&dev_priv->resource_lock);
1641
1642		if (list_empty(lru_list))
1643			goto out_unlock;
1644
1645		evict_res = vmw_resource_reference(
1646			list_first_entry(lru_list, struct vmw_resource,
1647					 lru_head));
1648		list_del_init(&evict_res->lru_head);
1649		write_unlock(&dev_priv->resource_lock);
1650
1651		ret = vmw_resource_do_evict(evict_res, false);
1652		if (unlikely(ret != 0)) {
1653			write_lock(&dev_priv->resource_lock);
1654			list_add_tail(&evict_res->lru_head, lru_list);
1655			write_unlock(&dev_priv->resource_lock);
1656			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1657				vmw_resource_unreference(&evict_res);
1658				return;
1659			}
1660		}
1661
1662		vmw_resource_unreference(&evict_res);
1663	} while (1);
1664
1665out_unlock:
1666	write_unlock(&dev_priv->resource_lock);
1667}
1668
1669/**
1670 * vmw_resource_evict_all - Evict all evictable resources
1671 *
1672 * @dev_priv:       Pointer to a device private struct
1673 *
1674 * To avoid thrashing starvation or as part of the hibernation sequence,
1675 * evict all evictable resources. In particular this means that all
1676 * guest-backed resources that are registered with the device are
1677 * evicted and the OTable becomes clean.
1678 */
1679void vmw_resource_evict_all(struct vmw_private *dev_priv)
1680{
1681	enum vmw_res_type type;
1682
1683	mutex_lock(&dev_priv->cmdbuf_mutex);
1684
1685	for (type = 0; type < vmw_res_max; ++type)
1686		vmw_resource_evict_type(dev_priv, type);
1687
1688	mutex_unlock(&dev_priv->cmdbuf_mutex);
1689}
1690
1691/**
1692 * vmw_resource_pin - Add a pin reference on a resource
1693 *
1694 * @res: The resource to add a pin reference on
1695 *
1696 * This function adds a pin reference, and if needed validates the resource.
1697 * Having a pin reference means that the resource can never be evicted, and
1698 * its id will never change as long as there is a pin reference.
1699 * This function returns 0 on success and a negative error code on failure.
1700 */
1701int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1702{
1703	struct vmw_private *dev_priv = res->dev_priv;
1704	int ret;
1705
1706	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1707	mutex_lock(&dev_priv->cmdbuf_mutex);
1708	ret = vmw_resource_reserve(res, interruptible, false);
1709	if (ret)
1710		goto out_no_reserve;
1711
1712	if (res->pin_count == 0) {
1713		struct vmw_dma_buffer *vbo = NULL;
1714
1715		if (res->backup) {
1716			vbo = res->backup;
1717
1718			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1719			if (!vbo->pin_count) {
1720				ret = ttm_bo_validate
1721					(&vbo->base,
1722					 res->func->backup_placement,
1723					 interruptible, false);
1724				if (ret) {
1725					ttm_bo_unreserve(&vbo->base);
1726					goto out_no_validate;
1727				}
1728			}
1729
1730			/* Do we really need to pin the MOB as well? */
1731			vmw_bo_pin_reserved(vbo, true);
1732		}
1733		ret = vmw_resource_validate(res);
1734		if (vbo)
1735			ttm_bo_unreserve(&vbo->base);
1736		if (ret)
1737			goto out_no_validate;
1738	}
1739	res->pin_count++;
1740
1741out_no_validate:
1742	vmw_resource_unreserve(res, false, NULL, 0UL);
1743out_no_reserve:
1744	mutex_unlock(&dev_priv->cmdbuf_mutex);
1745	ttm_write_unlock(&dev_priv->reservation_sem);
1746
1747	return ret;
1748}
1749
1750/**
1751 * vmw_resource_unpin - Remove a pin reference from a resource
1752 *
1753 * @res: The resource to remove a pin reference from
1754 *
1755 * Having a pin reference means that the resource can never be evicted, and
1756 * its id will never change as long as there is a pin reference.
1757 */
1758void vmw_resource_unpin(struct vmw_resource *res)
1759{
1760	struct vmw_private *dev_priv = res->dev_priv;
1761	int ret;
1762
1763	ttm_read_lock(&dev_priv->reservation_sem, false);
1764	mutex_lock(&dev_priv->cmdbuf_mutex);
1765
1766	ret = vmw_resource_reserve(res, false, true);
1767	WARN_ON(ret);
1768
1769	WARN_ON(res->pin_count == 0);
1770	if (--res->pin_count == 0 && res->backup) {
1771		struct vmw_dma_buffer *vbo = res->backup;
1772
1773		ttm_bo_reserve(&vbo->base, false, false, NULL);
1774		vmw_bo_pin_reserved(vbo, false);
1775		ttm_bo_unreserve(&vbo->base);
1776	}
1777
1778	vmw_resource_unreserve(res, false, NULL, 0UL);
1779
1780	mutex_unlock(&dev_priv->cmdbuf_mutex);
1781	ttm_read_unlock(&dev_priv->reservation_sem);
1782}
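
/*
 * Illustrative sketch (not part of the driver): pin references are
 * paired, and the resource id stays valid while one is held:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret == 0) {
 *		(res->id is stable and res cannot be evicted)
 *		vmw_resource_unpin(res);
 *	}
 */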
1783
1784/**
1785 * vmw_res_type - Return the resource type
1786 *
1787 * @res: Pointer to the resource
1788 */
1789enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1790{
1791	return res->func->res_type;
1792}