v3.1
 
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	/*
	 * Called with the resource lock write-held; drop it across the
	 * destructor calls and re-acquire it before returning to the
	 * kref_put() caller.
	 */
	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	/*
	 * Old-style idr API: pre-allocate memory outside the lock, then
	 * retry while -EAGAIN indicates the preallocation was consumed
	 * by a concurrent allocation.
	 */
	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
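
/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * consumption pattern for the lookup API above. A successful
 * vmw_resource_lookup() returns a referenced resource, so it must be
 * balanced by vmw_resource_unreference(), which also clears the caller's
 * pointer. The example_* name is hypothetical.
 */
static int example_use_context(struct vmw_private *dev_priv, int cid)
{
	struct vmw_resource *res;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	/* ... use the context resource ... */

	vmw_resource_unreference(&res);
	return 0;
}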

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;

}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}


/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	/* The per-mip-level size array follows the fixed-size command. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
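
/*
 * Editor's note (worked example, assuming a 64-bit kernel with 4 KiB
 * pages): for a 1 MiB buffer, num_pages = 256, so the page array needs
 * 256 * sizeof(void *) = 2048 bytes, which the PAGE_MASK rounding above
 * accounts as one full 4096-byte page on top of the fixed per-object
 * overhead (ttm_bo_extra_size plus the rounded struct size).
 */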

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;
	else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
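
/*
 * Editor's illustrative sketch, not part of the original file: a
 * successful vmw_user_dmabuf_lookup() takes a ttm_bo_reference() on the
 * underlying buffer object, so the caller drops it with ttm_bo_unref()
 * when done. The example_* name is hypothetical.
 */
static int example_use_dmabuf(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* ... use vmw_bo ... */

	bo = &vmw_bo->base;
	ttm_bo_unref(&bo);
	return 0;
}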

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

v5.4
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;
	list_add_tail(&res->mob_head, &backup->res_list);
	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		list_del_init(&res->mob_head);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->used_prio = 3;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
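
/*
 * Editor's illustrative sketch, not part of the original file: each
 * resource type pairs vmw_resource_init() with its own struct
 * vmw_res_func table. Only fields this file actually dereferences
 * (create, destroy, needs_backup, may_evict, type_name, res_type, ...)
 * are shown; the enum value and all example_* names are assumptions.
 */
static int example_create(struct vmw_resource *res)
{
	return 0;	/* define the hardware object, e.g. via the FIFO */
}

static int example_destroy(struct vmw_resource *res)
{
	return 0;	/* destroy the hardware object */
}

static const struct vmw_res_func example_func = {
	.res_type = vmw_res_context,	/* assumed enum value */
	.needs_backup = false,
	.may_evict = true,
	.type_name = "example resource",
	.create = example_create,
	.destroy = example_destroy,
};

static int example_init(struct vmw_private *dev_priv,
			struct vmw_resource *res,
			void (*res_free)(struct vmw_resource *))
{
	/* Defer device id allocation until first validation. */
	return vmw_resource_init(dev_priv, res, true, res_free, &example_func);
}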

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle without taking a long-term reference, and perform
 * basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, an ERR_PTR is returned. The returned resource is not refcounted;
 * the lookup must be paired with ttm_base_object_noref_release().
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
				     true);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

 620
 621/**
 622 * vmw_resource_validate - Make a resource up-to-date and visible
 623 *                         to the device.
 624 * @res: The resource to make visible to the device.
 625 * @intr: Perform waits interruptible if possible.
 626 *
 627 * On succesful return, any backup DMA buffer pointed to by @res->backup will
 628 * be reserved and validated.
 629 * On hardware resource shortage, this function will repeatedly evict
 630 * resources of the same type until the validation succeeds.
 631 *
 632 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 633 * on failure.
 634 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned int err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources for %s.\n",
				  res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
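
/*
 * Illustrative sketch (not part of the driver): a minimal caller of
 * vmw_resource_validate(). The resource must be reserved first so that
 * validation cannot race with eviction; real callers, such as
 * vmw_resource_pin() below, additionally hold dev_priv->cmdbuf_mutex.
 * The helper name and the unreserve arguments are assumptions made for
 * illustration only.
 */
static int __maybe_unused vmw_example_validate(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_reserve(res, true, false);
	if (ret)
		return ret;

	/* May evict other resources of the same type on device shortage. */
	ret = vmw_resource_validate(res, true);

	/* Leave backup and dirty state unchanged. */
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
	return ret;
}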

/**
 * vmw_resource_unbind_list - Unbind the resources bound to a backup buffer
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, res->res_dirty, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}
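
/*
 * Illustrative sketch (not part of the driver): since
 * vmw_resource_unbind_list() asserts that the buffer's reservation lock
 * is held, a hypothetical caller would reserve the buffer object first.
 * The helper name below is an assumption for illustration only.
 */
static void __maybe_unused vmw_example_unbind_backup(struct vmw_buffer_object *vbo)
{
	/* ttm_bo_reserve() takes the underlying dma_resv lock. */
	if (ttm_bo_reserve(&vbo->base, false, false, NULL))
		return;

	vmw_resource_unbind_list(vbo);
	ttm_bo_unreserve(&vbo->base);
}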

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes the binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
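
/*
 * Illustrative sketch (not part of the driver): because
 * vmw_query_readback_all() assumes the binding_mutex is held, a
 * hypothetical caller would look like the helper below; real callers
 * such as vmw_query_move_notify() follow the same pattern.
 */
static int __maybe_unused
vmw_example_readback(struct vmw_private *dev_priv,
		     struct vmw_buffer_object *dx_query_mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_query_readback_all(dx_query_mob);
	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}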

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else {
		mutex_unlock(&dev_priv->binding_mutex);
	}
}
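
/*
 * Illustrative sketch (not part of the driver): the readback above only
 * triggers when a MOB is demoted to system memory. The same condition,
 * expressed as a hypothetical standalone predicate:
 */
static bool __maybe_unused
vmw_example_mob_leaving_device(const struct ttm_buffer_object *bo,
			       const struct ttm_mem_reg *mem)
{
	return mem->mem_type == TTM_PL_SYSTEM &&
		bo->mem.mem_type == VMW_PL_MOB;
}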

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
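
/*
 * Illustrative sketch (not part of the driver): callers can combine
 * vmw_resource_needs_backup() with the current backup pointer to decide
 * whether a backup MOB must still be allocated before validation. The
 * helper below is a hypothetical example of such a check.
 */
static bool __maybe_unused
vmw_example_backup_missing(const struct vmw_resource *res)
{
	return vmw_resource_needs_backup(res) && res->backup == NULL;
}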

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned int err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
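
/*
 * Illustrative sketch (not part of the driver): a hibernation-prepare
 * path would quiesce the device by evicting everything, leaving the
 * OTables clean. The function name below is hypothetical.
 */
static void __maybe_unused
vmw_example_prepare_hibernation(struct vmw_private *dev_priv)
{
	/* Empties every per-type LRU list registered with the device. */
	vmw_resource_evict_all(dev_priv);
}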

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Perform waits interruptible if possible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false,
					     NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
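
/*
 * Illustrative sketch (not part of the driver): pin references must be
 * balanced with vmw_resource_unpin(). A hypothetical user that needs a
 * stable resource id for the duration of an operation:
 */
static int __maybe_unused vmw_example_with_pinned_res(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_pin(res, true);
	if (ret)
		return ret;

	/* ... res->id is now stable and res cannot be evicted ... */

	vmw_resource_unpin(res);
	return 0;
}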

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}
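
/*
 * Illustrative sketch (not part of the driver): vmw_res_type() lets
 * callers branch on the resource implementation without knowing its
 * concrete type. A hypothetical predicate, assuming the vmw_res_surface
 * enumerator from vmwgfx_drv.h:
 */
static bool __maybe_unused vmw_example_is_surface(const struct vmw_resource *res)
{
	return vmw_res_type(res) == vmw_res_surface;
}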