v5.4: drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_resource_priv.h"
  31#include "vmwgfx_binding.h"
  32#include "vmwgfx_drv.h"
  33
  34#define VMW_RES_EVICT_ERR_COUNT 10
  35
  36/**
  37 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  38 * @res: The resource
  39 */
  40void vmw_resource_mob_attach(struct vmw_resource *res)
  41{
  42	struct vmw_buffer_object *backup = res->backup;
  43
  44	dma_resv_assert_held(res->backup->base.base.resv);
  45	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  46		res->func->prio;
  47	list_add_tail(&res->mob_head, &backup->res_list);
  48	vmw_bo_prio_add(backup, res->used_prio);
  49}
  50
  51/**
  52 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  53 * @res: The resource
  54 */
  55void vmw_resource_mob_detach(struct vmw_resource *res)
  56{
  57	struct vmw_buffer_object *backup = res->backup;
  58
  59	dma_resv_assert_held(backup->base.base.resv);
  60	if (vmw_resource_mob_attached(res)) {
  61		list_del_init(&res->mob_head);
  62		vmw_bo_prio_del(backup, res->used_prio);
  63	}
  64}
  65
  66struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  67{
  68	kref_get(&res->kref);
  69	return res;
  70}
  71
  72struct vmw_resource *
  73vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  74{
  75	return kref_get_unless_zero(&res->kref) ? res : NULL;
  76}
  77
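The two helpers above are the standard kref idiom: vmw_resource_reference() is only safe when the caller already holds a reference, while the _unless_doomed variant is meant for lookups that can race with the final kref_put(). A minimal caller sketch (the surrounding lookup logic is illustrative, not driver code):

	/*
	 * Illustrative only: turn a pointer found in some shared structure
	 * into a usable reference, skipping objects already being torn down.
	 */
	static struct vmw_resource *example_get_res(struct vmw_resource *candidate)
	{
		struct vmw_resource *res =
			vmw_resource_reference_unless_doomed(candidate);

		if (!res)
			return NULL;	/* Doomed: treat as not found. */

		/* ... use res; drop it with vmw_resource_unreference(&res). */
		return res;
	}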
  78/**
  79 * vmw_resource_release_id - release a resource id to the id manager.
  80 *
  81 * @res: Pointer to the resource.
  82 *
  83 * Release the resource id to the resource id manager and set it to -1
  84 */
  85void vmw_resource_release_id(struct vmw_resource *res)
  86{
  87	struct vmw_private *dev_priv = res->dev_priv;
  88	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  89
  90	spin_lock(&dev_priv->resource_lock);
  91	if (res->id != -1)
  92		idr_remove(idr, res->id);
  93	res->id = -1;
  94	spin_unlock(&dev_priv->resource_lock);
  95}
  96
  97static void vmw_resource_release(struct kref *kref)
  98{
  99	struct vmw_resource *res =
 100	    container_of(kref, struct vmw_resource, kref);
 101	struct vmw_private *dev_priv = res->dev_priv;
 102	int id;
 103	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 104
 105	spin_lock(&dev_priv->resource_lock);
 106	list_del_init(&res->lru_head);
 107	spin_unlock(&dev_priv->resource_lock);
 108	if (res->backup) {
 109		struct ttm_buffer_object *bo = &res->backup->base;
 110
 111		ttm_bo_reserve(bo, false, false, NULL);
 112		if (vmw_resource_mob_attached(res) &&
 113		    res->func->unbind != NULL) {
 114			struct ttm_validate_buffer val_buf;
 115
 116			val_buf.bo = bo;
 117			val_buf.num_shared = 0;
 118			res->func->unbind(res, false, &val_buf);
 119		}
 120		res->backup_dirty = false;
 121		vmw_resource_mob_detach(res);
 122		ttm_bo_unreserve(bo);
 123		vmw_bo_unreference(&res->backup);
 124	}
 125
 126	if (likely(res->hw_destroy != NULL)) {
 127		mutex_lock(&dev_priv->binding_mutex);
 128		vmw_binding_res_list_kill(&res->binding_head);
 129		mutex_unlock(&dev_priv->binding_mutex);
 130		res->hw_destroy(res);
 131	}
 132
 133	id = res->id;
 134	if (res->res_free != NULL)
 135		res->res_free(res);
 136	else
 137		kfree(res);
 138
 139	spin_lock(&dev_priv->resource_lock);
 140	if (id != -1)
 141		idr_remove(idr, id);
 142	spin_unlock(&dev_priv->resource_lock);
 143}
 144
 145void vmw_resource_unreference(struct vmw_resource **p_res)
 146{
 147	struct vmw_resource *res = *p_res;
 148
 149	*p_res = NULL;
 150	kref_put(&res->kref, vmw_resource_release);
 151}
 152
 153
 154/**
  155 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 156 *
 157 * @res: Pointer to the resource.
 158 *
  159 * Allocate the lowest free resource id from the id manager, and set
 160 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 161 */
 162int vmw_resource_alloc_id(struct vmw_resource *res)
 163{
 164	struct vmw_private *dev_priv = res->dev_priv;
 165	int ret;
 166	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 167
 168	BUG_ON(res->id != -1);
 169
 170	idr_preload(GFP_KERNEL);
 171	spin_lock(&dev_priv->resource_lock);
 172
 173	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 174	if (ret >= 0)
 175		res->id = ret;
 176
 177	spin_unlock(&dev_priv->resource_lock);
 178	idr_preload_end();
 179	return ret < 0 ? ret : 0;
 180}
 181
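vmw_resource_alloc_id() above follows the standard idr_preload() pattern: memory is preallocated with GFP_KERNEL outside the spinlock so that the idr_alloc() under the lock can use GFP_NOWAIT without spurious failures. The matching lookup side is not in this file; a sketch of what it would look like (hypothetical helper, not driver code):

	static struct vmw_resource *example_res_lookup(struct vmw_private *dev_priv,
						       enum vmw_res_type type,
						       int id)
	{
		struct idr *idr = &dev_priv->res_idr[type];
		struct vmw_resource *res;

		spin_lock(&dev_priv->resource_lock);
		res = idr_find(idr, id);	/* NULL if id is unallocated. */
		spin_unlock(&dev_priv->resource_lock);
		return res;
	}

A real caller would also take a reference while still holding resource_lock, since the bare pointer is only guaranteed stable under the lock.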
 182/**
 183 * vmw_resource_init - initialize a struct vmw_resource
 184 *
 185 * @dev_priv:       Pointer to a device private struct.
 186 * @res:            The struct vmw_resource to initialize.
 187 * @obj_type:       Resource object type.
 188 * @delay_id:       Boolean whether to defer device id allocation until
 189 *                  the first validation.
 190 * @res_free:       Resource destructor.
 191 * @func:           Resource function table.
 192 */
 193int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 194		      bool delay_id,
 195		      void (*res_free) (struct vmw_resource *res),
 196		      const struct vmw_res_func *func)
 197{
 198	kref_init(&res->kref);
 199	res->hw_destroy = NULL;
 200	res->res_free = res_free;
 201	res->dev_priv = dev_priv;
 202	res->func = func;
 203	INIT_LIST_HEAD(&res->lru_head);
 204	INIT_LIST_HEAD(&res->mob_head);
 205	INIT_LIST_HEAD(&res->binding_head);
 206	res->id = -1;
 207	res->backup = NULL;
 208	res->backup_offset = 0;
 209	res->backup_dirty = false;
 210	res->res_dirty = false;
 211	res->used_prio = 3;
 212	if (delay_id)
 213		return 0;
 214	else
 215		return vmw_resource_alloc_id(res);
 216}
 217
 218
 219/**
 220 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 221 * TTM user-space handle and perform basic type checks
 222 *
 223 * @dev_priv:     Pointer to a device private struct
 224 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 225 * @handle:       The TTM user-space handle
 226 * @converter:    Pointer to an object describing the resource type
 227 * @p_res:        On successful return the location pointed to will contain
 228 *                a pointer to a refcounted struct vmw_resource.
 229 *
 230 * If the handle can't be found or is associated with an incorrect resource
 231 * type, -EINVAL will be returned.
 232 */
 233int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 234				    struct ttm_object_file *tfile,
 235				    uint32_t handle,
 236				    const struct vmw_user_resource_conv
 237				    *converter,
 238				    struct vmw_resource **p_res)
 239{
 240	struct ttm_base_object *base;
 241	struct vmw_resource *res;
 242	int ret = -EINVAL;
 243
 244	base = ttm_base_object_lookup(tfile, handle);
 245	if (unlikely(base == NULL))
 246		return -EINVAL;
 247
 248	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 249		goto out_bad_resource;
 250
 251	res = converter->base_obj_to_res(base);
 252	kref_get(&res->kref);
 253
 254	*p_res = res;
 255	ret = 0;
 256
 257out_bad_resource:
 258	ttm_base_object_unref(&base);
 259
 260	return ret;
 261}
 262
 263/**
  264 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
  265 * TTM user-space handle and perform basic type checks
  266 *
  267 * @dev_priv:     Pointer to a device private struct
  268 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
  269 * @handle:       The TTM user-space handle
  270 * @converter:    Pointer to an object describing the resource type
  271 *
  272 * On success a non-refcounted pointer to the resource is returned.
  273 *
  274 * Returns ERR_PTR(-ESRCH) if the handle can't be found, or ERR_PTR(-EINVAL)
  275 * if it is associated with an incorrect resource type.
  276 */
 277struct vmw_resource *
 278vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 279				      struct ttm_object_file *tfile,
 280				      uint32_t handle,
 281				      const struct vmw_user_resource_conv
 282				      *converter)
 283{
 284	struct ttm_base_object *base;
 285
 286	base = ttm_base_object_noref_lookup(tfile, handle);
 287	if (!base)
 288		return ERR_PTR(-ESRCH);
 289
 290	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
 291		ttm_base_object_noref_release();
 292		return ERR_PTR(-EINVAL);
 293	}
 294
 295	return converter->base_obj_to_res(base);
 296}
 297
 298/**
  299 * Helper function that looks up either a surface or a bo.
  300 *
  301 * The pointers pointed at by out_surf and out_buf need to be NULL.
 302 */
 303int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 304			   struct ttm_object_file *tfile,
 305			   uint32_t handle,
 306			   struct vmw_surface **out_surf,
 307			   struct vmw_buffer_object **out_buf)
 308{
 309	struct vmw_resource *res;
 310	int ret;
 311
 312	BUG_ON(*out_surf || *out_buf);
 313
 314	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 315					      user_surface_converter,
 316					      &res);
 317	if (!ret) {
 318		*out_surf = vmw_res_to_srf(res);
 319		return 0;
 320	}
 321
 322	*out_surf = NULL;
 323	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
 324	return ret;
 325}
 326
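A user-space handle may name either a surface or a buffer object, so callers of vmw_user_lookup_handle() must start with both out-pointers NULL and check which one comes back set. A minimal caller sketch (illustrative):

	static int example_dispatch(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle)
	{
		struct vmw_surface *surf = NULL;	/* must start NULL */
		struct vmw_buffer_object *buf = NULL;	/* must start NULL */
		int ret;

		ret = vmw_user_lookup_handle(dev_priv, tfile, handle,
					     &surf, &buf);
		if (ret)
			return ret;	/* Neither a surface nor a bo. */

		if (surf) {
			/* The handle named a surface. */
		} else {
			/* The handle named a buffer object. */
		}
		return 0;
	}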
 327/**
 328 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 329 *
 330 * @res:            The resource for which to allocate a backup buffer.
 331 * @interruptible:  Whether any sleeps during allocation should be
 332 *                  performed while interruptible.
 333 */
 334static int vmw_resource_buf_alloc(struct vmw_resource *res,
 335				  bool interruptible)
 336{
 337	unsigned long size =
 338		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
 339	struct vmw_buffer_object *backup;
 340	int ret;
 341
 342	if (likely(res->backup)) {
 343		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
 344		return 0;
 345	}
 346
 347	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
 348	if (unlikely(!backup))
 349		return -ENOMEM;
 350
 351	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
 352			      res->func->backup_placement,
 353			      interruptible,
 354			      &vmw_bo_bo_free);
 355	if (unlikely(ret != 0))
 356		goto out_no_bo;
 357
 358	res->backup = backup;
 359
 360out_no_bo:
 361	return ret;
 362}
 363
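The expression (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK is the usual round-up-to-whole-pages idiom; the v6.2 version below spells the same computation PFN_ALIGN(res->backup_size). A worked example with 4 KiB pages:

	/* PAGE_SIZE = 4096, PAGE_MASK = ~4095UL:
	 *   (5000 + 4095) & ~4095 = 9095 & ~4095 = 8192   -> two pages
	 * PFN_ALIGN(5000) evaluates to the same 8192.
	 */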
 364/**
 365 * vmw_resource_do_validate - Make a resource up-to-date and visible
 366 *                            to the device.
 367 *
 368 * @res:            The resource to make visible to the device.
 369 * @val_buf:        Information about a buffer possibly
 370 *                  containing backup data if a bind operation is needed.
 371 *
 372 * On hardware resource shortage, this function returns -EBUSY and
 373 * should be retried once resources have been freed up.
 374 */
 375static int vmw_resource_do_validate(struct vmw_resource *res,
 376				    struct ttm_validate_buffer *val_buf)
 377{
 378	int ret = 0;
 379	const struct vmw_res_func *func = res->func;
 380
 381	if (unlikely(res->id == -1)) {
 382		ret = func->create(res);
 383		if (unlikely(ret != 0))
 384			return ret;
 385	}
 386
 387	if (func->bind &&
 388	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 389	      val_buf->bo != NULL) ||
 390	     (!func->needs_backup && val_buf->bo != NULL))) {
 391		ret = func->bind(res, val_buf);
 392		if (unlikely(ret != 0))
 393			goto out_bind_failed;
 394		if (func->needs_backup)
 395			vmw_resource_mob_attach(res);
 396	}
 397
 398	return 0;
 399
 400out_bind_failed:
 401	func->destroy(res);
 402
 403	return ret;
 404}
 405
 406/**
 407 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 408 * command submission.
 409 *
 410 * @res:               Pointer to the struct vmw_resource to unreserve.
 411 * @dirty_set:         Change dirty status of the resource.
 412 * @dirty:             When changing dirty status indicates the new status.
 413 * @switch_backup:     Backup buffer has been switched.
 414 * @new_backup:        Pointer to new backup buffer if command submission
 415 *                     switched. May be NULL.
 416 * @new_backup_offset: New backup offset if @switch_backup is true.
 417 *
 418 * Currently unreserving a resource means putting it back on the device's
 419 * resource lru list, so that it can be evicted if necessary.
 420 */
 421void vmw_resource_unreserve(struct vmw_resource *res,
 422			    bool dirty_set,
 423			    bool dirty,
 424			    bool switch_backup,
 425			    struct vmw_buffer_object *new_backup,
 426			    unsigned long new_backup_offset)
 427{
 428	struct vmw_private *dev_priv = res->dev_priv;
 429
 430	if (!list_empty(&res->lru_head))
 431		return;
 432
 433	if (switch_backup && new_backup != res->backup) {
 434		if (res->backup) {
 435			vmw_resource_mob_detach(res);
 436			vmw_bo_unreference(&res->backup);
 437		}
 438
 439		if (new_backup) {
 440			res->backup = vmw_bo_reference(new_backup);
 441			vmw_resource_mob_attach(res);
 442		} else {
 443			res->backup = NULL;
 444		}
 445	}
 446	if (switch_backup)
 447		res->backup_offset = new_backup_offset;
 448
 449	if (dirty_set)
 450		res->res_dirty = dirty;
 451
 452	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 453		return;
 454
 455	spin_lock(&dev_priv->resource_lock);
 456	list_add_tail(&res->lru_head,
 457		      &res->dev_priv->res_lru[res->func->res_type]);
 458	spin_unlock(&dev_priv->resource_lock);
 459}
 460
 461/**
 462 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 463 *                             for a resource and in that case, allocate
 464 *                             one, reserve and validate it.
 465 *
  466 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 467 * @res:            The resource for which to allocate a backup buffer.
 468 * @interruptible:  Whether any sleeps during allocation should be
 469 *                  performed while interruptible.
 470 * @val_buf:        On successful return contains data about the
 471 *                  reserved and validated backup buffer.
 472 */
 473static int
 474vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 475			  struct vmw_resource *res,
 476			  bool interruptible,
 477			  struct ttm_validate_buffer *val_buf)
 478{
 479	struct ttm_operation_ctx ctx = { true, false };
 480	struct list_head val_list;
 481	bool backup_dirty = false;
 482	int ret;
 483
 484	if (unlikely(res->backup == NULL)) {
 485		ret = vmw_resource_buf_alloc(res, interruptible);
 486		if (unlikely(ret != 0))
 487			return ret;
 488	}
 489
 490	INIT_LIST_HEAD(&val_list);
 491	ttm_bo_get(&res->backup->base);
 492	val_buf->bo = &res->backup->base;
 493	val_buf->num_shared = 0;
 494	list_add_tail(&val_buf->head, &val_list);
 495	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
 496				     true);
 497	if (unlikely(ret != 0))
 498		goto out_no_reserve;
 499
 500	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 501		return 0;
 502
 503	backup_dirty = res->backup_dirty;
 504	ret = ttm_bo_validate(&res->backup->base,
 505			      res->func->backup_placement,
 506			      &ctx);
 507
 508	if (unlikely(ret != 0))
 509		goto out_no_validate;
 510
 511	return 0;
 512
 513out_no_validate:
 514	ttm_eu_backoff_reservation(ticket, &val_list);
 515out_no_reserve:
 516	ttm_bo_put(val_buf->bo);
 517	val_buf->bo = NULL;
 518	if (backup_dirty)
 519		vmw_bo_unreference(&res->backup);
 520
 521	return ret;
 522}
 523
 524/**
 525 * vmw_resource_reserve - Reserve a resource for command submission
 526 *
 527 * @res:            The resource to reserve.
 528 *
  529 * This function takes the resource off the LRU list and makes sure
 530 * a backup buffer is present for guest-backed resources. However,
 531 * the buffer may not be bound to the resource at this point.
 532 *
 533 */
 534int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 535			 bool no_backup)
 536{
 537	struct vmw_private *dev_priv = res->dev_priv;
 538	int ret;
 539
 540	spin_lock(&dev_priv->resource_lock);
 541	list_del_init(&res->lru_head);
 542	spin_unlock(&dev_priv->resource_lock);
 543
 544	if (res->func->needs_backup && res->backup == NULL &&
 545	    !no_backup) {
 546		ret = vmw_resource_buf_alloc(res, interruptible);
 547		if (unlikely(ret != 0)) {
 548			DRM_ERROR("Failed to allocate a backup buffer "
  549				  "of size %lu bytes\n",
 550				  (unsigned long) res->backup_size);
 551			return ret;
 552		}
 553	}
 554
 555	return 0;
 556}
 557
 558/**
 559 * vmw_resource_backoff_reservation - Unreserve and unreference a
 560 *                                    backup buffer
  561 *
 562 * @ticket:         The ww acquire ctx used for reservation.
 563 * @val_buf:        Backup buffer information.
 564 */
 565static void
 566vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 567				 struct ttm_validate_buffer *val_buf)
 568{
 569	struct list_head val_list;
 570
 571	if (likely(val_buf->bo == NULL))
 572		return;
 573
 574	INIT_LIST_HEAD(&val_list);
 575	list_add_tail(&val_buf->head, &val_list);
 576	ttm_eu_backoff_reservation(ticket, &val_list);
 577	ttm_bo_put(val_buf->bo);
 578	val_buf->bo = NULL;
 579}
 580
 581/**
 582 * vmw_resource_do_evict - Evict a resource, and transfer its data
 583 *                         to a backup buffer.
 584 *
 585 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 586 * @res:            The resource to evict.
 587 * @interruptible:  Whether to wait interruptible.
 588 */
 589static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 590				 struct vmw_resource *res, bool interruptible)
 591{
 592	struct ttm_validate_buffer val_buf;
 593	const struct vmw_res_func *func = res->func;
 594	int ret;
 595
 596	BUG_ON(!func->may_evict);
 597
 598	val_buf.bo = NULL;
 599	val_buf.num_shared = 0;
 600	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 601	if (unlikely(ret != 0))
 602		return ret;
 603
 604	if (unlikely(func->unbind != NULL &&
 605		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 606		ret = func->unbind(res, res->res_dirty, &val_buf);
 607		if (unlikely(ret != 0))
 608			goto out_no_unbind;
 609		vmw_resource_mob_detach(res);
 610	}
 611	ret = func->destroy(res);
 612	res->backup_dirty = true;
 613	res->res_dirty = false;
 614out_no_unbind:
 615	vmw_resource_backoff_reservation(ticket, &val_buf);
 616
 617	return ret;
 618}
 619
 620
 621/**
 622 * vmw_resource_validate - Make a resource up-to-date and visible
 623 *                         to the device.
 624 * @res: The resource to make visible to the device.
 625 * @intr: Perform waits interruptible if possible.
 626 *
  627 * On successful return, any backup DMA buffer pointed to by @res->backup will
 628 * be reserved and validated.
 629 * On hardware resource shortage, this function will repeatedly evict
 630 * resources of the same type until the validation succeeds.
 631 *
 632 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 633 * on failure.
 634 */
 635int vmw_resource_validate(struct vmw_resource *res, bool intr)
 636{
 637	int ret;
 638	struct vmw_resource *evict_res;
 639	struct vmw_private *dev_priv = res->dev_priv;
 640	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 641	struct ttm_validate_buffer val_buf;
 642	unsigned err_count = 0;
 643
 644	if (!res->func->create)
 645		return 0;
 646
 647	val_buf.bo = NULL;
 648	val_buf.num_shared = 0;
 649	if (res->backup)
 650		val_buf.bo = &res->backup->base;
 651	do {
 652		ret = vmw_resource_do_validate(res, &val_buf);
 653		if (likely(ret != -EBUSY))
 654			break;
 655
 656		spin_lock(&dev_priv->resource_lock);
 657		if (list_empty(lru_list) || !res->func->may_evict) {
  658			DRM_ERROR("Out of device resources "
 659				  "for %s.\n", res->func->type_name);
 660			ret = -EBUSY;
 661			spin_unlock(&dev_priv->resource_lock);
 662			break;
 663		}
 664
 665		evict_res = vmw_resource_reference
 666			(list_first_entry(lru_list, struct vmw_resource,
 667					  lru_head));
 668		list_del_init(&evict_res->lru_head);
 669
 670		spin_unlock(&dev_priv->resource_lock);
 671
 672		/* Trylock backup buffers with a NULL ticket. */
 673		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 674		if (unlikely(ret != 0)) {
 675			spin_lock(&dev_priv->resource_lock);
 676			list_add_tail(&evict_res->lru_head, lru_list);
 677			spin_unlock(&dev_priv->resource_lock);
 678			if (ret == -ERESTARTSYS ||
 679			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 680				vmw_resource_unreference(&evict_res);
 681				goto out_no_validate;
 682			}
 683		}
 684
 685		vmw_resource_unreference(&evict_res);
 686	} while (1);
 687
 688	if (unlikely(ret != 0))
 689		goto out_no_validate;
 690	else if (!res->func->needs_backup && res->backup) {
 691		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 692		vmw_bo_unreference(&res->backup);
 693	}
 694
 695	return 0;
 696
 697out_no_validate:
 698	return ret;
 699}
 700
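The do/while loop above interleaves validation with LRU eviction: each -EBUSY from vmw_resource_do_validate() evicts the least recently used resource of the same type and retries, tolerating up to VMW_RES_EVICT_ERR_COUNT transient eviction failures but aborting immediately on -ERESTARTSYS. The control flow, distilled (illustrative pseudocode; locking, refcounting and list handling are elided, and pick_lru_victim() is a hypothetical placeholder):

	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (ret != -EBUSY)
			break;			/* Success, or a hard error. */

		evict_res = pick_lru_victim();	/* hypothetical helper */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (ret == -ERESTARTSYS ||
		    (ret && ++err_count > VMW_RES_EVICT_ERR_COUNT))
			break;			/* Give up; victim returns to the LRU. */
	} while (1);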
 701
 702/**
 703 * vmw_resource_unbind_list
 704 *
 705 * @vbo: Pointer to the current backing MOB.
 706 *
 707 * Evicts the Guest Backed hardware resource if the backup
 708 * buffer is being moved out of MOB memory.
 709 * Note that this function will not race with the resource
 710 * validation code, since resource validation and eviction
 711 * both require the backup buffer to be reserved.
 712 */
 713void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 714{
 715
 716	struct vmw_resource *res, *next;
 717	struct ttm_validate_buffer val_buf = {
 718		.bo = &vbo->base,
 719		.num_shared = 0
 720	};
 721
 722	dma_resv_assert_held(vbo->base.base.resv);
 723	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 724		if (!res->func->unbind)
 725			continue;
 726
 727		(void) res->func->unbind(res, res->res_dirty, &val_buf);
 728		res->backup_dirty = true;
 729		res->res_dirty = false;
 730		vmw_resource_mob_detach(res);
 731	}
 732
 733	(void) ttm_bo_wait(&vbo->base, false, false);
 734}
 735
 736
 737/**
 738 * vmw_query_readback_all - Read back cached query states
 739 *
 740 * @dx_query_mob: Buffer containing the DX query MOB
 741 *
 742 * Read back cached states from the device if they exist.  This function
  743 * assumes binding_mutex is held.
 744 */
 745int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 746{
 747	struct vmw_resource *dx_query_ctx;
 748	struct vmw_private *dev_priv;
 749	struct {
 750		SVGA3dCmdHeader header;
 751		SVGA3dCmdDXReadbackAllQuery body;
 752	} *cmd;
 753
 754
 755	/* No query bound, so do nothing */
 756	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 757		return 0;
 758
 759	dx_query_ctx = dx_query_mob->dx_query_ctx;
 760	dev_priv     = dx_query_ctx->dev_priv;
 761
 762	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 763	if (unlikely(cmd == NULL))
 764		return -ENOMEM;
 765
 766	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 767	cmd->header.size = sizeof(cmd->body);
 768	cmd->body.cid    = dx_query_ctx->id;
 769
 770	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 771
  772	/* Triggers a rebind the next time the affected context is bound */
 773	dx_query_mob->dx_query_ctx = NULL;
 774
 775	return 0;
 776}
 777
 778
 779
 780/**
 781 * vmw_query_move_notify - Read back cached query states
 782 *
 783 * @bo: The TTM buffer object about to move.
 784 * @mem: The memory region @bo is moving to.
 785 *
 786 * Called before the query MOB is swapped out to read back cached query
 787 * states from the device.
 788 */
 789void vmw_query_move_notify(struct ttm_buffer_object *bo,
 790			   struct ttm_mem_reg *mem)
 791{
 792	struct vmw_buffer_object *dx_query_mob;
 793	struct ttm_bo_device *bdev = bo->bdev;
 794	struct vmw_private *dev_priv;
 795
 796
 797	dev_priv = container_of(bdev, struct vmw_private, bdev);
 798
 799	mutex_lock(&dev_priv->binding_mutex);
 800
 801	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
 802	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
 803		mutex_unlock(&dev_priv->binding_mutex);
 804		return;
 805	}
 806
 807	/* If BO is being moved from MOB to system memory */
 808	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
 809		struct vmw_fence_obj *fence;
 810
 811		(void) vmw_query_readback_all(dx_query_mob);
 812		mutex_unlock(&dev_priv->binding_mutex);
 813
 814		/* Create a fence and attach the BO to it */
 815		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 816		vmw_bo_fence_single(bo, fence);
 817
 818		if (fence != NULL)
 819			vmw_fence_obj_unreference(&fence);
 820
 821		(void) ttm_bo_wait(bo, false, false);
 822	} else
 823		mutex_unlock(&dev_priv->binding_mutex);
 824
 825}
 826
 827/**
 828 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 829 *
 830 * @res:            The resource being queried.
 831 */
 832bool vmw_resource_needs_backup(const struct vmw_resource *res)
 833{
 834	return res->func->needs_backup;
 835}
 836
 837/**
 838 * vmw_resource_evict_type - Evict all resources of a specific type
 839 *
 840 * @dev_priv:       Pointer to a device private struct
 841 * @type:           The resource type to evict
 842 *
  843 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 844 * try to evict all evictable resources of a specific type.
 845 */
 846static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 847				    enum vmw_res_type type)
 848{
 849	struct list_head *lru_list = &dev_priv->res_lru[type];
 850	struct vmw_resource *evict_res;
 851	unsigned err_count = 0;
 852	int ret;
 853	struct ww_acquire_ctx ticket;
 854
 855	do {
 856		spin_lock(&dev_priv->resource_lock);
 857
 858		if (list_empty(lru_list))
 859			goto out_unlock;
 860
 861		evict_res = vmw_resource_reference(
 862			list_first_entry(lru_list, struct vmw_resource,
 863					 lru_head));
 864		list_del_init(&evict_res->lru_head);
 865		spin_unlock(&dev_priv->resource_lock);
 866
  867		/* Wait-lock backup buffers with a ticket. */
 868		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 869		if (unlikely(ret != 0)) {
 870			spin_lock(&dev_priv->resource_lock);
 871			list_add_tail(&evict_res->lru_head, lru_list);
 872			spin_unlock(&dev_priv->resource_lock);
 873			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 874				vmw_resource_unreference(&evict_res);
 875				return;
 876			}
 877		}
 878
 879		vmw_resource_unreference(&evict_res);
 880	} while (1);
 881
 882out_unlock:
 883	spin_unlock(&dev_priv->resource_lock);
 884}
 885
 886/**
 887 * vmw_resource_evict_all - Evict all evictable resources
 888 *
 889 * @dev_priv:       Pointer to a device private struct
 890 *
  891 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 892 * evict all evictable resources. In particular this means that all
 893 * guest-backed resources that are registered with the device are
 894 * evicted and the OTable becomes clean.
 895 */
 896void vmw_resource_evict_all(struct vmw_private *dev_priv)
 897{
 898	enum vmw_res_type type;
 899
 900	mutex_lock(&dev_priv->cmdbuf_mutex);
 901
 902	for (type = 0; type < vmw_res_max; ++type)
 903		vmw_resource_evict_type(dev_priv, type);
 904
 905	mutex_unlock(&dev_priv->cmdbuf_mutex);
 906}
 907
 908/**
 909 * vmw_resource_pin - Add a pin reference on a resource
 910 *
 911 * @res: The resource to add a pin reference on
 912 *
 913 * This function adds a pin reference, and if needed validates the resource.
 914 * Having a pin reference means that the resource can never be evicted, and
 915 * its id will never change as long as there is a pin reference.
 916 * This function returns 0 on success and a negative error code on failure.
 917 */
 918int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 919{
 920	struct ttm_operation_ctx ctx = { interruptible, false };
 921	struct vmw_private *dev_priv = res->dev_priv;
 922	int ret;
 923
 924	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 925	mutex_lock(&dev_priv->cmdbuf_mutex);
 926	ret = vmw_resource_reserve(res, interruptible, false);
 927	if (ret)
 928		goto out_no_reserve;
 929
 930	if (res->pin_count == 0) {
 931		struct vmw_buffer_object *vbo = NULL;
 932
 933		if (res->backup) {
 934			vbo = res->backup;
 935
 936			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
 937			if (!vbo->pin_count) {
 938				ret = ttm_bo_validate
 939					(&vbo->base,
 940					 res->func->backup_placement,
 941					 &ctx);
 942				if (ret) {
 943					ttm_bo_unreserve(&vbo->base);
 944					goto out_no_validate;
 945				}
 946			}
 947
 948			/* Do we really need to pin the MOB as well? */
 949			vmw_bo_pin_reserved(vbo, true);
 950		}
 951		ret = vmw_resource_validate(res, interruptible);
 952		if (vbo)
 953			ttm_bo_unreserve(&vbo->base);
 954		if (ret)
 955			goto out_no_validate;
 956	}
 957	res->pin_count++;
 958
 959out_no_validate:
 960	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 961out_no_reserve:
 962	mutex_unlock(&dev_priv->cmdbuf_mutex);
 963	ttm_write_unlock(&dev_priv->reservation_sem);
 964
 965	return ret;
 966}
 967
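vmw_resource_pin()/vmw_resource_unpin() form a simple pin-count bracket: while the count is non-zero the resource cannot be evicted and its id stays stable. A minimal usage sketch (illustrative):

	static int example_with_pinned_res(struct vmw_resource *res)
	{
		int ret = vmw_resource_pin(res, true);	/* interruptible */

		if (ret)
			return ret;

		/* The resource is resident with a stable id here. */

		vmw_resource_unpin(res);
		return 0;
	}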
 968/**
 969 * vmw_resource_unpin - Remove a pin reference from a resource
 970 *
 971 * @res: The resource to remove a pin reference from
 972 *
 973 * Having a pin reference means that the resource can never be evicted, and
 974 * its id will never change as long as there is a pin reference.
 975 */
 976void vmw_resource_unpin(struct vmw_resource *res)
 977{
 978	struct vmw_private *dev_priv = res->dev_priv;
 979	int ret;
 980
 981	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
 982	mutex_lock(&dev_priv->cmdbuf_mutex);
 983
 984	ret = vmw_resource_reserve(res, false, true);
 985	WARN_ON(ret);
 986
 987	WARN_ON(res->pin_count == 0);
 988	if (--res->pin_count == 0 && res->backup) {
 989		struct vmw_buffer_object *vbo = res->backup;
 990
 991		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
 992		vmw_bo_pin_reserved(vbo, false);
 993		ttm_bo_unreserve(&vbo->base);
 994	}
 995
 996	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 997
 998	mutex_unlock(&dev_priv->cmdbuf_mutex);
 999	ttm_read_unlock(&dev_priv->reservation_sem);
1000}
1001
1002/**
1003 * vmw_res_type - Return the resource type
1004 *
1005 * @res: Pointer to the resource
1006 */
1007enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1008{
1009	return res->func->res_type;
1010}
v6.2: drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_resource_priv.h"
  31#include "vmwgfx_binding.h"
  32#include "vmwgfx_drv.h"
  33
  34#define VMW_RES_EVICT_ERR_COUNT 10
  35
  36/**
  37 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  38 * @res: The resource
  39 */
  40void vmw_resource_mob_attach(struct vmw_resource *res)
  41{
  42	struct vmw_buffer_object *backup = res->backup;
  43	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
  44
  45	dma_resv_assert_held(res->backup->base.base.resv);
  46	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  47		res->func->prio;
  48
  49	while (*new) {
  50		struct vmw_resource *this =
  51			container_of(*new, struct vmw_resource, mob_node);
  52
  53		parent = *new;
  54		new = (res->backup_offset < this->backup_offset) ?
  55			&((*new)->rb_left) : &((*new)->rb_right);
  56	}
  57
  58	rb_link_node(&res->mob_node, parent, new);
  59	rb_insert_color(&res->mob_node, &backup->res_tree);
  60
  61	vmw_bo_prio_add(backup, res->used_prio);
  62}
  63
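Compared with the v5.4 list-based attach above, v6.2 keeps the resources attached to a MOB in an rb-tree keyed on backup_offset (ties go right), so they stay ordered by their offset within the backup buffer. A matching lookup sketch over that tree (illustrative; the driver's real consumers walk offset ranges rather than doing equality lookups):

	static struct vmw_resource *
	example_find_res(struct vmw_buffer_object *backup, unsigned long offset)
	{
		struct rb_node *node = backup->res_tree.rb_node;

		while (node) {
			struct vmw_resource *this =
				container_of(node, struct vmw_resource,
					     mob_node);

			if (offset < this->backup_offset)
				node = node->rb_left;
			else if (offset > this->backup_offset)
				node = node->rb_right;
			else
				return this;	/* One of possibly several. */
		}
		return NULL;
	}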
  64/**
  65 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  66 * @res: The resource
  67 */
  68void vmw_resource_mob_detach(struct vmw_resource *res)
  69{
  70	struct vmw_buffer_object *backup = res->backup;
  71
  72	dma_resv_assert_held(backup->base.base.resv);
  73	if (vmw_resource_mob_attached(res)) {
  74		rb_erase(&res->mob_node, &backup->res_tree);
  75		RB_CLEAR_NODE(&res->mob_node);
  76		vmw_bo_prio_del(backup, res->used_prio);
  77	}
  78}
  79
  80struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  81{
  82	kref_get(&res->kref);
  83	return res;
  84}
  85
  86struct vmw_resource *
  87vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  88{
  89	return kref_get_unless_zero(&res->kref) ? res : NULL;
  90}
  91
  92/**
  93 * vmw_resource_release_id - release a resource id to the id manager.
  94 *
  95 * @res: Pointer to the resource.
  96 *
  97 * Release the resource id to the resource id manager and set it to -1
  98 */
  99void vmw_resource_release_id(struct vmw_resource *res)
 100{
 101	struct vmw_private *dev_priv = res->dev_priv;
 102	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 103
 104	spin_lock(&dev_priv->resource_lock);
 105	if (res->id != -1)
 106		idr_remove(idr, res->id);
 107	res->id = -1;
 108	spin_unlock(&dev_priv->resource_lock);
 109}
 110
 111static void vmw_resource_release(struct kref *kref)
 112{
 113	struct vmw_resource *res =
 114	    container_of(kref, struct vmw_resource, kref);
 115	struct vmw_private *dev_priv = res->dev_priv;
 116	int id;
 117	int ret;
 118	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 119
 120	spin_lock(&dev_priv->resource_lock);
 121	list_del_init(&res->lru_head);
 122	spin_unlock(&dev_priv->resource_lock);
 123	if (res->backup) {
 124		struct ttm_buffer_object *bo = &res->backup->base;
 125
 126		ret = ttm_bo_reserve(bo, false, false, NULL);
 127		BUG_ON(ret);
 128		if (vmw_resource_mob_attached(res) &&
 129		    res->func->unbind != NULL) {
 130			struct ttm_validate_buffer val_buf;
 131
 132			val_buf.bo = bo;
 133			val_buf.num_shared = 0;
 134			res->func->unbind(res, false, &val_buf);
 135		}
 136		res->backup_dirty = false;
 137		vmw_resource_mob_detach(res);
 138		if (res->dirty)
 139			res->func->dirty_free(res);
 140		if (res->coherent)
 141			vmw_bo_dirty_release(res->backup);
 142		ttm_bo_unreserve(bo);
 143		vmw_bo_unreference(&res->backup);
 144	}
 145
 146	if (likely(res->hw_destroy != NULL)) {
 147		mutex_lock(&dev_priv->binding_mutex);
 148		vmw_binding_res_list_kill(&res->binding_head);
 149		mutex_unlock(&dev_priv->binding_mutex);
 150		res->hw_destroy(res);
 151	}
 152
 153	id = res->id;
 154	if (res->res_free != NULL)
 155		res->res_free(res);
 156	else
 157		kfree(res);
 158
 159	spin_lock(&dev_priv->resource_lock);
 160	if (id != -1)
 161		idr_remove(idr, id);
 162	spin_unlock(&dev_priv->resource_lock);
 163}
 164
 165void vmw_resource_unreference(struct vmw_resource **p_res)
 166{
 167	struct vmw_resource *res = *p_res;
 168
 169	*p_res = NULL;
 170	kref_put(&res->kref, vmw_resource_release);
 171}
 172
 173
 174/**
  175 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 176 *
 177 * @res: Pointer to the resource.
 178 *
  179 * Allocate the lowest free resource id from the id manager, and set
 180 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 181 */
 182int vmw_resource_alloc_id(struct vmw_resource *res)
 183{
 184	struct vmw_private *dev_priv = res->dev_priv;
 185	int ret;
 186	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 187
 188	BUG_ON(res->id != -1);
 189
 190	idr_preload(GFP_KERNEL);
 191	spin_lock(&dev_priv->resource_lock);
 192
 193	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 194	if (ret >= 0)
 195		res->id = ret;
 196
 197	spin_unlock(&dev_priv->resource_lock);
 198	idr_preload_end();
 199	return ret < 0 ? ret : 0;
 200}
 201
 202/**
 203 * vmw_resource_init - initialize a struct vmw_resource
 204 *
 205 * @dev_priv:       Pointer to a device private struct.
 206 * @res:            The struct vmw_resource to initialize.
 207 * @delay_id:       Boolean whether to defer device id allocation until
 208 *                  the first validation.
 209 * @res_free:       Resource destructor.
 210 * @func:           Resource function table.
 211 */
 212int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 213		      bool delay_id,
 214		      void (*res_free) (struct vmw_resource *res),
 215		      const struct vmw_res_func *func)
 216{
 217	kref_init(&res->kref);
 218	res->hw_destroy = NULL;
 219	res->res_free = res_free;
 220	res->dev_priv = dev_priv;
 221	res->func = func;
 222	RB_CLEAR_NODE(&res->mob_node);
 223	INIT_LIST_HEAD(&res->lru_head);
 224	INIT_LIST_HEAD(&res->binding_head);
 225	res->id = -1;
 226	res->backup = NULL;
 227	res->backup_offset = 0;
 228	res->backup_dirty = false;
 229	res->res_dirty = false;
 230	res->coherent = false;
 231	res->used_prio = 3;
 232	res->dirty = NULL;
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239
 240/**
 241 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 242 * TTM user-space handle and perform basic type checks
 243 *
 244 * @dev_priv:     Pointer to a device private struct
 245 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 246 * @handle:       The TTM user-space handle
 247 * @converter:    Pointer to an object describing the resource type
 248 * @p_res:        On successful return the location pointed to will contain
 249 *                a pointer to a refcounted struct vmw_resource.
 250 *
 251 * If the handle can't be found or is associated with an incorrect resource
 252 * type, -EINVAL will be returned.
 253 */
 254int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 255				    struct ttm_object_file *tfile,
 256				    uint32_t handle,
 257				    const struct vmw_user_resource_conv
 258				    *converter,
 259				    struct vmw_resource **p_res)
 260{
 261	struct ttm_base_object *base;
 262	struct vmw_resource *res;
 263	int ret = -EINVAL;
 264
 265	base = ttm_base_object_lookup(tfile, handle);
 266	if (unlikely(base == NULL))
 267		return -EINVAL;
 268
 269	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 270		goto out_bad_resource;
 271
 272	res = converter->base_obj_to_res(base);
 273	kref_get(&res->kref);
 274
 275	*p_res = res;
 276	ret = 0;
 277
 278out_bad_resource:
 279	ttm_base_object_unref(&base);
 280
 281	return ret;
 282}
 283
 284/*
  285 * Helper function that looks up either a surface or a bo.
  286 *
  287 * The pointers pointed at by out_surf and out_buf need to be NULL.
 288 */
 289int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 290			   struct drm_file *filp,
 291			   uint32_t handle,
 292			   struct vmw_surface **out_surf,
 293			   struct vmw_buffer_object **out_buf)
 294{
 295	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 296	struct vmw_resource *res;
 297	int ret;
 298
 299	BUG_ON(*out_surf || *out_buf);
 300
 301	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 302					      user_surface_converter,
 303					      &res);
 304	if (!ret) {
 305		*out_surf = vmw_res_to_srf(res);
 306		return 0;
 307	}
 308
 309	*out_surf = NULL;
 310	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 311	return ret;
 312}
 313
 314/**
 315 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 316 *
 317 * @res:            The resource for which to allocate a backup buffer.
 318 * @interruptible:  Whether any sleeps during allocation should be
 319 *                  performed while interruptible.
 320 */
 321static int vmw_resource_buf_alloc(struct vmw_resource *res,
 322				  bool interruptible)
 323{
 324	unsigned long size = PFN_ALIGN(res->backup_size);
 325	struct vmw_buffer_object *backup;
 326	int ret;
 327
 328	if (likely(res->backup)) {
 329		BUG_ON(res->backup->base.base.size < size);
 330		return 0;
 331	}
 332
 333	ret = vmw_bo_create(res->dev_priv, res->backup_size,
 334			    res->func->backup_placement,
 335			    interruptible, false,
 336			    &vmw_bo_bo_free, &backup);
 337	if (unlikely(ret != 0))
 338		goto out_no_bo;
 339
 340	res->backup = backup;
 341
 342out_no_bo:
 343	return ret;
 344}
 345
 346/**
 347 * vmw_resource_do_validate - Make a resource up-to-date and visible
 348 *                            to the device.
 349 *
 350 * @res:            The resource to make visible to the device.
 351 * @val_buf:        Information about a buffer possibly
 352 *                  containing backup data if a bind operation is needed.
 353 * @dirtying:       Transfer dirty regions.
 354 *
 355 * On hardware resource shortage, this function returns -EBUSY and
 356 * should be retried once resources have been freed up.
 357 */
 358static int vmw_resource_do_validate(struct vmw_resource *res,
 359				    struct ttm_validate_buffer *val_buf,
 360				    bool dirtying)
 361{
 362	int ret = 0;
 363	const struct vmw_res_func *func = res->func;
 364
 365	if (unlikely(res->id == -1)) {
 366		ret = func->create(res);
 367		if (unlikely(ret != 0))
 368			return ret;
 369	}
 370
 371	if (func->bind &&
 372	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 373	      val_buf->bo != NULL) ||
 374	     (!func->needs_backup && val_buf->bo != NULL))) {
 375		ret = func->bind(res, val_buf);
 376		if (unlikely(ret != 0))
 377			goto out_bind_failed;
 378		if (func->needs_backup)
 379			vmw_resource_mob_attach(res);
 380	}
 381
 382	/*
 383	 * Handle the case where the backup mob is marked coherent but
 384	 * the resource isn't.
 385	 */
 386	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 387	    !res->coherent) {
 388		if (res->backup->dirty && !res->dirty) {
 389			ret = func->dirty_alloc(res);
 390			if (ret)
 391				return ret;
 392		} else if (!res->backup->dirty && res->dirty) {
 393			func->dirty_free(res);
 394		}
 395	}
 396
 397	/*
 398	 * Transfer the dirty regions to the resource and update
 399	 * the resource.
 400	 */
 401	if (res->dirty) {
 402		if (dirtying && !res->res_dirty) {
 403			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
 404			pgoff_t end = __KERNEL_DIV_ROUND_UP
 405				(res->backup_offset + res->backup_size,
 406				 PAGE_SIZE);
 407
 408			vmw_bo_dirty_unmap(res->backup, start, end);
 409		}
 410
 411		vmw_bo_dirty_transfer_to_res(res);
 412		return func->dirty_sync(res);
 413	}
 414
 415	return 0;
 416
 417out_bind_failed:
 418	func->destroy(res);
 419
 420	return ret;
 421}
 422
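In the dirty-transfer branch above, the byte range [backup_offset, backup_offset + backup_size) is converted to a half-open page-frame range before vmw_bo_dirty_unmap(). A worked example with 4 KiB pages (PAGE_SHIFT = 12):

	/* backup_offset = 8192, backup_size = 5000:
	 *   start = 8192 >> 12                         = 2
	 *   end   = __KERNEL_DIV_ROUND_UP(13192, 4096) = 4
	 * so pages 2 and 3 of the MOB are unmapped from dirty tracking.
	 */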
 423/**
 424 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 425 * command submission.
 426 *
 427 * @res:               Pointer to the struct vmw_resource to unreserve.
 428 * @dirty_set:         Change dirty status of the resource.
 429 * @dirty:             When changing dirty status indicates the new status.
 430 * @switch_backup:     Backup buffer has been switched.
 431 * @new_backup:        Pointer to new backup buffer if command submission
 432 *                     switched. May be NULL.
 433 * @new_backup_offset: New backup offset if @switch_backup is true.
 434 *
 435 * Currently unreserving a resource means putting it back on the device's
 436 * resource lru list, so that it can be evicted if necessary.
 437 */
 438void vmw_resource_unreserve(struct vmw_resource *res,
 439			    bool dirty_set,
 440			    bool dirty,
 441			    bool switch_backup,
 442			    struct vmw_buffer_object *new_backup,
 443			    unsigned long new_backup_offset)
 444{
 445	struct vmw_private *dev_priv = res->dev_priv;
 446
 447	if (!list_empty(&res->lru_head))
 448		return;
 449
 450	if (switch_backup && new_backup != res->backup) {
 451		if (res->backup) {
 452			vmw_resource_mob_detach(res);
 453			if (res->coherent)
 454				vmw_bo_dirty_release(res->backup);
 455			vmw_bo_unreference(&res->backup);
 456		}
 457
 458		if (new_backup) {
 459			res->backup = vmw_bo_reference(new_backup);
 460
 461			/*
 462			 * The validation code should already have added a
 463			 * dirty tracker here.
 464			 */
 465			WARN_ON(res->coherent && !new_backup->dirty);
 466
 467			vmw_resource_mob_attach(res);
 468		} else {
 469			res->backup = NULL;
 470		}
 471	} else if (switch_backup && res->coherent) {
 472		vmw_bo_dirty_release(res->backup);
 473	}
 474
 475	if (switch_backup)
 476		res->backup_offset = new_backup_offset;
 477
 478	if (dirty_set)
 479		res->res_dirty = dirty;
 480
 481	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 482		return;
 483
 484	spin_lock(&dev_priv->resource_lock);
 485	list_add_tail(&res->lru_head,
 486		      &res->dev_priv->res_lru[res->func->res_type]);
 487	spin_unlock(&dev_priv->resource_lock);
 488}
 489
 490/**
 491 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 492 *                             for a resource and in that case, allocate
 493 *                             one, reserve and validate it.
 494 *
 495 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 496 * @res:            The resource for which to allocate a backup buffer.
 497 * @interruptible:  Whether any sleeps during allocation should be
 498 *                  performed while interruptible.
 499 * @val_buf:        On successful return contains data about the
 500 *                  reserved and validated backup buffer.
 501 */
 502static int
 503vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 504			  struct vmw_resource *res,
 505			  bool interruptible,
 506			  struct ttm_validate_buffer *val_buf)
 507{
 508	struct ttm_operation_ctx ctx = { true, false };
 509	struct list_head val_list;
 510	bool backup_dirty = false;
 511	int ret;
 512
 513	if (unlikely(res->backup == NULL)) {
 514		ret = vmw_resource_buf_alloc(res, interruptible);
 515		if (unlikely(ret != 0))
 516			return ret;
 517	}
 518
 519	INIT_LIST_HEAD(&val_list);
 520	ttm_bo_get(&res->backup->base);
 521	val_buf->bo = &res->backup->base;
 522	val_buf->num_shared = 0;
 523	list_add_tail(&val_buf->head, &val_list);
 524	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 525	if (unlikely(ret != 0))
 526		goto out_no_reserve;
 527
 528	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 529		return 0;
 530
 531	backup_dirty = res->backup_dirty;
 532	ret = ttm_bo_validate(&res->backup->base,
 533			      res->func->backup_placement,
 534			      &ctx);
 535
 536	if (unlikely(ret != 0))
 537		goto out_no_validate;
 538
 539	return 0;
 540
 541out_no_validate:
 542	ttm_eu_backoff_reservation(ticket, &val_list);
 543out_no_reserve:
 544	ttm_bo_put(val_buf->bo);
 545	val_buf->bo = NULL;
 546	if (backup_dirty)
 547		vmw_bo_unreference(&res->backup);
 548
 549	return ret;
 550}
 551
 552/*
 553 * vmw_resource_reserve - Reserve a resource for command submission
 554 *
 555 * @res:            The resource to reserve.
 556 *
  557 * This function takes the resource off the LRU list and makes sure
 558 * a backup buffer is present for guest-backed resources. However,
 559 * the buffer may not be bound to the resource at this point.
 560 *
 561 */
 562int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 563			 bool no_backup)
 564{
 565	struct vmw_private *dev_priv = res->dev_priv;
 566	int ret;
 567
 568	spin_lock(&dev_priv->resource_lock);
 569	list_del_init(&res->lru_head);
 570	spin_unlock(&dev_priv->resource_lock);
 571
 572	if (res->func->needs_backup && res->backup == NULL &&
 573	    !no_backup) {
 574		ret = vmw_resource_buf_alloc(res, interruptible);
 575		if (unlikely(ret != 0)) {
 576			DRM_ERROR("Failed to allocate a backup buffer "
  577				  "of size %lu bytes\n",
 578				  (unsigned long) res->backup_size);
 579			return ret;
 580		}
 581	}
 582
 583	return 0;
 584}
 585
 586/**
 587 * vmw_resource_backoff_reservation - Unreserve and unreference a
 588 *                                    backup buffer
  589 *
 590 * @ticket:         The ww acquire ctx used for reservation.
 591 * @val_buf:        Backup buffer information.
 592 */
 593static void
 594vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 595				 struct ttm_validate_buffer *val_buf)
 596{
 597	struct list_head val_list;
 598
 599	if (likely(val_buf->bo == NULL))
 600		return;
 601
 602	INIT_LIST_HEAD(&val_list);
 603	list_add_tail(&val_buf->head, &val_list);
 604	ttm_eu_backoff_reservation(ticket, &val_list);
 605	ttm_bo_put(val_buf->bo);
 606	val_buf->bo = NULL;
 607}
 608
 609/**
 610 * vmw_resource_do_evict - Evict a resource, and transfer its data
 611 *                         to a backup buffer.
 612 *
 613 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 614 * @res:            The resource to evict.
 615 * @interruptible:  Whether to wait interruptible.
 616 */
 617static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 618				 struct vmw_resource *res, bool interruptible)
 619{
 620	struct ttm_validate_buffer val_buf;
 621	const struct vmw_res_func *func = res->func;
 622	int ret;
 623
 624	BUG_ON(!func->may_evict);
 625
 626	val_buf.bo = NULL;
 627	val_buf.num_shared = 0;
 628	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 629	if (unlikely(ret != 0))
 630		return ret;
 631
 632	if (unlikely(func->unbind != NULL &&
 633		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 634		ret = func->unbind(res, res->res_dirty, &val_buf);
 635		if (unlikely(ret != 0))
 636			goto out_no_unbind;
 637		vmw_resource_mob_detach(res);
 638	}
 639	ret = func->destroy(res);
 640	res->backup_dirty = true;
 641	res->res_dirty = false;
 642out_no_unbind:
 643	vmw_resource_backoff_reservation(ticket, &val_buf);
 644
 645	return ret;
 646}
 647
 648
 649/**
 650 * vmw_resource_validate - Make a resource up-to-date and visible
 651 *                         to the device.
 652 * @res: The resource to make visible to the device.
 653 * @intr: Perform waits interruptible if possible.
 654 * @dirtying: Pending GPU operation will dirty the resource
 655 *
 656 * On successful return, any backup DMA buffer pointed to by @res->backup will
 657 * be reserved and validated.
 658 * On hardware resource shortage, this function will repeatedly evict
 659 * resources of the same type until the validation succeeds.
 660 *
 661 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 662 * on failure.
 663 */
 664int vmw_resource_validate(struct vmw_resource *res, bool intr,
 665			  bool dirtying)
 666{
 667	int ret;
 668	struct vmw_resource *evict_res;
 669	struct vmw_private *dev_priv = res->dev_priv;
 670	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 671	struct ttm_validate_buffer val_buf;
 672	unsigned err_count = 0;
 673
 674	if (!res->func->create)
 675		return 0;
 676
 677	val_buf.bo = NULL;
 678	val_buf.num_shared = 0;
 679	if (res->backup)
 680		val_buf.bo = &res->backup->base;
 681	do {
 682		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 683		if (likely(ret != -EBUSY))
 684			break;
 685
 686		spin_lock(&dev_priv->resource_lock);
 687		if (list_empty(lru_list) || !res->func->may_evict) {
  688			DRM_ERROR("Out of device resources "
 689				  "for %s.\n", res->func->type_name);
 690			ret = -EBUSY;
 691			spin_unlock(&dev_priv->resource_lock);
 692			break;
 693		}
 694
 695		evict_res = vmw_resource_reference
 696			(list_first_entry(lru_list, struct vmw_resource,
 697					  lru_head));
 698		list_del_init(&evict_res->lru_head);
 699
 700		spin_unlock(&dev_priv->resource_lock);
 701
 702		/* Trylock backup buffers with a NULL ticket. */
 703		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 704		if (unlikely(ret != 0)) {
 705			spin_lock(&dev_priv->resource_lock);
 706			list_add_tail(&evict_res->lru_head, lru_list);
 707			spin_unlock(&dev_priv->resource_lock);
 708			if (ret == -ERESTARTSYS ||
 709			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 710				vmw_resource_unreference(&evict_res);
 711				goto out_no_validate;
 712			}
 713		}
 714
 715		vmw_resource_unreference(&evict_res);
 716	} while (1);
 717
 718	if (unlikely(ret != 0))
 719		goto out_no_validate;
 720	else if (!res->func->needs_backup && res->backup) {
 721		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 722		vmw_bo_unreference(&res->backup);
 723	}
 724
 725	return 0;
 726
 727out_no_validate:
 728	return ret;
 729}
 730
 731
 732/**
 733 * vmw_resource_unbind_list
 734 *
 735 * @vbo: Pointer to the current backing MOB.
 736 *
 737 * Evicts the Guest Backed hardware resource if the backup
 738 * buffer is being moved out of MOB memory.
 739 * Note that this function will not race with the resource
 740 * validation code, since resource validation and eviction
 741 * both require the backup buffer to be reserved.
 742 */
 743void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 744{
 745	struct ttm_validate_buffer val_buf = {
 746		.bo = &vbo->base,
 747		.num_shared = 0
 748	};
 749
 750	dma_resv_assert_held(vbo->base.base.resv);
 751	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 752		struct rb_node *node = vbo->res_tree.rb_node;
 753		struct vmw_resource *res =
 754			container_of(node, struct vmw_resource, mob_node);
 755
 756		if (!WARN_ON_ONCE(!res->func->unbind))
 757			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 758
 759		res->backup_dirty = true;
 760		res->res_dirty = false;
 761		vmw_resource_mob_detach(res);
 762	}
 763
 764	(void) ttm_bo_wait(&vbo->base, false, false);
 765}
 766
 767
 768/**
 769 * vmw_query_readback_all - Read back cached query states
 770 *
 771 * @dx_query_mob: Buffer containing the DX query MOB
 772 *
 773 * Read back cached states from the device if they exist.  This function
 774 * assumes binding_mutex is held.
 775 */
 776int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 777{
 778	struct vmw_resource *dx_query_ctx;
 779	struct vmw_private *dev_priv;
 780	struct {
 781		SVGA3dCmdHeader header;
 782		SVGA3dCmdDXReadbackAllQuery body;
 783	} *cmd;
 784
 785
 786	/* No query bound, so do nothing */
 787	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 788		return 0;
 789
 790	dx_query_ctx = dx_query_mob->dx_query_ctx;
 791	dev_priv     = dx_query_ctx->dev_priv;
 792
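	/* Reserve per-context command buffer space for the readback command. */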
	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

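		/* Wait for the readback to complete before the BO moves. */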
		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

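	/*
	 * Evict the list head until the LRU list is empty, giving up
	 * after VMW_RES_EVICT_ERR_COUNT failed evictions.
	 */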
	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ww ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 *
 * Return: Zero on success, negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
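		/*
		 * First pin reference: place and pin the backup buffer if
		 * there is one, then validate the resource itself.
		 */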
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
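
/*
 * Illustrative pin/unpin usage (a sketch, not taken from in-tree callers):
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *
 *	(the resource id now stays valid and the resource cannot be
 *	evicted until the pin reference is dropped)
 *
 *	vmw_resource_unpin(res);
 */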

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
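	/* The dirty tracker expects byte ranges; convert from page numbers. */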
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are OK to prefault
 *
 * Return: Zero on success, negative error code on failure.
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed prefaulting and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}