v6.13.7
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;
	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(gbo->tbo.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->guest_memory_offset < this->guest_memory_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &gbo->res_tree);
	vmw_bo_del_detached_resource(gbo, res);

	vmw_bo_prio_add(gbo, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;

	dma_resv_assert_held(gbo->tbo.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &gbo->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(gbo, res->used_prio);
	}
}

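/**
 * vmw_resource_reference - Take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Return: @res, with its refcount incremented.
 */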
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

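/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless its refcount has already dropped to zero
 *
 * @res: The resource to reference.
 *
 * Return: @res on success, NULL if the resource is already on its way to
 * destruction.
 */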
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

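/*
 * vmw_resource_release - Final release callback, invoked by kref_put() when
 * the last reference is dropped. Unbinds the resource from its backing MOB
 * if needed, destroys the hardware resource, releases the id and finally
 * frees the software object.
 */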
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->guest_memory_bo) {
		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->guest_memory_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->guest_memory_bo);
		ttm_bo_unreserve(bo);
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

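/**
 * vmw_resource_unreference - Drop a reference on a resource
 *
 * @p_res: Pointer to the resource pointer. The pointer is cleared, and the
 * resource is released via vmw_resource_release() if this was the last
 * reference.
 */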
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->guest_memory_bo = NULL;
	res->guest_memory_offset = 0;
	res->guest_memory_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
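/*
 * Illustrative sketch (not part of the original file): a resource type
 * typically embeds struct vmw_resource in its own structure and calls
 * vmw_resource_init() from its create path, deferring id allocation to the
 * first validation. The names my_res, my_res_free and my_res_func below are
 * hypothetical.
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 *	if (unlikely(ret != 0)) {
 *		my_res_free(&my_res->res);
 *		return ret;
 *	}
 */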


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(!base))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/*
 * Helper function that looks up either a surface or a bo.
 *
 * The pointers pointed at by @uo->surface and @uo->buffer need to be NULL
 * on entry.
 */
int vmw_user_object_lookup(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   u32 handle,
			   struct vmw_user_object *uo)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	WARN_ON(uo->surface || uo->buffer);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		uo->surface = vmw_res_to_srf(res);
		return 0;
	}

	uo->surface = NULL;
	ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
	if (!ret && !uo->buffer->is_dumb) {
		uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
							    uo->buffer,
							    handle);
		if (uo->surface)
			vmw_user_bo_unref(&uo->buffer);
	}

	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 *
 * @res:            The resource for which to allocate a guest memory buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->guest_memory_size);
	struct vmw_bo *gbo;
	struct vmw_bo_params bo_params = {
		.domain = res->func->domain,
		.busy_domain = res->func->busy_domain,
		.bo_type = ttm_bo_type_device,
		.size = res->guest_memory_size,
		.pin = false
	};
	int ret;

	if (likely(res->guest_memory_bo)) {
		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
		return 0;
	}

	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->guest_memory_bo = gbo;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
	      val_buf->bo) ||
	     (!func->needs_guest_memory && val_buf->bo))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_guest_memory)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->guest_memory_bo->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->guest_memory_bo->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->guest_memory_offset + res->guest_memory_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_guest_memory: Guest memory buffer has been switched.
 * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
 *                     switched. May be NULL.
 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_guest_memory,
			    struct vmw_bo *new_guest_memory_bo,
			    unsigned long new_guest_memory_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
		if (res->guest_memory_bo) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			vmw_user_bo_unref(&res->guest_memory_bo);
		}

		if (new_guest_memory_bo) {
			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->guest_memory_bo = NULL;
		}
	} else if (switch_guest_memory && res->coherent) {
		vmw_bo_dirty_release(res->guest_memory_bo);
	}

	if (switch_guest_memory)
		res->guest_memory_offset = new_guest_memory_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool guest_memory_dirty = false;
	int ret;

	if (unlikely(!res->guest_memory_bo)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->guest_memory_bo->tbo);
	val_buf->bo = &res->guest_memory_bo->tbo;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
		return 0;

	guest_memory_dirty = res->guest_memory_dirty;
	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
			     res->func->busy_domain);
	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
			      &res->guest_memory_bo->placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (guest_memory_dirty)
		vmw_user_bo_unref(&res->guest_memory_bo);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:              The resource to reserve.
 * @interruptible:    Whether any sleeps should be performed while
 *                    interruptible.
 * @no_guest_memory:  Whether to skip allocation of a missing guest memory
 *                    buffer.
 *
 * This function takes the resource off the LRU list and makes sure
 * a guest memory buffer is present for guest-backed resources.
 * However, the buffer may not be bound to the resource at this
 * point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_guest_memory)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
	    !no_guest_memory) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a guest memory buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->guest_memory_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    guest memory buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Guest memory buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->guest_memory_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any guest memory buffer pointed to by
 * @res->guest_memory_bo will be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->guest_memory_bo)
		val_buf.bo = &res->guest_memory_bo->tbo;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backing MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->tbo,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->tbo.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->guest_memory_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->tbo, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_bo *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (old_mem &&
	    new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = to_vmw_bo(&bo->base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_guest_memory;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed while interruptible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_bo *vbo = NULL;

		if (res->guest_memory_bo) {
			vbo = res->guest_memory_bo;

			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->tbo.pin_count) {
				vmw_bo_placement_set(vbo,
						     res->func->domain,
						     res->func->busy_domain);
				ret = ttm_bo_validate
					(&vbo->tbo,
					 &vbo->placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->tbo);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->tbo);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}
1018

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->guest_memory_bo) {
		struct vmw_bo *vbo = res->guest_memory_bo;

		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->tbo);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
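/*
 * Illustrative sketch (not part of the original file): pin references are
 * paired with unpin calls. A hypothetical caller that needs the resource to
 * stay resident with a stable id, e.g. while it is being scanned out, would
 * do:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... use the resource; it cannot be evicted here ...
 *	vmw_resource_unpin(res);
 */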

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

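/**
 * vmw_resource_clean - Clean a dirty resource
 *
 * @res: The resource to clean.
 *
 * If the resource is dirty, call its clean callback and clear the
 * res_dirty flag.
 *
 * Return: Zero on success or if the resource was not dirty, -EINVAL if the
 * resource is dirty but has no clean callback, otherwise the error returned
 * by the clean callback.
 */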
int vmw_resource_clean(struct vmw_resource *res)
{
	int ret = 0;

	if (res->res_dirty) {
		if (!res->func->clean)
			return -EINVAL;

		ret = res->func->clean(res);
		if (ret)
			return ret;
		res->res_dirty = false;
	}
	return ret;
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;
	int ret;

	/*
	 * Find the resource with lowest guest_memory_offset that intersects
	 * the range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->guest_memory_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing guest_memory_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		ret = vmw_resource_clean(found);
		if (ret)
			return ret;
		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->guest_memory_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->tbo;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_resource_priv.h"
  31#include "vmwgfx_binding.h"
 
  32#include "vmwgfx_drv.h"
 
  33
  34#define VMW_RES_EVICT_ERR_COUNT 10
  35
  36/**
  37 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  38 * @res: The resource
  39 */
  40void vmw_resource_mob_attach(struct vmw_resource *res)
  41{
  42	struct vmw_buffer_object *backup = res->backup;
  43	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
  44
  45	dma_resv_assert_held(res->backup->base.base.resv);
  46	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  47		res->func->prio;
  48
  49	while (*new) {
  50		struct vmw_resource *this =
  51			container_of(*new, struct vmw_resource, mob_node);
  52
  53		parent = *new;
  54		new = (res->backup_offset < this->backup_offset) ?
  55			&((*new)->rb_left) : &((*new)->rb_right);
  56	}
  57
  58	rb_link_node(&res->mob_node, parent, new);
  59	rb_insert_color(&res->mob_node, &backup->res_tree);
 
  60
  61	vmw_bo_prio_add(backup, res->used_prio);
  62}
  63
  64/**
  65 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  66 * @res: The resource
  67 */
  68void vmw_resource_mob_detach(struct vmw_resource *res)
  69{
  70	struct vmw_buffer_object *backup = res->backup;
  71
  72	dma_resv_assert_held(backup->base.base.resv);
  73	if (vmw_resource_mob_attached(res)) {
  74		rb_erase(&res->mob_node, &backup->res_tree);
  75		RB_CLEAR_NODE(&res->mob_node);
  76		vmw_bo_prio_del(backup, res->used_prio);
  77	}
  78}
  79
  80struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  81{
  82	kref_get(&res->kref);
  83	return res;
  84}
  85
  86struct vmw_resource *
  87vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  88{
  89	return kref_get_unless_zero(&res->kref) ? res : NULL;
  90}
  91
  92/**
  93 * vmw_resource_release_id - release a resource id to the id manager.
  94 *
  95 * @res: Pointer to the resource.
  96 *
  97 * Release the resource id to the resource id manager and set it to -1
  98 */
  99void vmw_resource_release_id(struct vmw_resource *res)
 100{
 101	struct vmw_private *dev_priv = res->dev_priv;
 102	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 103
 104	spin_lock(&dev_priv->resource_lock);
 105	if (res->id != -1)
 106		idr_remove(idr, res->id);
 107	res->id = -1;
 108	spin_unlock(&dev_priv->resource_lock);
 109}
 110
 111static void vmw_resource_release(struct kref *kref)
 112{
 113	struct vmw_resource *res =
 114	    container_of(kref, struct vmw_resource, kref);
 115	struct vmw_private *dev_priv = res->dev_priv;
 116	int id;
 117	int ret;
 118	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 119
 120	spin_lock(&dev_priv->resource_lock);
 121	list_del_init(&res->lru_head);
 122	spin_unlock(&dev_priv->resource_lock);
 123	if (res->backup) {
 124		struct ttm_buffer_object *bo = &res->backup->base;
 125
 126		ret = ttm_bo_reserve(bo, false, false, NULL);
 127		BUG_ON(ret);
 128		if (vmw_resource_mob_attached(res) &&
 129		    res->func->unbind != NULL) {
 130			struct ttm_validate_buffer val_buf;
 131
 132			val_buf.bo = bo;
 133			val_buf.num_shared = 0;
 134			res->func->unbind(res, false, &val_buf);
 135		}
 136		res->backup_dirty = false;
 137		vmw_resource_mob_detach(res);
 138		if (res->dirty)
 139			res->func->dirty_free(res);
 140		if (res->coherent)
 141			vmw_bo_dirty_release(res->backup);
 142		ttm_bo_unreserve(bo);
 143		vmw_bo_unreference(&res->backup);
 144	}
 145
 146	if (likely(res->hw_destroy != NULL)) {
 147		mutex_lock(&dev_priv->binding_mutex);
 148		vmw_binding_res_list_kill(&res->binding_head);
 149		mutex_unlock(&dev_priv->binding_mutex);
 150		res->hw_destroy(res);
 151	}
 152
 153	id = res->id;
 154	if (res->res_free != NULL)
 155		res->res_free(res);
 156	else
 157		kfree(res);
 158
 159	spin_lock(&dev_priv->resource_lock);
 160	if (id != -1)
 161		idr_remove(idr, id);
 162	spin_unlock(&dev_priv->resource_lock);
 163}
 164
 165void vmw_resource_unreference(struct vmw_resource **p_res)
 166{
 167	struct vmw_resource *res = *p_res;
 168
 169	*p_res = NULL;
 170	kref_put(&res->kref, vmw_resource_release);
 171}
 172
 173
 174/**
 175 * vmw_resource_alloc_id - release a resource id to the id manager.
 176 *
 177 * @res: Pointer to the resource.
 178 *
 179 * Allocate the lowest free resource from the resource manager, and set
 180 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 181 */
 182int vmw_resource_alloc_id(struct vmw_resource *res)
 183{
 184	struct vmw_private *dev_priv = res->dev_priv;
 185	int ret;
 186	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 187
 188	BUG_ON(res->id != -1);
 189
 190	idr_preload(GFP_KERNEL);
 191	spin_lock(&dev_priv->resource_lock);
 192
 193	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 194	if (ret >= 0)
 195		res->id = ret;
 196
 197	spin_unlock(&dev_priv->resource_lock);
 198	idr_preload_end();
 199	return ret < 0 ? ret : 0;
 200}
 201
 202/**
 203 * vmw_resource_init - initialize a struct vmw_resource
 204 *
 205 * @dev_priv:       Pointer to a device private struct.
 206 * @res:            The struct vmw_resource to initialize.
 207 * @delay_id:       Boolean whether to defer device id allocation until
 208 *                  the first validation.
 209 * @res_free:       Resource destructor.
 210 * @func:           Resource function table.
 211 */
 212int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 213		      bool delay_id,
 214		      void (*res_free) (struct vmw_resource *res),
 215		      const struct vmw_res_func *func)
 216{
 217	kref_init(&res->kref);
 218	res->hw_destroy = NULL;
 219	res->res_free = res_free;
 220	res->dev_priv = dev_priv;
 221	res->func = func;
 222	RB_CLEAR_NODE(&res->mob_node);
 223	INIT_LIST_HEAD(&res->lru_head);
 224	INIT_LIST_HEAD(&res->binding_head);
 225	res->id = -1;
 226	res->backup = NULL;
 227	res->backup_offset = 0;
 228	res->backup_dirty = false;
 229	res->res_dirty = false;
 230	res->coherent = false;
 231	res->used_prio = 3;
 232	res->dirty = NULL;
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239
 240/**
 241 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 242 * TTM user-space handle and perform basic type checks
 243 *
 244 * @dev_priv:     Pointer to a device private struct
 245 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 246 * @handle:       The TTM user-space handle
 247 * @converter:    Pointer to an object describing the resource type
 248 * @p_res:        On successful return the location pointed to will contain
 249 *                a pointer to a refcounted struct vmw_resource.
 250 *
 251 * If the handle can't be found or is associated with an incorrect resource
 252 * type, -EINVAL will be returned.
 253 */
 254int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 255				    struct ttm_object_file *tfile,
 256				    uint32_t handle,
 257				    const struct vmw_user_resource_conv
 258				    *converter,
 259				    struct vmw_resource **p_res)
 260{
 261	struct ttm_base_object *base;
 262	struct vmw_resource *res;
 263	int ret = -EINVAL;
 264
 265	base = ttm_base_object_lookup(tfile, handle);
 266	if (unlikely(base == NULL))
 267		return -EINVAL;
 268
 269	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 270		goto out_bad_resource;
 271
 272	res = converter->base_obj_to_res(base);
 273	kref_get(&res->kref);
 274
 275	*p_res = res;
 276	ret = 0;
 277
 278out_bad_resource:
 279	ttm_base_object_unref(&base);
 280
 281	return ret;
 282}
 283
 284/*
 285 * Helper function that looks either a surface or bo.
 286 *
 287 * The pointer this pointed at by out_surf and out_buf needs to be null.
 288 */
 289int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 290			   struct drm_file *filp,
 291			   uint32_t handle,
 292			   struct vmw_surface **out_surf,
 293			   struct vmw_buffer_object **out_buf)
 294{
 295	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 296	struct vmw_resource *res;
 297	int ret;
 298
 299	BUG_ON(*out_surf || *out_buf);
 300
 301	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 302					      user_surface_converter,
 303					      &res);
 304	if (!ret) {
 305		*out_surf = vmw_res_to_srf(res);
 306		return 0;
 307	}
 308
 309	*out_surf = NULL;
 310	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 
 
 
 
 
 
 
 
 311	return ret;
 312}
 313
 314/**
 315 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 316 *
 317 * @res:            The resource for which to allocate a backup buffer.
 318 * @interruptible:  Whether any sleeps during allocation should be
 319 *                  performed while interruptible.
 320 */
 321static int vmw_resource_buf_alloc(struct vmw_resource *res,
 322				  bool interruptible)
 323{
 324	unsigned long size = PFN_ALIGN(res->backup_size);
 325	struct vmw_buffer_object *backup;
 
 
 
 
 
 
 
 326	int ret;
 327
 328	if (likely(res->backup)) {
 329		BUG_ON(res->backup->base.base.size < size);
 330		return 0;
 331	}
 332
 333	ret = vmw_bo_create(res->dev_priv, res->backup_size,
 334			    res->func->backup_placement,
 335			    interruptible, false,
 336			    &vmw_bo_bo_free, &backup);
 337	if (unlikely(ret != 0))
 338		goto out_no_bo;
 339
 340	res->backup = backup;
 341
 342out_no_bo:
 343	return ret;
 344}
 345
 346/**
 347 * vmw_resource_do_validate - Make a resource up-to-date and visible
 348 *                            to the device.
 349 *
 350 * @res:            The resource to make visible to the device.
 351 * @val_buf:        Information about a buffer possibly
 352 *                  containing backup data if a bind operation is needed.
 353 * @dirtying:       Transfer dirty regions.
 354 *
 355 * On hardware resource shortage, this function returns -EBUSY and
 356 * should be retried once resources have been freed up.
 357 */
 358static int vmw_resource_do_validate(struct vmw_resource *res,
 359				    struct ttm_validate_buffer *val_buf,
 360				    bool dirtying)
 361{
 362	int ret = 0;
 363	const struct vmw_res_func *func = res->func;
 364
 365	if (unlikely(res->id == -1)) {
 366		ret = func->create(res);
 367		if (unlikely(ret != 0))
 368			return ret;
 369	}
 370
 371	if (func->bind &&
 372	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 373	      val_buf->bo != NULL) ||
 374	     (!func->needs_backup && val_buf->bo != NULL))) {
 375		ret = func->bind(res, val_buf);
 376		if (unlikely(ret != 0))
 377			goto out_bind_failed;
 378		if (func->needs_backup)
 379			vmw_resource_mob_attach(res);
 380	}
 381
 382	/*
 383	 * Handle the case where the backup mob is marked coherent but
 384	 * the resource isn't.
 385	 */
 386	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 387	    !res->coherent) {
 388		if (res->backup->dirty && !res->dirty) {
 389			ret = func->dirty_alloc(res);
 390			if (ret)
 391				return ret;
 392		} else if (!res->backup->dirty && res->dirty) {
 393			func->dirty_free(res);
 394		}
 395	}
 396
 397	/*
 398	 * Transfer the dirty regions to the resource and update
 399	 * the resource.
 400	 */
 401	if (res->dirty) {
 402		if (dirtying && !res->res_dirty) {
 403			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
 404			pgoff_t end = __KERNEL_DIV_ROUND_UP
 405				(res->backup_offset + res->backup_size,
 406				 PAGE_SIZE);
 407
 408			vmw_bo_dirty_unmap(res->backup, start, end);
 409		}
 410
 411		vmw_bo_dirty_transfer_to_res(res);
 412		return func->dirty_sync(res);
 413	}
 414
 415	return 0;
 416
 417out_bind_failed:
 418	func->destroy(res);
 419
 420	return ret;
 421}
 422
 423/**
 424 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 425 * command submission.
 426 *
 427 * @res:               Pointer to the struct vmw_resource to unreserve.
 428 * @dirty_set:         Change dirty status of the resource.
 429 * @dirty:             When changing dirty status indicates the new status.
 430 * @switch_backup:     Backup buffer has been switched.
 431 * @new_backup:        Pointer to new backup buffer if command submission
 432 *                     switched. May be NULL.
 433 * @new_backup_offset: New backup offset if @switch_backup is true.
 434 *
 435 * Currently unreserving a resource means putting it back on the device's
 436 * resource lru list, so that it can be evicted if necessary.
 437 */
 438void vmw_resource_unreserve(struct vmw_resource *res,
 439			    bool dirty_set,
 440			    bool dirty,
 441			    bool switch_backup,
 442			    struct vmw_buffer_object *new_backup,
 443			    unsigned long new_backup_offset)
 444{
 445	struct vmw_private *dev_priv = res->dev_priv;
 446
 447	if (!list_empty(&res->lru_head))
 448		return;
 449
 450	if (switch_backup && new_backup != res->backup) {
 451		if (res->backup) {
 452			vmw_resource_mob_detach(res);
 453			if (res->coherent)
 454				vmw_bo_dirty_release(res->backup);
 455			vmw_bo_unreference(&res->backup);
 456		}
 457
 458		if (new_backup) {
 459			res->backup = vmw_bo_reference(new_backup);
 460
 461			/*
 462			 * The validation code should already have added a
 463			 * dirty tracker here.
 464			 */
 465			WARN_ON(res->coherent && !new_backup->dirty);
 466
 467			vmw_resource_mob_attach(res);
 468		} else {
 469			res->backup = NULL;
 470		}
 471	} else if (switch_backup && res->coherent) {
 472		vmw_bo_dirty_release(res->backup);
 473	}
 474
 475	if (switch_backup)
 476		res->backup_offset = new_backup_offset;
 477
 478	if (dirty_set)
 479		res->res_dirty = dirty;
 480
 481	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 482		return;
 483
 484	spin_lock(&dev_priv->resource_lock);
 485	list_add_tail(&res->lru_head,
 486		      &res->dev_priv->res_lru[res->func->res_type]);
 487	spin_unlock(&dev_priv->resource_lock);
 488}
 489
 490/**
 491 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 492 *                             for a resource and in that case, allocate
 493 *                             one, reserve and validate it.
 494 *
 495 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 496 * @res:            The resource for which to allocate a backup buffer.
 497 * @interruptible:  Whether any sleeps during allocation should be
 498 *                  performed while interruptible.
 499 * @val_buf:        On successful return contains data about the
 500 *                  reserved and validated backup buffer.
 501 */
 502static int
 503vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 504			  struct vmw_resource *res,
 505			  bool interruptible,
 506			  struct ttm_validate_buffer *val_buf)
 507{
 508	struct ttm_operation_ctx ctx = { true, false };
 509	struct list_head val_list;
 510	bool backup_dirty = false;
 511	int ret;
 512
 513	if (unlikely(res->backup == NULL)) {
 514		ret = vmw_resource_buf_alloc(res, interruptible);
 515		if (unlikely(ret != 0))
 516			return ret;
 517	}
 518
 519	INIT_LIST_HEAD(&val_list);
 520	ttm_bo_get(&res->backup->base);
 521	val_buf->bo = &res->backup->base;
 522	val_buf->num_shared = 0;
 523	list_add_tail(&val_buf->head, &val_list);
 524	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 525	if (unlikely(ret != 0))
 526		goto out_no_reserve;
 527
 528	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 529		return 0;
 530
 531	backup_dirty = res->backup_dirty;
 532	ret = ttm_bo_validate(&res->backup->base,
 533			      res->func->backup_placement,
 
 
 534			      &ctx);
 535
 536	if (unlikely(ret != 0))
 537		goto out_no_validate;
 538
 539	return 0;
 540
 541out_no_validate:
 542	ttm_eu_backoff_reservation(ticket, &val_list);
 543out_no_reserve:
 544	ttm_bo_put(val_buf->bo);
 545	val_buf->bo = NULL;
 546	if (backup_dirty)
 547		vmw_bo_unreference(&res->backup);
 548
 549	return ret;
 550}
 551
 552/*
 553 * vmw_resource_reserve - Reserve a resource for command submission
 554 *
 555 * @res:            The resource to reserve.
 556 *
 557 * This function takes the resource off the LRU list and make sure
 558 * a backup buffer is present for guest-backed resources. However,
 559 * the buffer may not be bound to the resource at this point.
 
 560 *
 561 */
 562int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 563			 bool no_backup)
 564{
 565	struct vmw_private *dev_priv = res->dev_priv;
 566	int ret;
 567
 568	spin_lock(&dev_priv->resource_lock);
 569	list_del_init(&res->lru_head);
 570	spin_unlock(&dev_priv->resource_lock);
 571
 572	if (res->func->needs_backup && res->backup == NULL &&
 573	    !no_backup) {
 574		ret = vmw_resource_buf_alloc(res, interruptible);
 575		if (unlikely(ret != 0)) {
 576			DRM_ERROR("Failed to allocate a backup buffer "
 577				  "of size %lu. bytes\n",
 578				  (unsigned long) res->backup_size);
 579			return ret;
 580		}
 581	}
 582
 583	return 0;
 584}
 585
 586/**
 587 * vmw_resource_backoff_reservation - Unreserve and unreference a
 588 *                                    backup buffer
 589 *.
 590 * @ticket:         The ww acquire ctx used for reservation.
 591 * @val_buf:        Backup buffer information.
 592 */
 593static void
 594vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 595				 struct ttm_validate_buffer *val_buf)
 596{
 597	struct list_head val_list;
 598
 599	if (likely(val_buf->bo == NULL))
 600		return;
 601
 602	INIT_LIST_HEAD(&val_list);
 603	list_add_tail(&val_buf->head, &val_list);
 604	ttm_eu_backoff_reservation(ticket, &val_list);
 605	ttm_bo_put(val_buf->bo);
 606	val_buf->bo = NULL;
 607}
 608
 609/**
 610 * vmw_resource_do_evict - Evict a resource, and transfer its data
 611 *                         to a backup buffer.
 612 *
 613 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 614 * @res:            The resource to evict.
 615 * @interruptible:  Whether to wait interruptible.
 616 */
 617static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 618				 struct vmw_resource *res, bool interruptible)
 619{
 620	struct ttm_validate_buffer val_buf;
 621	const struct vmw_res_func *func = res->func;
 622	int ret;
 623
 624	BUG_ON(!func->may_evict);
 625
 626	val_buf.bo = NULL;
 627	val_buf.num_shared = 0;
 628	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 629	if (unlikely(ret != 0))
 630		return ret;
 631
 632	if (unlikely(func->unbind != NULL &&
 633		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 634		ret = func->unbind(res, res->res_dirty, &val_buf);
 635		if (unlikely(ret != 0))
 636			goto out_no_unbind;
 637		vmw_resource_mob_detach(res);
 638	}
 639	ret = func->destroy(res);
 640	res->backup_dirty = true;
 641	res->res_dirty = false;
 642out_no_unbind:
 643	vmw_resource_backoff_reservation(ticket, &val_buf);
 644
 645	return ret;
 646}
 647
 648
 649/**
 650 * vmw_resource_validate - Make a resource up-to-date and visible
 651 *                         to the device.
 652 * @res: The resource to make visible to the device.
  653 * @intr: Perform waits interruptibly if possible.
  654 * @dirtying: Whether the pending GPU operation will dirty the resource
 655 *
 656 * On successful return, any backup DMA buffer pointed to by @res->backup will
 657 * be reserved and validated.
 658 * On hardware resource shortage, this function will repeatedly evict
 659 * resources of the same type until the validation succeeds.
 660 *
 661 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 662 * on failure.
 663 */
 664int vmw_resource_validate(struct vmw_resource *res, bool intr,
 665			  bool dirtying)
 666{
 667	int ret;
 668	struct vmw_resource *evict_res;
 669	struct vmw_private *dev_priv = res->dev_priv;
 670	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 671	struct ttm_validate_buffer val_buf;
 672	unsigned err_count = 0;
 673
 674	if (!res->func->create)
 675		return 0;
 676
 677	val_buf.bo = NULL;
 678	val_buf.num_shared = 0;
 679	if (res->backup)
 680		val_buf.bo = &res->backup->base;
 681	do {
 682		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 683		if (likely(ret != -EBUSY))
 684			break;
 685
 686		spin_lock(&dev_priv->resource_lock);
 687		if (list_empty(lru_list) || !res->func->may_evict) {
  688			DRM_ERROR("Out of device resources "
 689				  "for %s.\n", res->func->type_name);
 690			ret = -EBUSY;
 691			spin_unlock(&dev_priv->resource_lock);
 692			break;
 693		}
 694
 695		evict_res = vmw_resource_reference
 696			(list_first_entry(lru_list, struct vmw_resource,
 697					  lru_head));
 698		list_del_init(&evict_res->lru_head);
 699
 700		spin_unlock(&dev_priv->resource_lock);
 701
 702		/* Trylock backup buffers with a NULL ticket. */
 703		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 704		if (unlikely(ret != 0)) {
 705			spin_lock(&dev_priv->resource_lock);
 706			list_add_tail(&evict_res->lru_head, lru_list);
 707			spin_unlock(&dev_priv->resource_lock);
 708			if (ret == -ERESTARTSYS ||
 709			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 710				vmw_resource_unreference(&evict_res);
 711				goto out_no_validate;
 712			}
 713		}
 714
 715		vmw_resource_unreference(&evict_res);
 716	} while (1);
 717
 718	if (unlikely(ret != 0))
 719		goto out_no_validate;
 720	else if (!res->func->needs_backup && res->backup) {
 721		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 722		vmw_bo_unreference(&res->backup);
 723	}
 724
 725	return 0;
 726
 727out_no_validate:
 728	return ret;
 729}
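
/*
 * Illustrative sequence (sketch only; in the driver this is driven by
 * the command submission/validation code): reserve, validate, then
 * unreserve once the referencing commands are queued:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (!ret) {
 *		ret = vmw_resource_validate(res, true, dirtying);
 *		...
 *		vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 *	}
 */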
 730
 731
 732/**
  733 * vmw_resource_unbind_list - Unbind all resources attached to a backing mob
 734 *
 735 * @vbo: Pointer to the current backing MOB.
 736 *
  737 * Evicts the guest-backed hardware resources if the backup
 738 * buffer is being moved out of MOB memory.
 739 * Note that this function will not race with the resource
 740 * validation code, since resource validation and eviction
 741 * both require the backup buffer to be reserved.
 742 */
 743void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 744{
 745	struct ttm_validate_buffer val_buf = {
 746		.bo = &vbo->base,
 747		.num_shared = 0
 748	};
 749
 750	dma_resv_assert_held(vbo->base.base.resv);
 751	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 752		struct rb_node *node = vbo->res_tree.rb_node;
 753		struct vmw_resource *res =
 754			container_of(node, struct vmw_resource, mob_node);
 755
 756		if (!WARN_ON_ONCE(!res->func->unbind))
 757			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 758
 759		res->backup_dirty = true;
 760		res->res_dirty = false;
 761		vmw_resource_mob_detach(res);
 762	}
 763
 764	(void) ttm_bo_wait(&vbo->base, false, false);
 765}
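
/*
 * Sketch of the expected call site (assumed; the move handling lives in
 * another file): with @vbo reserved and about to leave MOB memory,
 * roughly:
 *
 *	if (old_mem->mem_type == VMW_PL_MOB)
 *		vmw_resource_unbind_list(vbo);
 */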
 766
 767
 768/**
 769 * vmw_query_readback_all - Read back cached query states
 770 *
 771 * @dx_query_mob: Buffer containing the DX query MOB
 772 *
 773 * Read back cached states from the device if they exist.  This function
 774 * assumes binding_mutex is held.
 775 */
 776int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 777{
 778	struct vmw_resource *dx_query_ctx;
 779	struct vmw_private *dev_priv;
 780	struct {
 781		SVGA3dCmdHeader header;
 782		SVGA3dCmdDXReadbackAllQuery body;
 783	} *cmd;
 784
 785
 786	/* No query bound, so do nothing */
 787	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 788		return 0;
 789
 790	dx_query_ctx = dx_query_mob->dx_query_ctx;
 791	dev_priv     = dx_query_ctx->dev_priv;
 792
 793	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 794	if (unlikely(cmd == NULL))
 795		return -ENOMEM;
 796
 797	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 798	cmd->header.size = sizeof(cmd->body);
 799	cmd->body.cid    = dx_query_ctx->id;
 800
 801	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 802
  803	/* Triggers a rebind the next time the affected context is bound */
 804	dx_query_mob->dx_query_ctx = NULL;
 805
 806	return 0;
 807}
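
/*
 * Locking sketch (illustrative only): binding_mutex must be held by the
 * caller, as vmw_query_move_notify() below does:
 *
 *	mutex_lock(&dev_priv->binding_mutex);
 *	ret = vmw_query_readback_all(dx_query_mob);
 *	mutex_unlock(&dev_priv->binding_mutex);
 */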
 808
 809
 810
 811/**
  812 * vmw_query_move_notify - Read back cached query states before a move
 813 *
 814 * @bo: The TTM buffer object about to move.
 815 * @old_mem: The memory region @bo is moving from.
 816 * @new_mem: The memory region @bo is moving to.
 817 *
 818 * Called before the query MOB is swapped out to read back cached query
 819 * states from the device.
 820 */
 821void vmw_query_move_notify(struct ttm_buffer_object *bo,
 822			   struct ttm_resource *old_mem,
 823			   struct ttm_resource *new_mem)
 824{
 825	struct vmw_buffer_object *dx_query_mob;
 826	struct ttm_device *bdev = bo->bdev;
 827	struct vmw_private *dev_priv;
 828
 829	dev_priv = container_of(bdev, struct vmw_private, bdev);
 830
 831	mutex_lock(&dev_priv->binding_mutex);
 832
 833	/* If BO is being moved from MOB to system memory */
 834	if (new_mem->mem_type == TTM_PL_SYSTEM &&
 835	    old_mem->mem_type == VMW_PL_MOB) {
 836		struct vmw_fence_obj *fence;
 837
 838		dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
 839		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 840			mutex_unlock(&dev_priv->binding_mutex);
 841			return;
 842		}
 843
 844		(void) vmw_query_readback_all(dx_query_mob);
 845		mutex_unlock(&dev_priv->binding_mutex);
 846
 847		/* Create a fence and attach the BO to it */
 848		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 849		vmw_bo_fence_single(bo, fence);
 850
 851		if (fence != NULL)
 852			vmw_fence_obj_unreference(&fence);
 853
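		/* Wait for the readback and its fence before the move proceeds */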
 854		(void) ttm_bo_wait(bo, false, false);
 855	} else
 856		mutex_unlock(&dev_priv->binding_mutex);
 857}
 858
 859/**
 860 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 861 *
 862 * @res:            The resource being queried.
 863 */
 864bool vmw_resource_needs_backup(const struct vmw_resource *res)
 865{
 866	return res->func->needs_backup;
 867}
 868
 869/**
 870 * vmw_resource_evict_type - Evict all resources of a specific type
 871 *
 872 * @dev_priv:       Pointer to a device private struct
 873 * @type:           The resource type to evict
 874 *
  875 * To avoid starvation due to thrashing, or as part of the hibernation sequence,
 876 * try to evict all evictable resources of a specific type.
 877 */
 878static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 879				    enum vmw_res_type type)
 880{
 881	struct list_head *lru_list = &dev_priv->res_lru[type];
 882	struct vmw_resource *evict_res;
 883	unsigned err_count = 0;
 884	int ret;
 885	struct ww_acquire_ctx ticket;
 886
 887	do {
 888		spin_lock(&dev_priv->resource_lock);
 889
 890		if (list_empty(lru_list))
 891			goto out_unlock;
 892
 893		evict_res = vmw_resource_reference(
 894			list_first_entry(lru_list, struct vmw_resource,
 895					 lru_head));
 896		list_del_init(&evict_res->lru_head);
 897		spin_unlock(&dev_priv->resource_lock);
 898
  899		/* Block waiting to lock backup buffers, using a ticket. */
 900		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 901		if (unlikely(ret != 0)) {
 902			spin_lock(&dev_priv->resource_lock);
 903			list_add_tail(&evict_res->lru_head, lru_list);
 904			spin_unlock(&dev_priv->resource_lock);
 905			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 906				vmw_resource_unreference(&evict_res);
 907				return;
 908			}
 909		}
 910
 911		vmw_resource_unreference(&evict_res);
 912	} while (1);
 913
 914out_unlock:
 915	spin_unlock(&dev_priv->resource_lock);
 916}
 917
 918/**
 919 * vmw_resource_evict_all - Evict all evictable resources
 920 *
 921 * @dev_priv:       Pointer to a device private struct
 922 *
  923 * To avoid starvation due to thrashing, or as part of the hibernation sequence,
 924 * evict all evictable resources. In particular this means that all
 925 * guest-backed resources that are registered with the device are
 926 * evicted and the OTable becomes clean.
 927 */
 928void vmw_resource_evict_all(struct vmw_private *dev_priv)
 929{
 930	enum vmw_res_type type;
 931
 932	mutex_lock(&dev_priv->cmdbuf_mutex);
 933
 934	for (type = 0; type < vmw_res_max; ++type)
 935		vmw_resource_evict_type(dev_priv, type);
 936
 937	mutex_unlock(&dev_priv->cmdbuf_mutex);
 938}
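
/*
 * Illustrative call site (assumed; typically the PM freeze/hibernation
 * path):
 *
 *	vmw_resource_evict_all(dev_priv);
 *	... all guest-backed resources are now evicted and the
 *	    object tables are clean ...
 */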
 939
  940/**
 941 * vmw_resource_pin - Add a pin reference on a resource
 942 *
 943 * @res: The resource to add a pin reference on
 944 *
 945 * This function adds a pin reference, and if needed validates the resource.
 946 * Having a pin reference means that the resource can never be evicted, and
 947 * its id will never change as long as there is a pin reference.
 948 * This function returns 0 on success and a negative error code on failure.
 949 */
 950int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 951{
 952	struct ttm_operation_ctx ctx = { interruptible, false };
 953	struct vmw_private *dev_priv = res->dev_priv;
 954	int ret;
 955
 956	mutex_lock(&dev_priv->cmdbuf_mutex);
 957	ret = vmw_resource_reserve(res, interruptible, false);
 958	if (ret)
 959		goto out_no_reserve;
 960
 961	if (res->pin_count == 0) {
 962		struct vmw_buffer_object *vbo = NULL;
 963
 964		if (res->backup) {
 965			vbo = res->backup;
 966
 967			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
 968			if (ret)
 969				goto out_no_validate;
 970			if (!vbo->base.pin_count) {
 971				ret = ttm_bo_validate
 972					(&vbo->base,
 973					 res->func->backup_placement,
 974					 &ctx);
 975				if (ret) {
 976					ttm_bo_unreserve(&vbo->base);
 977					goto out_no_validate;
 978				}
 979			}
 980
 981			/* Do we really need to pin the MOB as well? */
 982			vmw_bo_pin_reserved(vbo, true);
 983		}
 984		ret = vmw_resource_validate(res, interruptible, true);
 985		if (vbo)
 986			ttm_bo_unreserve(&vbo->base);
 987		if (ret)
 988			goto out_no_validate;
 989	}
 990	res->pin_count++;
 991
 992out_no_validate:
 993	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 994out_no_reserve:
 995	mutex_unlock(&dev_priv->cmdbuf_mutex);
 996
 997	return ret;
 998}
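
/*
 * Usage sketch (illustrative only): hold a pin while a stable resource
 * id is required, for example while a surface is being scanned out:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... res->id is stable and the resource cannot be evicted ...
 *	vmw_resource_unpin(res);
 */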
 999
1000/**
1001 * vmw_resource_unpin - Remove a pin reference from a resource
1002 *
1003 * @res: The resource to remove a pin reference from
1004 *
1005 * Having a pin reference means that the resource can never be evicted, and
1006 * its id will never change as long as there is a pin reference.
1007 */
1008void vmw_resource_unpin(struct vmw_resource *res)
1009{
1010	struct vmw_private *dev_priv = res->dev_priv;
1011	int ret;
1012
1013	mutex_lock(&dev_priv->cmdbuf_mutex);
1014
1015	ret = vmw_resource_reserve(res, false, true);
1016	WARN_ON(ret);
1017
1018	WARN_ON(res->pin_count == 0);
1019	if (--res->pin_count == 0 && res->backup) {
1020		struct vmw_buffer_object *vbo = res->backup;
1021
1022		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1023		vmw_bo_pin_reserved(vbo, false);
1024		ttm_bo_unreserve(&vbo->base);
1025	}
1026
1027	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1028
1029	mutex_unlock(&dev_priv->cmdbuf_mutex);
1030}
1031
1032/**
1033 * vmw_res_type - Return the resource type
1034 *
1035 * @res: Pointer to the resource
1036 */
1037enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1038{
1039	return res->func->res_type;
1040}
1041
1042/**
1043 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1044 * sequential range of touched backing store memory.
1045 * @res: The resource.
1046 * @start: The first page touched.
1047 * @end: The last page touched + 1.
1048 */
1049void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1050			       pgoff_t end)
1051{
1052	if (res->dirty)
1053		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1054					   end << PAGE_SHIFT);
1055}
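
/*
 * Illustrative sketch (assumed caller, e.g. a write-fault handler):
 * a write hitting backing-store page @gpage would be recorded as:
 *
 *	vmw_resource_dirty_update(res, gpage, gpage + 1);
 */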
1056
1057/**
1058 * vmw_resources_clean - Clean resources intersecting a mob range
1059 * @vbo: The mob buffer object
1060 * @start: The mob page offset starting the range
1061 * @end: The mob page offset ending the range
 1062 * @num_prefault: Returns how many pages, including the first, have been
 1063 * cleaned and are safe to prefault
1064 */
1065int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
1066			pgoff_t end, pgoff_t *num_prefault)
1067{
1068	struct rb_node *cur = vbo->res_tree.rb_node;
1069	struct vmw_resource *found = NULL;
1070	unsigned long res_start = start << PAGE_SHIFT;
1071	unsigned long res_end = end << PAGE_SHIFT;
1072	unsigned long last_cleaned = 0;
1073
1074	/*
1075	 * Find the resource with lowest backup_offset that intersects the
1076	 * range.
1077	 */
1078	while (cur) {
1079		struct vmw_resource *cur_res =
1080			container_of(cur, struct vmw_resource, mob_node);
1081
1082		if (cur_res->backup_offset >= res_end) {
1083			cur = cur->rb_left;
1084		} else if (cur_res->backup_offset + cur_res->backup_size <=
1085			   res_start) {
1086			cur = cur->rb_right;
1087		} else {
1088			found = cur_res;
1089			cur = cur->rb_left;
1090			/* Continue to look for resources with lower offsets */
1091		}
1092	}
1093
1094	/*
1095	 * In order of increasing backup_offset, clean dirty resources
1096	 * intersecting the range.
1097	 */
1098	while (found) {
1099		if (found->res_dirty) {
1100			int ret;
1101
1102			if (!found->func->clean)
1103				return -EINVAL;
1104
1105			ret = found->func->clean(found);
1106			if (ret)
1107				return ret;
1108
1109			found->res_dirty = false;
1110		}
1111		last_cleaned = found->backup_offset + found->backup_size;
1112		cur = rb_next(&found->mob_node);
1113		if (!cur)
1114			break;
1115
1116		found = container_of(cur, struct vmw_resource, mob_node);
1117		if (found->backup_offset >= res_end)
1118			break;
1119	}
1120
1121	/*
1122	 * Set number of pages allowed prefaulting and fence the buffer object
1123	 */
1124	*num_prefault = 1;
1125	if (last_cleaned > res_start) {
1126		struct ttm_buffer_object *bo = &vbo->base;
1127
1128		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1129						      PAGE_SIZE);
1130		vmw_bo_fence_single(bo, NULL);
1131	}
1132
1133	return 0;
1134}
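
/*
 * Illustrative sketch (assumed caller, simplified from a page-fault
 * path): clean the faulting page and learn how far ahead it is safe to
 * prefault:
 *
 *	pgoff_t num_prefault;
 *	int ret;
 *
 *	ret = vmw_resources_clean(vbo, page_offset, page_offset + 1,
 *				  &num_prefault);
 *	if (!ret)
 *		... map up to num_prefault pages starting at page_offset ...
 */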