   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_binding.h"
  31#include "vmwgfx_bo.h"
  32#include "vmwgfx_drv.h"
  33#include "vmwgfx_resource_priv.h"
  34
  35#define VMW_RES_EVICT_ERR_COUNT 10
  36
  37/**
  38 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  39 * @res: The resource
  40 */
  41void vmw_resource_mob_attach(struct vmw_resource *res)
  42{
  43	struct vmw_bo *gbo = res->guest_memory_bo;
  44	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
  45
  46	dma_resv_assert_held(gbo->tbo.base.resv);
  47	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  48		res->func->prio;
  49
  50	while (*new) {
  51		struct vmw_resource *this =
  52			container_of(*new, struct vmw_resource, mob_node);
  53
  54		parent = *new;
  55		new = (res->guest_memory_offset < this->guest_memory_offset) ?
  56			&((*new)->rb_left) : &((*new)->rb_right);
  57	}
  58
  59	rb_link_node(&res->mob_node, parent, new);
  60	rb_insert_color(&res->mob_node, &gbo->res_tree);
  61
  62	vmw_bo_prio_add(gbo, res->used_prio);
  63}
  64
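/*
 * The res_tree filled in above is an rb-tree keyed on guest_memory_offset,
 * so a resource backed at a given offset can be found with an ordinary
 * rb-tree walk. A minimal sketch (vmw_resource_mob_lookup() is a
 * hypothetical helper, not part of this file), assuming the caller holds
 * the buffer object's reservation:
 *
 *	static struct vmw_resource *
 *	vmw_resource_mob_lookup(struct vmw_bo *gbo, unsigned long offset)
 *	{
 *		struct rb_node *node = gbo->res_tree.rb_node;
 *
 *		while (node) {
 *			struct vmw_resource *this =
 *				container_of(node, struct vmw_resource, mob_node);
 *
 *			if (offset < this->guest_memory_offset)
 *				node = node->rb_left;
 *			else if (offset > this->guest_memory_offset)
 *				node = node->rb_right;
 *			else
 *				return this;
 *		}
 *		return NULL;
 *	}
 */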
  65/**
  66 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  67 * @res: The resource
  68 */
  69void vmw_resource_mob_detach(struct vmw_resource *res)
  70{
  71	struct vmw_bo *gbo = res->guest_memory_bo;
  72
  73	dma_resv_assert_held(gbo->tbo.base.resv);
  74	if (vmw_resource_mob_attached(res)) {
  75		rb_erase(&res->mob_node, &gbo->res_tree);
  76		RB_CLEAR_NODE(&res->mob_node);
  77		vmw_bo_prio_del(gbo, res->used_prio);
  78	}
  79}
  80
  81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  82{
  83	kref_get(&res->kref);
  84	return res;
  85}
  86
  87struct vmw_resource *
  88vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  89{
  90	return kref_get_unless_zero(&res->kref) ? res : NULL;
  91}
  92
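/*
 * vmw_resource_reference_unless_doomed() exists for lookups that can race
 * with vmw_resource_release(): once the refcount has dropped to zero,
 * kref_get_unless_zero() fails and the caller must treat the resource as
 * already gone. A minimal usage sketch, assuming @res was found on some
 * shared structure under a lock that the release path also takes:
 *
 *	res = vmw_resource_reference_unless_doomed(res);
 *	if (!res)
 *		return -ENOENT;
 *	...
 *	vmw_resource_unreference(&res);
 */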
  93/**
  94 * vmw_resource_release_id - release a resource id to the id manager.
  95 *
  96 * @res: Pointer to the resource.
  97 *
  98 * Release the resource id to the resource id manager and set it to -1
  99 */
 100void vmw_resource_release_id(struct vmw_resource *res)
 101{
 102	struct vmw_private *dev_priv = res->dev_priv;
 103	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 104
 105	spin_lock(&dev_priv->resource_lock);
 106	if (res->id != -1)
 107		idr_remove(idr, res->id);
 108	res->id = -1;
 109	spin_unlock(&dev_priv->resource_lock);
 110}
 111
 112static void vmw_resource_release(struct kref *kref)
 113{
 114	struct vmw_resource *res =
 115	    container_of(kref, struct vmw_resource, kref);
 116	struct vmw_private *dev_priv = res->dev_priv;
 117	int id;
 118	int ret;
 119	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 120
 121	spin_lock(&dev_priv->resource_lock);
 122	list_del_init(&res->lru_head);
 123	spin_unlock(&dev_priv->resource_lock);
 124	if (res->guest_memory_bo) {
 125		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
 126
 127		ret = ttm_bo_reserve(bo, false, false, NULL);
 128		BUG_ON(ret);
 129		if (vmw_resource_mob_attached(res) &&
 130		    res->func->unbind != NULL) {
 131			struct ttm_validate_buffer val_buf;
 132
 133			val_buf.bo = bo;
 134			val_buf.num_shared = 0;
 135			res->func->unbind(res, false, &val_buf);
 136		}
 137	res->guest_memory_dirty = false;
 138		vmw_resource_mob_detach(res);
 139		if (res->dirty)
 140			res->func->dirty_free(res);
 141		if (res->coherent)
 142			vmw_bo_dirty_release(res->guest_memory_bo);
 143		ttm_bo_unreserve(bo);
 144		vmw_user_bo_unref(&res->guest_memory_bo);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	spin_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	spin_unlock(&dev_priv->resource_lock);
 164}
 165
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
 168	struct vmw_resource *res = *p_res;
 169
 170	*p_res = NULL;
 171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
 174
 175/**
 176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
 180 * Allocate the lowest free resource id from the id manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	spin_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	spin_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
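/*
 * The idr_preload()/GFP_NOWAIT pairing above is the standard kernel idiom
 * for allocating an IDR entry under a spinlock: memory is preallocated
 * with GFP_KERNEL while sleeping is still allowed, and the allocation
 * inside the lock is then atomic. In sketch form:
 *
 *	idr_preload(GFP_KERNEL);	(may sleep, lock not yet held)
 *	spin_lock(&lock);
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&lock);
 *	idr_preload_end();
 */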
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @delay_id:       Boolean whether to defer device id allocation until
 209 *                  the first validation.
 210 * @res_free:       Resource destructor.
 211 * @func:           Resource function table.
 212 */
 213int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 214		      bool delay_id,
 215		      void (*res_free) (struct vmw_resource *res),
 216		      const struct vmw_res_func *func)
 217{
 218	kref_init(&res->kref);
 219	res->hw_destroy = NULL;
 220	res->res_free = res_free;
 221	res->dev_priv = dev_priv;
 222	res->func = func;
 223	RB_CLEAR_NODE(&res->mob_node);
 224	INIT_LIST_HEAD(&res->lru_head);
 225	INIT_LIST_HEAD(&res->binding_head);
 226	res->id = -1;
 227	res->guest_memory_bo = NULL;
 228	res->guest_memory_offset = 0;
 229	res->guest_memory_dirty = false;
 230	res->res_dirty = false;
 231	res->coherent = false;
 232	res->used_prio = 3;
 233	res->dirty = NULL;
 234	if (delay_id)
 235		return 0;
 236	else
 237		return vmw_resource_alloc_id(res);
 238}
 239
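/*
 * A minimal sketch of how a resource-type implementation might use this
 * initializer (my_res_free, my_res_func and my_hw_destroy are hypothetical
 * stand-ins for the per-type callbacks, e.g. those of surfaces or
 * contexts):
 *
 *	ret = vmw_resource_init(dev_priv, res, true, my_res_free,
 *				&my_res_func);
 *	if (ret)
 *		return ret;
 *	res->hw_destroy = my_hw_destroy;
 *
 * Passing delay_id == true defers the device id allocation until the
 * resource is first validated.
 */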
 240
 241/**
 242 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 243 * TTM user-space handle and perform basic type checks
 244 *
 245 * @dev_priv:     Pointer to a device private struct
 246 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 247 * @handle:       The TTM user-space handle
 248 * @converter:    Pointer to an object describing the resource type
 249 * @p_res:        On successful return the location pointed to will contain
 250 *                a pointer to a refcounted struct vmw_resource.
 251 *
 252 * If the handle can't be found or is associated with an incorrect resource
 253 * type, -EINVAL will be returned.
 254 */
 255int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 256				    struct ttm_object_file *tfile,
 257				    uint32_t handle,
 258				    const struct vmw_user_resource_conv
 259				    *converter,
 260				    struct vmw_resource **p_res)
 261{
 262	struct ttm_base_object *base;
 263	struct vmw_resource *res;
 264	int ret = -EINVAL;
 265
 266	base = ttm_base_object_lookup(tfile, handle);
 267	if (unlikely(!base))
 268		return -EINVAL;
 269
 270	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 271		goto out_bad_resource;
 272
 273	res = converter->base_obj_to_res(base);
 274	kref_get(&res->kref);
 275
 276	*p_res = res;
 277	ret = 0;
 278
 279out_bad_resource:
 280	ttm_base_object_unref(&base);
 281
 282	return ret;
 283}
 284
 285/*
 286 * Helper function that looks up either a surface or a bo.
 287 *
 288 * The pointers pointed at by out_surf and out_buf need to be NULL.
 289 */
 290int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 291			   struct drm_file *filp,
 292			   uint32_t handle,
 293			   struct vmw_surface **out_surf,
 294			   struct vmw_bo **out_buf)
 295{
 296	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 297	struct vmw_resource *res;
 298	int ret;
 299
 300	BUG_ON(*out_surf || *out_buf);
 301
 302	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 303					      user_surface_converter,
 304					      &res);
 305	if (!ret) {
 306		*out_surf = vmw_res_to_srf(res);
 307		return 0;
 308	}
 309
 310	*out_surf = NULL;
 311	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 312	return ret;
 313}
 314
 315/**
 316 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 317 *
 318 * @res:            The resource for which to allocate a gbo buffer.
 319 * @interruptible:  Whether any sleeps during allocation should be
 320 *                  performed while interruptible.
 321 */
 322static int vmw_resource_buf_alloc(struct vmw_resource *res,
 323				  bool interruptible)
 324{
 325	unsigned long size = PFN_ALIGN(res->guest_memory_size);
 326	struct vmw_bo *gbo;
 327	struct vmw_bo_params bo_params = {
 328		.domain = res->func->domain,
 329		.busy_domain = res->func->busy_domain,
 330		.bo_type = ttm_bo_type_device,
 331		.size = res->guest_memory_size,
 332		.pin = false
 333	};
 334	int ret;
 335
 336	if (likely(res->guest_memory_bo)) {
 337		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
 338		return 0;
 339	}
 340
 341	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
 342	if (unlikely(ret != 0))
 343		goto out_no_bo;
 344
 345	res->guest_memory_bo = gbo;
 346
 347out_no_bo:
 348	return ret;
 349}
 350
 351/**
 352 * vmw_resource_do_validate - Make a resource up-to-date and visible
 353 *                            to the device.
 354 *
 355 * @res:            The resource to make visible to the device.
 356 * @val_buf:        Information about a buffer possibly
 357 *                  containing backup data if a bind operation is needed.
 358 * @dirtying:       Transfer dirty regions.
 359 *
 360 * On hardware resource shortage, this function returns -EBUSY and
 361 * the call should be retried once resources have been freed up.
 362 */
 363static int vmw_resource_do_validate(struct vmw_resource *res,
 364				    struct ttm_validate_buffer *val_buf,
 365				    bool dirtying)
 366{
 367	int ret = 0;
 368	const struct vmw_res_func *func = res->func;
 369
 370	if (unlikely(res->id == -1)) {
 371		ret = func->create(res);
 372		if (unlikely(ret != 0))
 373			return ret;
 374	}
 375
 376	if (func->bind &&
 377	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
 378	      val_buf->bo) ||
 379	     (!func->needs_guest_memory && val_buf->bo))) {
 380		ret = func->bind(res, val_buf);
 381		if (unlikely(ret != 0))
 382			goto out_bind_failed;
 383		if (func->needs_guest_memory)
 384			vmw_resource_mob_attach(res);
 385	}
 386
 387	/*
 388	 * Handle the case where the backup mob is marked coherent but
 389	 * the resource isn't.
 390	 */
 391	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 392	    !res->coherent) {
 393		if (res->guest_memory_bo->dirty && !res->dirty) {
 394			ret = func->dirty_alloc(res);
 395			if (ret)
 396				return ret;
 397		} else if (!res->guest_memory_bo->dirty && res->dirty) {
 398			func->dirty_free(res);
 399		}
 400	}
 401
 402	/*
 403	 * Transfer the dirty regions to the resource and update
 404	 * the resource.
 405	 */
 406	if (res->dirty) {
 407		if (dirtying && !res->res_dirty) {
 408			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
 409			pgoff_t end = __KERNEL_DIV_ROUND_UP
 410				(res->guest_memory_offset + res->guest_memory_size,
 411				 PAGE_SIZE);
 412
 413			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
 414		}
 415
 416		vmw_bo_dirty_transfer_to_res(res);
 417		return func->dirty_sync(res);
 418	}
 419
 420	return 0;
 421
 422out_bind_failed:
 423	func->destroy(res);
 424
 425	return ret;
 426}
 427
 428/**
 429 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 430 * command submission.
 431 *
 432 * @res:               Pointer to the struct vmw_resource to unreserve.
 433 * @dirty_set:         Change dirty status of the resource.
 434 * @dirty:             When changing dirty status indicates the new status.
 435 * @switch_guest_memory: Guest memory buffer has been switched.
 436 * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
 437 *                     switched. May be NULL.
 438 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 439 *
 440 * Currently unreserving a resource means putting it back on the device's
 441 * resource lru list, so that it can be evicted if necessary.
 442 */
 443void vmw_resource_unreserve(struct vmw_resource *res,
 444			    bool dirty_set,
 445			    bool dirty,
 446			    bool switch_guest_memory,
 447			    struct vmw_bo *new_guest_memory_bo,
 448			    unsigned long new_guest_memory_offset)
 449{
 450	struct vmw_private *dev_priv = res->dev_priv;
 451
 452	if (!list_empty(&res->lru_head))
 453		return;
 454
 455	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
 456		if (res->guest_memory_bo) {
 457			vmw_resource_mob_detach(res);
 458			if (res->coherent)
 459				vmw_bo_dirty_release(res->guest_memory_bo);
 460			vmw_user_bo_unref(&res->guest_memory_bo);
 461		}
 462
 463		if (new_guest_memory_bo) {
 464			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 465
 466			/*
 467			 * The validation code should already have added a
 468			 * dirty tracker here.
 469			 */
 470			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
 471
 472			vmw_resource_mob_attach(res);
 473		} else {
 474			res->guest_memory_bo = NULL;
 475		}
 476	} else if (switch_guest_memory && res->coherent) {
 477		vmw_bo_dirty_release(res->guest_memory_bo);
 478	}
 479
 480	if (switch_guest_memory)
 481		res->guest_memory_offset = new_guest_memory_offset;
 482
 483	if (dirty_set)
 484		res->res_dirty = dirty;
 485
 486	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 487		return;
 488
 489	spin_lock(&dev_priv->resource_lock);
 490	list_add_tail(&res->lru_head,
 491		      &res->dev_priv->res_lru[res->func->res_type]);
 492	spin_unlock(&dev_priv->resource_lock);
 493}
 494
 495/**
 496 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 497 *                             for a resource and in that case, allocate
 498 *                             one, reserve and validate it.
 499 *
 500 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 501 * @res:            The resource for which to allocate a backup buffer.
 502 * @interruptible:  Whether any sleeps during allocation should be
 503 *                  performed while interruptible.
 504 * @val_buf:        On successful return contains data about the
 505 *                  reserved and validated backup buffer.
 506 */
 507static int
 508vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 509			  struct vmw_resource *res,
 510			  bool interruptible,
 511			  struct ttm_validate_buffer *val_buf)
 512{
 513	struct ttm_operation_ctx ctx = { true, false };
 514	struct list_head val_list;
 515	bool guest_memory_dirty = false;
 516	int ret;
 517
 518	if (unlikely(!res->guest_memory_bo)) {
 519		ret = vmw_resource_buf_alloc(res, interruptible);
 520		if (unlikely(ret != 0))
 521			return ret;
 522	}
 523
 524	INIT_LIST_HEAD(&val_list);
 525	ttm_bo_get(&res->guest_memory_bo->tbo);
 526	val_buf->bo = &res->guest_memory_bo->tbo;
 527	val_buf->num_shared = 0;
 528	list_add_tail(&val_buf->head, &val_list);
 529	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 530	if (unlikely(ret != 0))
 531		goto out_no_reserve;
 532
 533	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
 534		return 0;
 535
 536	guest_memory_dirty = res->guest_memory_dirty;
 537	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
 538			     res->func->busy_domain);
 539	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
 540			      &res->guest_memory_bo->placement,
 541			      &ctx);
 542
 543	if (unlikely(ret != 0))
 544		goto out_no_validate;
 545
 546	return 0;
 547
 548out_no_validate:
 549	ttm_eu_backoff_reservation(ticket, &val_list);
 550out_no_reserve:
 551	ttm_bo_put(val_buf->bo);
 552	val_buf->bo = NULL;
 553	if (guest_memory_dirty)
 554		vmw_user_bo_unref(&res->guest_memory_bo);
 555
 556	return ret;
 557}
 558
 559/*
 560 * vmw_resource_reserve - Reserve a resource for command submission
 561 *
 562 * @res:            The resource to reserve.
 563 *
 564 * This function takes the resource off the LRU list and makes sure
 565 * a guest memory buffer is present for guest-backed resources.
 566 * However, the buffer may not be bound to the resource at this
 567 * point.
 568 *
 569 */
 570int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 571			 bool no_guest_memory)
 572{
 573	struct vmw_private *dev_priv = res->dev_priv;
 574	int ret;
 575
 576	spin_lock(&dev_priv->resource_lock);
 577	list_del_init(&res->lru_head);
 578	spin_unlock(&dev_priv->resource_lock);
 579
 580	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
 581	    !no_guest_memory) {
 582		ret = vmw_resource_buf_alloc(res, interruptible);
 583		if (unlikely(ret != 0)) {
 584			DRM_ERROR("Failed to allocate a guest memory buffer "
 585				  "of size %lu bytes\n",
 586				  (unsigned long) res->guest_memory_size);
 587			return ret;
 588		}
 589	}
 590
 591	return 0;
 592}
 593
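/*
 * Reservation brackets validation during command submission. A minimal
 * sketch of the pairing (buffer-object reservation and error handling
 * elided; vmw_resource_pin() further down is a complete in-file example
 * of the full sequence):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	...
 *	ret = vmw_resource_validate(res, true, dirtying);
 *	...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */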
 594/**
 595 * vmw_resource_backoff_reservation - Unreserve and unreference a
 596 *                                    guest memory buffer
 597 *
 598 * @ticket:         The ww acquire ctx used for reservation.
 599 * @val_buf:        Guest memory buffer information.
 600 */
 601static void
 602vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 603				 struct ttm_validate_buffer *val_buf)
 604{
 605	struct list_head val_list;
 606
 607	if (likely(val_buf->bo == NULL))
 608		return;
 609
 610	INIT_LIST_HEAD(&val_list);
 611	list_add_tail(&val_buf->head, &val_list);
 612	ttm_eu_backoff_reservation(ticket, &val_list);
 613	ttm_bo_put(val_buf->bo);
 614	val_buf->bo = NULL;
 615}
 616
 617/**
 618 * vmw_resource_do_evict - Evict a resource, and transfer its data
 619 *                         to a backup buffer.
 620 *
 621 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 622 * @res:            The resource to evict.
 623 * @interruptible:  Whether to wait interruptible.
 624 */
 625static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 626				 struct vmw_resource *res, bool interruptible)
 627{
 628	struct ttm_validate_buffer val_buf;
 629	const struct vmw_res_func *func = res->func;
 630	int ret;
 631
 632	BUG_ON(!func->may_evict);
 633
 634	val_buf.bo = NULL;
 635	val_buf.num_shared = 0;
 636	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 637	if (unlikely(ret != 0))
 638		return ret;
 639
 640	if (unlikely(func->unbind != NULL &&
 641		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
 642		ret = func->unbind(res, res->res_dirty, &val_buf);
 643		if (unlikely(ret != 0))
 644			goto out_no_unbind;
 645		vmw_resource_mob_detach(res);
 646	}
 647	ret = func->destroy(res);
 648	res->guest_memory_dirty = true;
 649	res->res_dirty = false;
 650out_no_unbind:
 651	vmw_resource_backoff_reservation(ticket, &val_buf);
 652
 653	return ret;
 654}
 655
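/*
 * Both in-file callers exercise the two locking modes documented above:
 * vmw_resource_validate() trylocks backup buffers with a NULL ticket,
 * while vmw_resource_evict_type() passes a real ww_acquire_ctx and waits:
 *
 *	ret = vmw_resource_do_evict(NULL, evict_res, intr);
 *	ret = vmw_resource_do_evict(&ticket, evict_res, false);
 */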
 656
 657/**
 658 * vmw_resource_validate - Make a resource up-to-date and visible
 659 *                         to the device.
 660 * @res: The resource to make visible to the device.
 661 * @intr: Perform waits interruptible if possible.
 662 * @dirtying: Pending GPU operation will dirty the resource
 663 *
 664 * On successful return, any guest memory buffer pointed to by
 665 * @res->guest_memory_bo will be reserved and validated.
 666 * On hardware resource shortage, this function will repeatedly evict
 667 * resources of the same type until the validation succeeds.
 668 *
 669 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 670 * on failure.
 671 */
 672int vmw_resource_validate(struct vmw_resource *res, bool intr,
 673			  bool dirtying)
 674{
 675	int ret;
 676	struct vmw_resource *evict_res;
 677	struct vmw_private *dev_priv = res->dev_priv;
 678	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 679	struct ttm_validate_buffer val_buf;
 680	unsigned err_count = 0;
 681
 682	if (!res->func->create)
 683		return 0;
 684
 685	val_buf.bo = NULL;
 686	val_buf.num_shared = 0;
 687	if (res->guest_memory_bo)
 688		val_buf.bo = &res->guest_memory_bo->tbo;
 689	do {
 690		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 691		if (likely(ret != -EBUSY))
 692			break;
 693
 694		spin_lock(&dev_priv->resource_lock);
 695		if (list_empty(lru_list) || !res->func->may_evict) {
 696			DRM_ERROR("Out of device resources "
 697				  "for %s.\n", res->func->type_name);
 698			ret = -EBUSY;
 699			spin_unlock(&dev_priv->resource_lock);
 700			break;
 701		}
 702
 703		evict_res = vmw_resource_reference
 704			(list_first_entry(lru_list, struct vmw_resource,
 705					  lru_head));
 706		list_del_init(&evict_res->lru_head);
 707
 708		spin_unlock(&dev_priv->resource_lock);
 709
 710		/* Trylock backup buffers with a NULL ticket. */
 711		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 712		if (unlikely(ret != 0)) {
 713			spin_lock(&dev_priv->resource_lock);
 714			list_add_tail(&evict_res->lru_head, lru_list);
 715			spin_unlock(&dev_priv->resource_lock);
 716			if (ret == -ERESTARTSYS ||
 717			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 718				vmw_resource_unreference(&evict_res);
 719				goto out_no_validate;
 720			}
 721		}
 722
 723		vmw_resource_unreference(&evict_res);
 724	} while (1);
 725
 726	if (unlikely(ret != 0))
 727		goto out_no_validate;
 728	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 729		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 730		vmw_user_bo_unref(&res->guest_memory_bo);
 731	}
 732
 733	return 0;
 734
 735out_no_validate:
 736	return ret;
 737}
 738
 739
 740/**
 741 * vmw_resource_unbind_list
 742 *
 743 * @vbo: Pointer to the current backing MOB.
 744 *
 745 * Evicts the Guest Backed hardware resource if the backup
 746 * buffer is being moved out of MOB memory.
 747 * Note that this function will not race with the resource
 748 * validation code, since resource validation and eviction
 749 * both require the backup buffer to be reserved.
 750 */
 751void vmw_resource_unbind_list(struct vmw_bo *vbo)
 752{
 753	struct ttm_validate_buffer val_buf = {
 754		.bo = &vbo->tbo,
 755		.num_shared = 0
 756	};
 757
 758	dma_resv_assert_held(vbo->tbo.base.resv);
 759	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 760		struct rb_node *node = vbo->res_tree.rb_node;
 761		struct vmw_resource *res =
 762			container_of(node, struct vmw_resource, mob_node);
 763
 764		if (!WARN_ON_ONCE(!res->func->unbind))
 765			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 766
 767		res->guest_memory_dirty = true;
 768		res->res_dirty = false;
 769		vmw_resource_mob_detach(res);
 770	}
 771
 772	(void) ttm_bo_wait(&vbo->tbo, false, false);
 773}
 774
 775
 776/**
 777 * vmw_query_readback_all - Read back cached query states
 778 *
 779 * @dx_query_mob: Buffer containing the DX query MOB
 780 *
 781 * Read back cached states from the device if they exist.  This function
 782 * assumes binding_mutex is held.
 783 */
 784int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
 785{
 786	struct vmw_resource *dx_query_ctx;
 787	struct vmw_private *dev_priv;
 788	struct {
 789		SVGA3dCmdHeader header;
 790		SVGA3dCmdDXReadbackAllQuery body;
 791	} *cmd;
 792
 793
 794	/* No query bound, so do nothing */
 795	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 796		return 0;
 797
 798	dx_query_ctx = dx_query_mob->dx_query_ctx;
 799	dev_priv     = dx_query_ctx->dev_priv;
 800
 801	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 802	if (unlikely(cmd == NULL))
 803		return -ENOMEM;
 804
 805	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 806	cmd->header.size = sizeof(cmd->body);
 807	cmd->body.cid    = dx_query_ctx->id;
 808
 809	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 810
 811	/* Triggers a rebind the next time the affected context is bound */
 812	dx_query_mob->dx_query_ctx = NULL;
 813
 814	return 0;
 815}
 816
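/*
 * The reserve/commit pairing above is the driver's general FIFO command
 * pattern: reserve space for the command, fill in the SVGA header and
 * body, then commit exactly the reserved size. A generic sketch, assuming
 * a struct with an SVGA3dCmdHeader header and a command body as above:
 *
 *	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_id);
 *	if (!cmd)
 *		return -ENOMEM;
 *	cmd->header.id = SVGA_3D_CMD_...;
 *	cmd->header.size = sizeof(cmd->body);
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */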
 817
 818
 819/**
 820 * vmw_query_move_notify - Read back cached query states
 821 *
 822 * @bo: The TTM buffer object about to move.
 823 * @old_mem: The memory region @bo is moving from.
 824 * @new_mem: The memory region @bo is moving to.
 825 *
 826 * Called before the query MOB is swapped out to read back cached query
 827 * states from the device.
 828 */
 829void vmw_query_move_notify(struct ttm_buffer_object *bo,
 830			   struct ttm_resource *old_mem,
 831			   struct ttm_resource *new_mem)
 832{
 833	struct vmw_bo *dx_query_mob;
 834	struct ttm_device *bdev = bo->bdev;
 835	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
 836
 837	mutex_lock(&dev_priv->binding_mutex);
 838
 839	/* If BO is being moved from MOB to system memory */
 840	if (old_mem &&
 841	    new_mem->mem_type == TTM_PL_SYSTEM &&
 842	    old_mem->mem_type == VMW_PL_MOB) {
 843		struct vmw_fence_obj *fence;
 844
 845		dx_query_mob = to_vmw_bo(&bo->base);
 846		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 847			mutex_unlock(&dev_priv->binding_mutex);
 848			return;
 849		}
 850
 851		(void) vmw_query_readback_all(dx_query_mob);
 852		mutex_unlock(&dev_priv->binding_mutex);
 853
 854		/* Create a fence and attach the BO to it */
 855		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 856		vmw_bo_fence_single(bo, fence);
 857
 858		if (fence != NULL)
 859			vmw_fence_obj_unreference(&fence);
 860
 861		(void) ttm_bo_wait(bo, false, false);
 862	} else
 863		mutex_unlock(&dev_priv->binding_mutex);
 864}
 865
 866/**
 867 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 868 *
 869 * @res:            The resource being queried.
 870 */
 871bool vmw_resource_needs_backup(const struct vmw_resource *res)
 872{
 873	return res->func->needs_guest_memory;
 874}
 875
 876/**
 877 * vmw_resource_evict_type - Evict all resources of a specific type
 878 *
 879 * @dev_priv:       Pointer to a device private struct
 880 * @type:           The resource type to evict
 881 *
 882 * To avoid thrashing starvation or as part of the hibernation sequence,
 883 * try to evict all evictable resources of a specific type.
 884 */
 885static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 886				    enum vmw_res_type type)
 887{
 888	struct list_head *lru_list = &dev_priv->res_lru[type];
 889	struct vmw_resource *evict_res;
 890	unsigned err_count = 0;
 891	int ret;
 892	struct ww_acquire_ctx ticket;
 893
 894	do {
 895		spin_lock(&dev_priv->resource_lock);
 896
 897		if (list_empty(lru_list))
 898			goto out_unlock;
 899
 900		evict_res = vmw_resource_reference(
 901			list_first_entry(lru_list, struct vmw_resource,
 902					 lru_head));
 903		list_del_init(&evict_res->lru_head);
 904		spin_unlock(&dev_priv->resource_lock);
 905
 906		/* Wait lock backup buffers with a ticket. */
 907		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 908		if (unlikely(ret != 0)) {
 909			spin_lock(&dev_priv->resource_lock);
 910			list_add_tail(&evict_res->lru_head, lru_list);
 911			spin_unlock(&dev_priv->resource_lock);
 912			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 913				vmw_resource_unreference(&evict_res);
 914				return;
 915			}
 916		}
 917
 918		vmw_resource_unreference(&evict_res);
 919	} while (1);
 920
 921out_unlock:
 922	spin_unlock(&dev_priv->resource_lock);
 923}
 924
 925/**
 926 * vmw_resource_evict_all - Evict all evictable resources
 927 *
 928 * @dev_priv:       Pointer to a device private struct
 929 *
 930 * To avoid thrashing starvation or as part of the hibernation sequence,
 931 * evict all evictable resources. In particular this means that all
 932 * guest-backed resources that are registered with the device are
 933 * evicted and the OTable becomes clean.
 934 */
 935void vmw_resource_evict_all(struct vmw_private *dev_priv)
 936{
 937	enum vmw_res_type type;
 938
 939	mutex_lock(&dev_priv->cmdbuf_mutex);
 940
 941	for (type = 0; type < vmw_res_max; ++type)
 942		vmw_resource_evict_type(dev_priv, type);
 943
 944	mutex_unlock(&dev_priv->cmdbuf_mutex);
 945}
 946
 947/*
 948 * vmw_resource_pin - Add a pin reference on a resource
 949 *
 950 * @res: The resource to add a pin reference on
 951 *
 952 * This function adds a pin reference, and if needed validates the resource.
 953 * Having a pin reference means that the resource can never be evicted, and
 954 * its id will never change as long as there is a pin reference.
 955 * This function returns 0 on success and a negative error code on failure.
 956 */
 957int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 958{
 959	struct ttm_operation_ctx ctx = { interruptible, false };
 960	struct vmw_private *dev_priv = res->dev_priv;
 961	int ret;
 962
 963	mutex_lock(&dev_priv->cmdbuf_mutex);
 964	ret = vmw_resource_reserve(res, interruptible, false);
 965	if (ret)
 966		goto out_no_reserve;
 967
 968	if (res->pin_count == 0) {
 969		struct vmw_bo *vbo = NULL;
 970
 971		if (res->guest_memory_bo) {
 972			vbo = res->guest_memory_bo;
 973
 974			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
 975			if (ret)
 976				goto out_no_validate;
 977			if (!vbo->tbo.pin_count) {
 978				vmw_bo_placement_set(vbo,
 979						     res->func->domain,
 980						     res->func->busy_domain);
 981				ret = ttm_bo_validate
 982					(&vbo->tbo,
 983					 &vbo->placement,
 984					 &ctx);
 985				if (ret) {
 986					ttm_bo_unreserve(&vbo->tbo);
 987					goto out_no_validate;
 988				}
 989			}
 990
 991			/* Do we really need to pin the MOB as well? */
 992			vmw_bo_pin_reserved(vbo, true);
 993		}
 994		ret = vmw_resource_validate(res, interruptible, true);
 995		if (vbo)
 996			ttm_bo_unreserve(&vbo->tbo);
 997		if (ret)
 998			goto out_no_validate;
 999	}
1000	res->pin_count++;
1001
1002out_no_validate:
1003	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1004out_no_reserve:
1005	mutex_unlock(&dev_priv->cmdbuf_mutex);
1006
1007	return ret;
1008}
1009
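/*
 * A minimal usage sketch for the pin/unpin pair (error handling elided);
 * while pinned, the resource cannot be evicted and res->id stays stable:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_resource_unpin(res);
 */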
1010/**
1011 * vmw_resource_unpin - Remove a pin reference from a resource
1012 *
1013 * @res: The resource to remove a pin reference from
1014 *
1015 * Having a pin reference means that the resource can never be evicted, and
1016 * its id will never change as long as there is a pin reference.
1017 */
1018void vmw_resource_unpin(struct vmw_resource *res)
1019{
1020	struct vmw_private *dev_priv = res->dev_priv;
1021	int ret;
1022
1023	mutex_lock(&dev_priv->cmdbuf_mutex);
1024
1025	ret = vmw_resource_reserve(res, false, true);
1026	WARN_ON(ret);
1027
1028	WARN_ON(res->pin_count == 0);
1029	if (--res->pin_count == 0 && res->guest_memory_bo) {
1030		struct vmw_bo *vbo = res->guest_memory_bo;
1031
1032		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
1033		vmw_bo_pin_reserved(vbo, false);
1034		ttm_bo_unreserve(&vbo->tbo);
1035	}
1036
1037	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1038
1039	mutex_unlock(&dev_priv->cmdbuf_mutex);
1040}
1041
1042/**
1043 * vmw_res_type - Return the resource type
1044 *
1045 * @res: Pointer to the resource
1046 */
1047enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1048{
1049	return res->func->res_type;
1050}
1051
1052/**
1053 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1054 * sequential range of touched backing store memory.
1055 * @res: The resource.
1056 * @start: The first page touched.
1057 * @end: The last page touched + 1.
1058 */
1059void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1060			       pgoff_t end)
1061{
1062	if (res->dirty)
1063		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1064					   end << PAGE_SHIFT);
1065}
1066
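/*
 * Callers of vmw_resource_dirty_update() work in whole pages. A sketch of
 * mapping a byte range [offset, offset + size) in the backing store to
 * the expected page arguments, mirroring the computation in
 * vmw_resource_do_validate() above:
 *
 *	pgoff_t start = offset >> PAGE_SHIFT;
 *	pgoff_t end = PFN_UP(offset + size);
 *
 *	vmw_resource_dirty_update(res, start, end);
 */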
1067/**
1068 * vmw_resources_clean - Clean resources intersecting a mob range
1069 * @vbo: The mob buffer object
1070 * @start: The mob page offset starting the range
1071 * @end: The mob page offset ending the range
1072 * @num_prefault: Returns how many pages including the first have been
1073 * cleaned and are ok to prefault
1074 */
1075int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1076			pgoff_t end, pgoff_t *num_prefault)
1077{
1078	struct rb_node *cur = vbo->res_tree.rb_node;
1079	struct vmw_resource *found = NULL;
1080	unsigned long res_start = start << PAGE_SHIFT;
1081	unsigned long res_end = end << PAGE_SHIFT;
1082	unsigned long last_cleaned = 0;
1083
1084	/*
1085	 * Find the resource with the lowest guest_memory_offset that
1086	 * intersects the range.
1087	 */
1088	while (cur) {
1089		struct vmw_resource *cur_res =
1090			container_of(cur, struct vmw_resource, mob_node);
1091
1092		if (cur_res->guest_memory_offset >= res_end) {
1093			cur = cur->rb_left;
1094		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
1095			   res_start) {
1096			cur = cur->rb_right;
1097		} else {
1098			found = cur_res;
1099			cur = cur->rb_left;
1100			/* Continue to look for resources with lower offsets */
1101		}
1102	}
1103
1104	/*
1105	 * In order of increasing guest_memory_offset, clean dirty resources
1106	 * intersecting the range.
1107	 */
1108	while (found) {
1109		if (found->res_dirty) {
1110			int ret;
1111
1112			if (!found->func->clean)
1113				return -EINVAL;
1114
1115			ret = found->func->clean(found);
1116			if (ret)
1117				return ret;
1118
1119			found->res_dirty = false;
1120		}
1121		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
1122		cur = rb_next(&found->mob_node);
1123		if (!cur)
1124			break;
1125
1126		found = container_of(cur, struct vmw_resource, mob_node);
1127		if (found->guest_memory_offset >= res_end)
1128			break;
1129	}
1130
1131	/*
1132	 * Set the number of pages allowed for prefaulting and fence the buffer object
1133	 */
1134	*num_prefault = 1;
1135	if (last_cleaned > res_start) {
1136		struct ttm_buffer_object *bo = &vbo->tbo;
1137
1138		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1139						      PAGE_SIZE);
1140		vmw_bo_fence_single(bo, NULL);
1141	}
1142
1143	return 0;
1144}
v4.17
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
 
 
 
 
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
  34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38struct vmw_user_dma_buffer {
  39	struct ttm_prime_object prime;
  40	struct vmw_dma_buffer dma;
  41};
 
 
 
 
 
 
 
 
 
 
 
 
  42
  43struct vmw_bo_user_rep {
  44	uint32_t handle;
  45	uint64_t map_handle;
  46};
 
 
 
  47
  48static inline struct vmw_dma_buffer *
  49vmw_dma_buffer(struct ttm_buffer_object *bo)
  50{
  51	return container_of(bo, struct vmw_dma_buffer, base);
  52}
  53
  54static inline struct vmw_user_dma_buffer *
  55vmw_user_dma_buffer(struct ttm_buffer_object *bo)
 
 
 
  56{
  57	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  58	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
 
 
 
 
 
 
  59}
  60
  61struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  62{
  63	kref_get(&res->kref);
  64	return res;
  65}
  66
  67struct vmw_resource *
  68vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  69{
  70	return kref_get_unless_zero(&res->kref) ? res : NULL;
  71}
  72
  73/**
  74 * vmw_resource_release_id - release a resource id to the id manager.
  75 *
  76 * @res: Pointer to the resource.
  77 *
  78 * Release the resource id to the resource id manager and set it to -1
  79 */
  80void vmw_resource_release_id(struct vmw_resource *res)
  81{
  82	struct vmw_private *dev_priv = res->dev_priv;
  83	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  84
  85	write_lock(&dev_priv->resource_lock);
  86	if (res->id != -1)
  87		idr_remove(idr, res->id);
  88	res->id = -1;
  89	write_unlock(&dev_priv->resource_lock);
  90}
  91
  92static void vmw_resource_release(struct kref *kref)
  93{
  94	struct vmw_resource *res =
  95	    container_of(kref, struct vmw_resource, kref);
  96	struct vmw_private *dev_priv = res->dev_priv;
  97	int id;
 
  98	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  99
 100	write_lock(&dev_priv->resource_lock);
 101	res->avail = false;
 102	list_del_init(&res->lru_head);
 103	write_unlock(&dev_priv->resource_lock);
 104	if (res->backup) {
 105		struct ttm_buffer_object *bo = &res->backup->base;
 106
 107		ttm_bo_reserve(bo, false, false, NULL);
 108		if (!list_empty(&res->mob_head) &&
 
 109		    res->func->unbind != NULL) {
 110			struct ttm_validate_buffer val_buf;
 111
 112			val_buf.bo = bo;
 113			val_buf.shared = false;
 114			res->func->unbind(res, false, &val_buf);
 115		}
 116		res->backup_dirty = false;
 117		list_del_init(&res->mob_head);
 
 
 
 
 118		ttm_bo_unreserve(bo);
 119		vmw_dmabuf_unreference(&res->backup);
 120	}
 121
 122	if (likely(res->hw_destroy != NULL)) {
 123		mutex_lock(&dev_priv->binding_mutex);
 124		vmw_binding_res_list_kill(&res->binding_head);
 125		mutex_unlock(&dev_priv->binding_mutex);
 126		res->hw_destroy(res);
 127	}
 128
 129	id = res->id;
 130	if (res->res_free != NULL)
 131		res->res_free(res);
 132	else
 133		kfree(res);
 134
 135	write_lock(&dev_priv->resource_lock);
 136	if (id != -1)
 137		idr_remove(idr, id);
 138	write_unlock(&dev_priv->resource_lock);
 139}
 140
 141void vmw_resource_unreference(struct vmw_resource **p_res)
 142{
 143	struct vmw_resource *res = *p_res;
 144
 145	*p_res = NULL;
 146	kref_put(&res->kref, vmw_resource_release);
 147}
 148
 149
 150/**
 151 * vmw_resource_alloc_id - release a resource id to the id manager.
 152 *
 153 * @res: Pointer to the resource.
 154 *
 155 * Allocate the lowest free resource from the resource manager, and set
 156 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 157 */
 158int vmw_resource_alloc_id(struct vmw_resource *res)
 159{
 160	struct vmw_private *dev_priv = res->dev_priv;
 161	int ret;
 162	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 163
 164	BUG_ON(res->id != -1);
 165
 166	idr_preload(GFP_KERNEL);
 167	write_lock(&dev_priv->resource_lock);
 168
 169	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 170	if (ret >= 0)
 171		res->id = ret;
 172
 173	write_unlock(&dev_priv->resource_lock);
 174	idr_preload_end();
 175	return ret < 0 ? ret : 0;
 176}
 177
 178/**
 179 * vmw_resource_init - initialize a struct vmw_resource
 180 *
 181 * @dev_priv:       Pointer to a device private struct.
 182 * @res:            The struct vmw_resource to initialize.
 183 * @obj_type:       Resource object type.
 184 * @delay_id:       Boolean whether to defer device id allocation until
 185 *                  the first validation.
 186 * @res_free:       Resource destructor.
 187 * @func:           Resource function table.
 188 */
 189int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 190		      bool delay_id,
 191		      void (*res_free) (struct vmw_resource *res),
 192		      const struct vmw_res_func *func)
 193{
 194	kref_init(&res->kref);
 195	res->hw_destroy = NULL;
 196	res->res_free = res_free;
 197	res->avail = false;
 198	res->dev_priv = dev_priv;
 199	res->func = func;
 
 200	INIT_LIST_HEAD(&res->lru_head);
 201	INIT_LIST_HEAD(&res->mob_head);
 202	INIT_LIST_HEAD(&res->binding_head);
 203	res->id = -1;
 204	res->backup = NULL;
 205	res->backup_offset = 0;
 206	res->backup_dirty = false;
 207	res->res_dirty = false;
 
 
 
 208	if (delay_id)
 209		return 0;
 210	else
 211		return vmw_resource_alloc_id(res);
 212}
 213
 214/**
 215 * vmw_resource_activate
 216 *
 217 * @res:        Pointer to the newly created resource
 218 * @hw_destroy: Destroy function. NULL if none.
 219 *
 220 * Activate a resource after the hardware has been made aware of it.
 221 * Set tye destroy function to @destroy. Typically this frees the
 222 * resource and destroys the hardware resources associated with it.
 223 * Activate basically means that the function vmw_resource_lookup will
 224 * find it.
 225 */
 226void vmw_resource_activate(struct vmw_resource *res,
 227			   void (*hw_destroy) (struct vmw_resource *))
 228{
 229	struct vmw_private *dev_priv = res->dev_priv;
 230
 231	write_lock(&dev_priv->resource_lock);
 232	res->avail = true;
 233	res->hw_destroy = hw_destroy;
 234	write_unlock(&dev_priv->resource_lock);
 235}
 236
 237/**
 238 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 239 * TTM user-space handle and perform basic type checks
 240 *
 241 * @dev_priv:     Pointer to a device private struct
 242 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 243 * @handle:       The TTM user-space handle
 244 * @converter:    Pointer to an object describing the resource type
 245 * @p_res:        On successful return the location pointed to will contain
 246 *                a pointer to a refcounted struct vmw_resource.
 247 *
 248 * If the handle can't be found or is associated with an incorrect resource
 249 * type, -EINVAL will be returned.
 250 */
 251int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 252				    struct ttm_object_file *tfile,
 253				    uint32_t handle,
 254				    const struct vmw_user_resource_conv
 255				    *converter,
 256				    struct vmw_resource **p_res)
 257{
 258	struct ttm_base_object *base;
 259	struct vmw_resource *res;
 260	int ret = -EINVAL;
 261
 262	base = ttm_base_object_lookup(tfile, handle);
 263	if (unlikely(base == NULL))
 264		return -EINVAL;
 265
 266	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 267		goto out_bad_resource;
 268
 269	res = converter->base_obj_to_res(base);
 270
 271	read_lock(&dev_priv->resource_lock);
 272	if (!res->avail || res->res_free != converter->res_free) {
 273		read_unlock(&dev_priv->resource_lock);
 274		goto out_bad_resource;
 275	}
 276
 277	kref_get(&res->kref);
 278	read_unlock(&dev_priv->resource_lock);
 279
 280	*p_res = res;
 281	ret = 0;
 282
 283out_bad_resource:
 284	ttm_base_object_unref(&base);
 285
 286	return ret;
 287}
 288
 289/**
 290 * Helper function that looks either a surface or dmabuf.
 291 *
 292 * The pointer this pointed at by out_surf and out_buf needs to be null.
 293 */
 294int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 295			   struct ttm_object_file *tfile,
 296			   uint32_t handle,
 297			   struct vmw_surface **out_surf,
 298			   struct vmw_dma_buffer **out_buf)
 299{
 
 300	struct vmw_resource *res;
 301	int ret;
 302
 303	BUG_ON(*out_surf || *out_buf);
 304
 305	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 306					      user_surface_converter,
 307					      &res);
 308	if (!ret) {
 309		*out_surf = vmw_res_to_srf(res);
 310		return 0;
 311	}
 312
 313	*out_surf = NULL;
 314	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 315	return ret;
 316}
 317
 318/**
 319 * Buffer management.
 320 */
 321
 322/**
 323 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 324 *
 325 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 326 * @size: The requested buffer size.
 327 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 328 */
 329static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 330				  bool user)
 331{
 332	static size_t struct_size, user_struct_size;
 333	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 334	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 335
 336	if (unlikely(struct_size == 0)) {
 337		size_t backend_size = ttm_round_pot(vmw_tt_size);
 338
 339		struct_size = backend_size +
 340			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 341		user_struct_size = backend_size +
 342			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 343	}
 344
 345	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 346		page_array_size +=
 347			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 348
 349	return ((user) ? user_struct_size : struct_size) +
 350		page_array_size;
 351}
 352
 353void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 354{
 355	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 356
 357	vmw_dma_buffer_unmap(vmw_bo);
 358	kfree(vmw_bo);
 359}
 360
 361static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 362{
 363	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 364
 365	vmw_dma_buffer_unmap(&vmw_user_bo->dma);
 366	ttm_prime_object_kfree(vmw_user_bo, prime);
 367}
 368
 369int vmw_dmabuf_init(struct vmw_private *dev_priv,
 370		    struct vmw_dma_buffer *vmw_bo,
 371		    size_t size, struct ttm_placement *placement,
 372		    bool interruptible,
 373		    void (*bo_free) (struct ttm_buffer_object *bo))
 374{
 375	struct ttm_bo_device *bdev = &dev_priv->bdev;
 376	size_t acc_size;
 377	int ret;
 378	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 379
 380	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 381
 382	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 383	memset(vmw_bo, 0, sizeof(*vmw_bo));
 384
 385	INIT_LIST_HEAD(&vmw_bo->res_list);
 386
 387	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 388			  ttm_bo_type_device, placement,
 389			  0, interruptible, acc_size,
 390			  NULL, NULL, bo_free);
 391	return ret;
 392}
 393
 394static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 395{
 396	struct vmw_user_dma_buffer *vmw_user_bo;
 397	struct ttm_base_object *base = *p_base;
 398	struct ttm_buffer_object *bo;
 399
 400	*p_base = NULL;
 401
 402	if (unlikely(base == NULL))
 403		return;
 404
 405	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 406				   prime.base);
 407	bo = &vmw_user_bo->dma.base;
 408	ttm_bo_unref(&bo);
 409}
 410
 411static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 412					    enum ttm_ref_type ref_type)
 413{
 414	struct vmw_user_dma_buffer *user_bo;
 415	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 416
 417	switch (ref_type) {
 418	case TTM_REF_SYNCCPU_WRITE:
 419		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 420		break;
 421	default:
 422		BUG();
 423	}
 424}
 425
 426/**
 427 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 428 *
 429 * @dev_priv: Pointer to a struct device private.
 430 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 431 * object.
 432 * @size: Size of the dma buffer.
 433 * @shareable: Boolean whether the buffer is shareable with other open files.
 434 * @handle: Pointer to where the handle value should be assigned.
 435 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 436 * should be assigned.
 437 */
 438int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 439			  struct ttm_object_file *tfile,
 440			  uint32_t size,
 441			  bool shareable,
 442			  uint32_t *handle,
 443			  struct vmw_dma_buffer **p_dma_buf,
 444			  struct ttm_base_object **p_base)
 445{
 446	struct vmw_user_dma_buffer *user_bo;
 447	struct ttm_buffer_object *tmp;
 448	int ret;
 449
 450	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 451	if (unlikely(!user_bo)) {
 452		DRM_ERROR("Failed to allocate a buffer.\n");
 453		return -ENOMEM;
 454	}
 455
 456	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 457			      (dev_priv->has_mob) ?
 458			      &vmw_sys_placement :
 459			      &vmw_vram_sys_placement, true,
 460			      &vmw_user_dmabuf_destroy);
 461	if (unlikely(ret != 0))
 462		return ret;
 463
 464	tmp = ttm_bo_reference(&user_bo->dma.base);
 465	ret = ttm_prime_object_init(tfile,
 466				    size,
 467				    &user_bo->prime,
 468				    shareable,
 469				    ttm_buffer_type,
 470				    &vmw_user_dmabuf_release,
 471				    &vmw_user_dmabuf_ref_obj_release);
 472	if (unlikely(ret != 0)) {
 473		ttm_bo_unref(&tmp);
 474		goto out_no_base_object;
 475	}
 476
 477	*p_dma_buf = &user_bo->dma;
 478	if (p_base) {
 479		*p_base = &user_bo->prime.base;
 480		kref_get(&(*p_base)->refcount);
 481	}
 482	*handle = user_bo->prime.base.hash.key;
 483
 484out_no_base_object:
 485	return ret;
 486}
 487
 488/**
 489 * vmw_user_dmabuf_verify_access - verify access permissions on this
 490 * buffer object.
 491 *
 492 * @bo: Pointer to the buffer object being accessed
 493 * @tfile: Identifying the caller.
 494 */
 495int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 496				  struct ttm_object_file *tfile)
 497{
 498	struct vmw_user_dma_buffer *vmw_user_bo;
 499
 500	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 501		return -EPERM;
 502
 503	vmw_user_bo = vmw_user_dma_buffer(bo);
 504
 505	/* Check that the caller has opened the object. */
 506	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 507		return 0;
 508
 509	DRM_ERROR("Could not grant buffer access.\n");
 510	return -EPERM;
 511}
 512
 513/**
 514 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 515 * access, idling previous GPU operations on the buffer and optionally
 516 * blocking it for further command submissions.
 517 *
 518 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 519 * @tfile: Identifying the caller.
 520 * @flags: Flags indicating how the grab should be performed.
 521 *
 522 * A blocking grab will be automatically released when @tfile is closed.
 523 */
 524static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 525					struct ttm_object_file *tfile,
 526					uint32_t flags)
 527{
 528	struct ttm_buffer_object *bo = &user_bo->dma.base;
 529	bool existed;
 530	int ret;
 531
 532	if (flags & drm_vmw_synccpu_allow_cs) {
 533		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 534		long lret;
 535
 536		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
 537							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 538		if (!lret)
 539			return -EBUSY;
 540		else if (lret < 0)
 541			return lret;
 542		return 0;
 543	}
 544
 545	ret = ttm_bo_synccpu_write_grab
 546		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 547	if (unlikely(ret != 0))
 548		return ret;
 549
 550	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 551				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 552	if (ret != 0 || existed)
 553		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 554
 555	return ret;
 556}
 557
 558/**
 559 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 560 * and unblock command submission on the buffer if blocked.
 561 *
 562 * @handle: Handle identifying the buffer object.
 563 * @tfile: Identifying the caller.
 564 * @flags: Flags indicating the type of release.
 565 */
 566static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 567					   struct ttm_object_file *tfile,
 568					   uint32_t flags)
 569{
 570	if (!(flags & drm_vmw_synccpu_allow_cs))
 571		return ttm_ref_object_base_unref(tfile, handle,
 572						 TTM_REF_SYNCCPU_WRITE);
 573
 574	return 0;
 575}
 576
 577/**
 578 * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
 579 * functionality.
 580 *
 581 * @dev: Identifies the drm device.
 582 * @data: Pointer to the ioctl argument.
 583 * @file_priv: Identifies the caller.
 584 *
 585 * This function checks the ioctl arguments for validity and calls the
 586 * relevant synccpu functions.
 587 */
 588int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 589				  struct drm_file *file_priv)
 590{
 591	struct drm_vmw_synccpu_arg *arg =
 592		(struct drm_vmw_synccpu_arg *) data;
 593	struct vmw_dma_buffer *dma_buf;
 594	struct vmw_user_dma_buffer *user_bo;
 595	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 596	struct ttm_base_object *buffer_base;
 597	int ret;
 598
 599	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 600	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 601			       drm_vmw_synccpu_dontblock |
 602			       drm_vmw_synccpu_allow_cs)) != 0) {
 603		DRM_ERROR("Illegal synccpu flags.\n");
 604		return -EINVAL;
 605	}
 606
 607	switch (arg->op) {
 608	case drm_vmw_synccpu_grab:
 609		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
 610					     &buffer_base);
 611		if (unlikely(ret != 0))
 612			return ret;
 613
 614		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 615				       dma);
 616		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 617		vmw_dmabuf_unreference(&dma_buf);
 618		ttm_base_object_unref(&buffer_base);
 619		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 620			     ret != -EBUSY)) {
 621			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 622				  (unsigned int) arg->handle);
 623			return ret;
 624		}
 625		break;
 626	case drm_vmw_synccpu_release:
 627		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 628						      arg->flags);
 629		if (unlikely(ret != 0)) {
 630			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 631				  (unsigned int) arg->handle);
 632			return ret;
 633		}
 634		break;
 635	default:
 636		DRM_ERROR("Invalid synccpu operation.\n");
 637		return -EINVAL;
 638	}
 639
 640	return 0;
 641}
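
/*
 * A minimal in-kernel usage sketch of the synccpu pair above, guarded
 * out since it is illustration only: vmw_example_synccpu_cycle() and its
 * arguments are hypothetical and not part of the driver.
 */
#if 0
static int vmw_example_synccpu_cycle(struct vmw_user_dma_buffer *user_bo,
				     struct ttm_object_file *tfile,
				     uint32_t handle)
{
	int ret;

	/* Idle previous GPU operations and block further submissions. */
	ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile,
					   drm_vmw_synccpu_read);
	if (ret)
		return ret;

	/* ... CPU access to the buffer contents would go here ... */

	/* Drop the grab so that command submission may resume. */
	return vmw_user_dmabuf_synccpu_release(handle, tfile,
					       drm_vmw_synccpu_read);
}
#endif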
 642
 643int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 644			   struct drm_file *file_priv)
 645{
 646	struct vmw_private *dev_priv = vmw_priv(dev);
 647	union drm_vmw_alloc_dmabuf_arg *arg =
 648	    (union drm_vmw_alloc_dmabuf_arg *)data;
 649	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 650	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 651	struct vmw_dma_buffer *dma_buf;
 652	uint32_t handle;
 653	int ret;
 654
 655	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 656	if (unlikely(ret != 0))
 657		return ret;
 658
 659	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 660				    req->size, false, &handle, &dma_buf,
 661				    NULL);
 662	if (unlikely(ret != 0))
 663		goto out_no_dmabuf;
 664
 665	rep->handle = handle;
 666	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 667	rep->cur_gmr_id = handle;
 668	rep->cur_gmr_offset = 0;
 669
 670	vmw_dmabuf_unreference(&dma_buf);
 671
 672out_no_dmabuf:
 673	ttm_read_unlock(&dev_priv->reservation_sem);
 674
 675	return ret;
 676}
 677
 678int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 679			   struct drm_file *file_priv)
 680{
 681	struct drm_vmw_unref_dmabuf_arg *arg =
 682	    (struct drm_vmw_unref_dmabuf_arg *)data;
 683
 684	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 685					 arg->handle,
 686					 TTM_REF_USAGE);
 687}
 688
 689int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 690			   uint32_t handle, struct vmw_dma_buffer **out,
 691			   struct ttm_base_object **p_base)
 692{
 693	struct vmw_user_dma_buffer *vmw_user_bo;
 694	struct ttm_base_object *base;
 695
 696	base = ttm_base_object_lookup(tfile, handle);
 697	if (unlikely(base == NULL)) {
 698		pr_err("Invalid buffer object handle 0x%08lx\n",
 699		       (unsigned long)handle);
 700		return -ESRCH;
 701	}
 702
 703	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 704		ttm_base_object_unref(&base);
 705		pr_err("Invalid buffer object handle 0x%08lx\n",
 706		       (unsigned long)handle);
 707		return -EINVAL;
 708	}
 709
 710	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 711				   prime.base);
 712	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 713	if (p_base)
 714		*p_base = base;
 715	else
 716		ttm_base_object_unref(&base);
 717	*out = &vmw_user_bo->dma;
 718
 719	return 0;
 720}
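
/*
 * Illustration only: the lookup/unreference pattern expected by
 * vmw_user_dmabuf_lookup() above when the caller also wants the base
 * object pinned. vmw_example_lookup_use() is hypothetical.
 */
#if 0
static int vmw_example_lookup_use(struct ttm_object_file *tfile,
				  uint32_t handle)
{
	struct vmw_dma_buffer *dma_buf;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &base);
	if (ret)
		return ret;

	/* ... use dma_buf; both references are held at this point ... */

	vmw_dmabuf_unreference(&dma_buf);
	ttm_base_object_unref(&base);
	return 0;
}
#endif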
 721
 722int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 723			      struct vmw_dma_buffer *dma_buf,
 724			      uint32_t *handle)
 725{
 726	struct vmw_user_dma_buffer *user_bo;
 727
 728	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 729		return -EINVAL;
 730
 731	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 732
 733	*handle = user_bo->prime.base.hash.key;
 734	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 735				  TTM_REF_USAGE, NULL, false);
 736}
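
/*
 * Hypothetical export pattern for vmw_user_dmabuf_reference() above,
 * illustration only: turn a kernel-side vmw_dma_buffer into a per-file
 * handle that user space can pass back to the driver.
 */
#if 0
static int vmw_example_export_handle(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf)
{
	uint32_t handle;
	int ret = vmw_user_dmabuf_reference(tfile, dma_buf, &handle);

	if (ret)
		return ret;

	/* @handle is now valid for lookups through @tfile. */
	return 0;
}
#endif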
 737
 738/**
 739 * vmw_dumb_create - Create a dumb kms buffer
 740 *
 741 * @file_priv: Pointer to a struct drm_file identifying the caller.
 742 * @dev: Pointer to the drm device.
 743 * @args: Pointer to a struct drm_mode_create_dumb structure
 744 *
 745 * This is a driver callback for the core drm create_dumb functionality.
 746 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 747 * that the arguments have a different format.
 748 */
 749int vmw_dumb_create(struct drm_file *file_priv,
 750		    struct drm_device *dev,
 751		    struct drm_mode_create_dumb *args)
 752{
 753	struct vmw_private *dev_priv = vmw_priv(dev);
 754	struct vmw_dma_buffer *dma_buf;
 755	int ret;
 756
 757	args->pitch = args->width * ((args->bpp + 7) / 8);
 758	args->size = args->pitch * args->height;
 759
 760	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 761	if (unlikely(ret != 0))
 762		return ret;
 763
 764	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 765				    args->size, false, &args->handle,
 766				    &dma_buf, NULL);
 767	if (unlikely(ret != 0))
 768		goto out_no_dmabuf;
 769
 770	vmw_dmabuf_unreference(&dma_buf);
 771out_no_dmabuf:
 772	ttm_read_unlock(&dev_priv->reservation_sem);
 773	return ret;
 774}
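
/*
 * Worked example of the pitch/size computation above, assuming a
 * hypothetical 1024x768 dumb buffer at 32 bpp:
 *
 *   pitch = 1024 * ((32 + 7) / 8) = 1024 * 4 = 4096 bytes
 *   size  = 4096 * 768            = 3145728 bytes (3 MiB)
 *
 * The (bpp + 7) / 8 term rounds sub-byte depths up to whole bytes.
 */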
 775
 776/**
 777 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 778 *
 779 * @file_priv: Pointer to a struct drm_file identifying the caller.
 780 * @dev: Pointer to the drm device.
 781 * @handle: Handle identifying the dumb buffer.
 782 * @offset: The address space offset returned.
 783 *
 784 * This is a driver callback for the core drm dumb_map_offset functionality.
 785 */
 786int vmw_dumb_map_offset(struct drm_file *file_priv,
 787			struct drm_device *dev, uint32_t handle,
 788			uint64_t *offset)
 789{
 790	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 791	struct vmw_dma_buffer *out_buf;
 792	int ret;
 793
 794	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 795	if (ret != 0)
 796		return -EINVAL;
 797
 798	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
 799	vmw_dmabuf_unreference(&out_buf);
 800	return 0;
 801}
 802
 803/**
 804 * vmw_dumb_destroy - Destroy a dumb buffer
 805 *
 806 * @file_priv: Pointer to a struct drm_file identifying the caller.
 807 * @dev: Pointer to the drm device.
 808 * @handle: Handle identifying the dumb buffer.
 809 *
 810 * This is a driver callback for the core drm dumb_destroy functionality.
 811 */
 812int vmw_dumb_destroy(struct drm_file *file_priv,
 813		     struct drm_device *dev,
 814		     uint32_t handle)
 815{
 816	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 817					 handle, TTM_REF_USAGE);
 818}
 819
 820/**
 821 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 822 *
 823 * @res:            The resource for which to allocate a backup buffer.
 824 * @interruptible:  Whether any sleeps during allocation should be
 825 *                  performed while interruptible.
 826 */
 827static int vmw_resource_buf_alloc(struct vmw_resource *res,
 828				  bool interruptible)
 829{
 830	unsigned long size =
 831		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
 832	struct vmw_dma_buffer *backup;
 833	int ret;
 834
 835	if (likely(res->backup)) {
 836		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
 837		return 0;
 838	}
 839
 840	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
 841	if (unlikely(!backup))
 842		return -ENOMEM;
 843
 844	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
 845			      res->func->backup_placement,
 846			      interruptible,
 847			      &vmw_dmabuf_bo_free);
 848	if (unlikely(ret != 0))
 849		goto out_no_dmabuf;
 850
 851	res->backup = backup;
 852
 853out_no_dmabuf:
 854	return ret;
 855}
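
/*
 * The size computation above is the standard round-up-to-page-size
 * idiom, (x + PAGE_SIZE - 1) & PAGE_MASK. With 4 KiB pages, a 1-byte
 * backup size rounds to 4096 bytes and a 4097-byte size to 8192 bytes,
 * while an exact multiple such as 4096 is left unchanged.
 */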
 856
 857/**
 858 * vmw_resource_do_validate - Make a resource up-to-date and visible
 859 *                            to the device.
 860 *
 861 * @res:            The resource to make visible to the device.
 862 * @val_buf:        Information about a buffer possibly
 863 *                  containing backup data if a bind operation is needed.
 864 *
 865 * On hardware resource shortage, this function returns -EBUSY and
 866 * should be retried once resources have been freed up.
 867 */
 868static int vmw_resource_do_validate(struct vmw_resource *res,
 869				    struct ttm_validate_buffer *val_buf)
 870{
 871	int ret = 0;
 872	const struct vmw_res_func *func = res->func;
 873
 874	if (unlikely(res->id == -1)) {
 875		ret = func->create(res);
 876		if (unlikely(ret != 0))
 877			return ret;
 878	}
 879
 880	if (func->bind &&
 881	    ((func->needs_backup && list_empty(&res->mob_head) &&
 882	      val_buf->bo != NULL) ||
 883	     (!func->needs_backup && val_buf->bo != NULL))) {
 884		ret = func->bind(res, val_buf);
 885		if (unlikely(ret != 0))
 886			goto out_bind_failed;
 887		if (func->needs_backup)
 888			list_add_tail(&res->mob_head, &res->backup->res_list);
 889	}
 890
 891	/*
 892	 * Only do this on write operations, and move to
 893	 * vmw_resource_unreserve if it can be called after
 894	 * backup buffers have been unreserved. Otherwise
 895	 * sort out locking.
 896	 */
 897	res->res_dirty = true;
 898
 899	return 0;
 900
 901out_bind_failed:
 902	func->destroy(res);
 903
 904	return ret;
 905}
 906
 907/**
 908 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 909 * command submission.
 910 *
 911 * @res:               Pointer to the struct vmw_resource to unreserve.
 912 * @switch_backup:     Whether the backup buffer has been switched.
 913 * @new_backup:        Pointer to the new backup buffer if command
 914 *                     submission switched backup buffers. May be NULL.
 915 * @new_backup_offset: New backup offset if @switch_backup is true.
 916 *
 917 * Currently unreserving a resource means putting it back on the device's
 918 * resource lru list, so that it can be evicted if necessary.
 919 */
 920void vmw_resource_unreserve(struct vmw_resource *res,
 921			    bool switch_backup,
 922			    struct vmw_dma_buffer *new_backup,
 923			    unsigned long new_backup_offset)
 924{
 925	struct vmw_private *dev_priv = res->dev_priv;
 926
 927	if (!list_empty(&res->lru_head))
 928		return;
 929
 930	if (switch_backup && new_backup != res->backup) {
 931		if (res->backup) {
 932			lockdep_assert_held(&res->backup->base.resv->lock.base);
 933			list_del_init(&res->mob_head);
 934			vmw_dmabuf_unreference(&res->backup);
 935		}
 936
 937		if (new_backup) {
 938			res->backup = vmw_dmabuf_reference(new_backup);
 939			lockdep_assert_held(&new_backup->base.resv->lock.base);
 940			list_add_tail(&res->mob_head, &new_backup->res_list);
 941		} else {
 942			res->backup = NULL;
 943		}
 944	}
 945	if (switch_backup)
 946		res->backup_offset = new_backup_offset;
 947
 948	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 949		return;
 950
 951	write_lock(&dev_priv->resource_lock);
 952	list_add_tail(&res->lru_head,
 953		      &res->dev_priv->res_lru[res->func->res_type]);
 954	write_unlock(&dev_priv->resource_lock);
 955}
 956
 957/**
 958 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 959 *                             for a resource and in that case, allocate
 960 *                             one, reserve and validate it.
 961 *
 962 * @res:            The resource for which to allocate a backup buffer.
 963 * @interruptible:  Whether any sleeps during allocation should be
 964 *                  performed while interruptible.
 965 * @val_buf:        On successful return contains data about the
 966 *                  reserved and validated backup buffer.
 967 */
 968static int
 969vmw_resource_check_buffer(struct vmw_resource *res,
 970			  bool interruptible,
 971			  struct ttm_validate_buffer *val_buf)
 972{
 973	struct ttm_operation_ctx ctx = { true, false };
 974	struct list_head val_list;
 975	bool backup_dirty = false;
 976	int ret;
 977
 978	if (unlikely(res->backup == NULL)) {
 979		ret = vmw_resource_buf_alloc(res, interruptible);
 980		if (unlikely(ret != 0))
 981			return ret;
 982	}
 983
 984	INIT_LIST_HEAD(&val_list);
 985	val_buf->bo = ttm_bo_reference(&res->backup->base);
 986	val_buf->shared = false;
 987	list_add_tail(&val_buf->head, &val_list);
 988	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
 989	if (unlikely(ret != 0))
 990		goto out_no_reserve;
 991
 992	if (res->func->needs_backup && list_empty(&res->mob_head))
 993		return 0;
 994
 995	backup_dirty = res->backup_dirty;
 996	ret = ttm_bo_validate(&res->backup->base,
 997			      res->func->backup_placement,
 998			      &ctx);
 999
1000	if (unlikely(ret != 0))
1001		goto out_no_validate;
1002
1003	return 0;
1004
1005out_no_validate:
1006	ttm_eu_backoff_reservation(NULL, &val_list);
1007out_no_reserve:
1008	ttm_bo_unref(&val_buf->bo);
1009	if (backup_dirty)
1010		vmw_dmabuf_unreference(&res->backup);
1011
1012	return ret;
1013}
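
/*
 * Sketch of the check/backoff pairing, illustration only; see
 * vmw_resource_do_evict() below for the real caller.
 * vmw_example_check_then_backoff() is hypothetical.
 */
#if 0
static int vmw_example_check_then_backoff(struct vmw_resource *res)
{
	struct ttm_validate_buffer val_buf = { .bo = NULL, .shared = false };
	int ret = vmw_resource_check_buffer(res, true, &val_buf);

	if (ret)
		return ret;

	/* ... use the reserved and validated backup buffer here ... */

	vmw_resource_backoff_reservation(&val_buf);
	return 0;
}
#endif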
1014
1015/**
1016 * vmw_resource_reserve - Reserve a resource for command submission
1017 *
1018 * @res:            The resource to reserve.
1019 *
1020 * This function takes the resource off the LRU list and makes sure
1021 * a backup buffer is present for guest-backed resources. However,
1022 * the buffer may not be bound to the resource at this point.
1023 *
1024 */
1025int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1026			 bool no_backup)
1027{
1028	struct vmw_private *dev_priv = res->dev_priv;
1029	int ret;
1030
1031	write_lock(&dev_priv->resource_lock);
1032	list_del_init(&res->lru_head);
1033	write_unlock(&dev_priv->resource_lock);
1034
1035	if (res->func->needs_backup && res->backup == NULL &&
1036	    !no_backup) {
1037		ret = vmw_resource_buf_alloc(res, interruptible);
1038		if (unlikely(ret != 0)) {
1039			DRM_ERROR("Failed to allocate a backup buffer "
1040				  "of size %lu bytes.\n",
1041				  (unsigned long) res->backup_size);
1042			return ret;
1043		}
1044	}
1045
1046	return 0;
1047}
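
/*
 * A hypothetical round trip over the reserve/validate/unreserve API,
 * mirroring what vmw_resource_pin() below does; illustration only,
 * vmw_example_validate_cycle() is not part of the driver.
 */
#if 0
static int vmw_example_validate_cycle(struct vmw_resource *res)
{
	struct vmw_dma_buffer *vbo = res->backup;
	int ret;

	ret = vmw_resource_reserve(res, true, false);
	if (ret)
		return ret;

	/* The backup buffer must be reserved across validation. */
	if (vbo)
		(void) ttm_bo_reserve(&vbo->base, true, false, NULL);

	ret = vmw_resource_validate(res);

	if (vbo)
		ttm_bo_unreserve(&vbo->base);

	/* No backup switch: keep the current backup buffer and offset. */
	vmw_resource_unreserve(res, false, NULL, 0UL);
	return ret;
}
#endif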
1048
1049/**
1050 * vmw_resource_backoff_reservation - Unreserve and unreference a
1051 *                                    backup buffer
1052 *
1053 * @val_buf:        Backup buffer information.
1054 */
1055static void
1056vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1057{
1058	struct list_head val_list;
1059
1060	if (likely(val_buf->bo == NULL))
1061		return;
1062
1063	INIT_LIST_HEAD(&val_list);
1064	list_add_tail(&val_buf->head, &val_list);
1065	ttm_eu_backoff_reservation(NULL, &val_list);
1066	ttm_bo_unref(&val_buf->bo);
1067}
1068
1069/**
1070 * vmw_resource_do_evict - Evict a resource, and transfer its data
1071 *                         to a backup buffer.
1072 *
1073 * @res:            The resource to evict.
1074 * @interruptible:  Whether to wait interruptible.
1075 */
1076static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1077{
1078	struct ttm_validate_buffer val_buf;
1079	const struct vmw_res_func *func = res->func;
1080	int ret;
1081
1082	BUG_ON(!func->may_evict);
1083
1084	val_buf.bo = NULL;
1085	val_buf.shared = false;
1086	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1087	if (unlikely(ret != 0))
1088		return ret;
1089
1090	if (unlikely(func->unbind != NULL &&
1091		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1092		ret = func->unbind(res, res->res_dirty, &val_buf);
1093		if (unlikely(ret != 0))
1094			goto out_no_unbind;
1095		list_del_init(&res->mob_head);
1096	}
1097	ret = func->destroy(res);
1098	res->backup_dirty = true;
1099	res->res_dirty = false;
1100out_no_unbind:
1101	vmw_resource_backoff_reservation(&val_buf);
1102
1103	return ret;
1104}
1105
1106
1107/**
1108 * vmw_resource_validate - Make a resource up-to-date and visible
1109 *                         to the device.
1110 *
1111 * @res:            The resource to make visible to the device.
1112 *
1113 * On successful return, any backup DMA buffer pointed to by @res->backup will
1114 * be reserved and validated.
1115 * On hardware resource shortage, this function will repeatedly evict
1116 * resources of the same type until the validation succeeds.
1117 */
1118int vmw_resource_validate(struct vmw_resource *res)
1119{
1120	int ret;
1121	struct vmw_resource *evict_res;
1122	struct vmw_private *dev_priv = res->dev_priv;
1123	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1124	struct ttm_validate_buffer val_buf;
1125	unsigned err_count = 0;
1126
1127	if (!res->func->create)
1128		return 0;
1129
1130	val_buf.bo = NULL;
1131	val_buf.shared = false;
1132	if (res->backup)
1133		val_buf.bo = &res->backup->base;
1134	do {
1135		ret = vmw_resource_do_validate(res, &val_buf);
1136		if (likely(ret != -EBUSY))
1137			break;
1138
1139		write_lock(&dev_priv->resource_lock);
1140		if (list_empty(lru_list) || !res->func->may_evict) {
1141			DRM_ERROR("Out of device resources "
1142				  "for %s.\n", res->func->type_name);
1143			ret = -EBUSY;
1144			write_unlock(&dev_priv->resource_lock);
1145			break;
1146		}
1147
1148		evict_res = vmw_resource_reference
1149			(list_first_entry(lru_list, struct vmw_resource,
1150					  lru_head));
1151		list_del_init(&evict_res->lru_head);
1152
1153		write_unlock(&dev_priv->resource_lock);
1154
1155		ret = vmw_resource_do_evict(evict_res, true);
1156		if (unlikely(ret != 0)) {
1157			write_lock(&dev_priv->resource_lock);
1158			list_add_tail(&evict_res->lru_head, lru_list);
1159			write_unlock(&dev_priv->resource_lock);
1160			if (ret == -ERESTARTSYS ||
1161			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1162				vmw_resource_unreference(&evict_res);
1163				goto out_no_validate;
1164			}
1165		}
1166
1167		vmw_resource_unreference(&evict_res);
1168	} while (1);
1169
1170	if (unlikely(ret != 0))
1171		goto out_no_validate;
1172	else if (!res->func->needs_backup && res->backup) {
1173		list_del_init(&res->mob_head);
1174		vmw_dmabuf_unreference(&res->backup);
1175	}
1176
1177	return 0;
1178
1179out_no_validate:
1180	return ret;
1181}
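
/*
 * Note on the loop above: eviction is retried until validation stops
 * returning -EBUSY, but the loop bails out on -ERESTARTSYS or once more
 * than VMW_RES_EVICT_ERR_COUNT eviction attempts have failed, so a
 * persistently busy LRU list cannot stall validation forever.
 */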
1182
1183/**
1184 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1185 *                       object without unreserving it.
1186 *
1187 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1188 * @fence:          Pointer to the fence. If NULL, this function will
1189 *                  insert a fence into the command stream.
1190 *
1191 * Contrary to the ttm_eu version of this function, it takes only
1192 * a single buffer object instead of a list, and it also doesn't
1193 * unreserve the buffer object, which needs to be done separately.
1194 */
1195void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1196			 struct vmw_fence_obj *fence)
1197{
1198	struct ttm_bo_device *bdev = bo->bdev;
1199
1200	struct vmw_private *dev_priv =
1201		container_of(bdev, struct vmw_private, bdev);
1202
1203	if (fence == NULL) {
1204		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1205		reservation_object_add_excl_fence(bo->resv, &fence->base);
1206		dma_fence_put(&fence->base);
1207	} else
1208		reservation_object_add_excl_fence(bo->resv, &fence->base);
1209}
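
/*
 * Expected calling convention for vmw_fence_single_bo() above, as a
 * hypothetical sketch: the buffer must already be reserved, and
 * unreserving remains the caller's job. Illustration only.
 */
#if 0
static void vmw_example_fence_bo(struct ttm_buffer_object *bo)
{
	(void) ttm_bo_reserve(bo, false, false, NULL);
	/* A NULL fence inserts a new fence into the command stream. */
	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);
}
#endif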
1210
1211/**
1212 * vmw_resource_move_notify - TTM move_notify_callback
1213 *
1214 * @bo: The TTM buffer object about to move.
1215 * @mem: The struct ttm_mem_reg indicating to what memory
1216 *       region the move is taking place.
1217 *
1218 * Evicts the Guest Backed hardware resource if the backup
1219 * buffer is being moved out of MOB memory.
1220 * Note that this function should not race with the resource
1221 * validation code as long as it accesses only members of struct
1222 * resource that remain static while bo::res is !NULL and
1223 * while we have @bo reserved. struct resource::backup is *not* a
1224 * static member. The resource validation code will take care
1225 * to set @bo::res to NULL, while having @bo reserved when the
1226 * buffer is no longer bound to the resource, so @bo::res can be
1227 * used to determine whether there is a need to unbind and whether
1228 * it is safe to unbind.
1229 */
1230void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1231			      struct ttm_mem_reg *mem)
1232{
1233	struct vmw_dma_buffer *dma_buf;
1234
1235	if (mem == NULL)
1236		return;
1237
1238	if (bo->destroy != vmw_dmabuf_bo_free &&
1239	    bo->destroy != vmw_user_dmabuf_destroy)
1240		return;
1241
1242	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1243
1244	/*
1245	 * Kill any cached kernel maps before move. An optimization could
1246	 * be to do this iff source or destination memory type is VRAM.
1247	 */
1248	vmw_dma_buffer_unmap(dma_buf);
1249
1250	if (mem->mem_type != VMW_PL_MOB) {
1251		struct vmw_resource *res, *n;
1252		struct ttm_validate_buffer val_buf;
1253
1254		val_buf.bo = bo;
1255		val_buf.shared = false;
1256
1257		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1258
1259			if (unlikely(res->func->unbind == NULL))
1260				continue;
1261
1262			(void) res->func->unbind(res, true, &val_buf);
1263			res->backup_dirty = true;
1264			res->res_dirty = false;
1265			list_del_init(&res->mob_head);
1266		}
1267
1268		(void) ttm_bo_wait(bo, false, false);
1269	}
1270}
1271
1272
1273/**
1274 * vmw_resource_swap_notify - swapout notify callback.
1275 *
1276 * @bo: The buffer object to be swapped out.
1277 */
1278void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
1279{
1280	if (bo->destroy != vmw_dmabuf_bo_free &&
1281	    bo->destroy != vmw_user_dmabuf_destroy)
1282		return;
1283
1284	/* Kill any cached kernel maps before swapout */
1285	vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
1286}
1287
1288
1289/**
1290 * vmw_query_readback_all - Read back cached query states
1291 *
1292 * @dx_query_mob: Buffer containing the DX query MOB
1293 *
1294 * Read back cached states from the device if they exist.  This function
1295 * assumes that binding_mutex is held.
1296 */
1297int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1298{
1299	struct vmw_resource *dx_query_ctx;
1300	struct vmw_private *dev_priv;
1301	struct {
1302		SVGA3dCmdHeader header;
1303		SVGA3dCmdDXReadbackAllQuery body;
1304	} *cmd;
1305
1306
1307	/* No query bound, so do nothing */
1308	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1309		return 0;
1310
1311	dx_query_ctx = dx_query_mob->dx_query_ctx;
1312	dev_priv     = dx_query_ctx->dev_priv;
1313
1314	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1315	if (unlikely(cmd == NULL)) {
1316		DRM_ERROR("Failed reserving FIFO space for "
1317			  "query MOB read back.\n");
1318		return -ENOMEM;
1319	}
1320
1321	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1322	cmd->header.size = sizeof(cmd->body);
1323	cmd->body.cid    = dx_query_ctx->id;
1324
1325	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1326
1327	/* Triggers a rebind the next time the affected context is bound */
1328	dx_query_mob->dx_query_ctx = NULL;
1329
1330	return 0;
1331}
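
/*
 * Layout note, for illustration: the anonymous struct in the function
 * above is the usual SVGA FIFO pattern of an SVGA3dCmdHeader directly
 * followed by the command body; vmw_fifo_reserve_dx() and
 * vmw_fifo_commit() bracket a write of exactly sizeof(*cmd) bytes.
 */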
1332
1333
1334
1335/**
1336 * vmw_query_move_notify - Read back cached query states
1337 *
1338 * @bo: The TTM buffer object about to move.
1339 * @mem: The memory region @bo is moving to.
1340 *
1341 * Called before the query MOB is swapped out to read back cached query
1342 * states from the device.
1343 */
1344void vmw_query_move_notify(struct ttm_buffer_object *bo,
1345			   struct ttm_mem_reg *mem)
1346{
1347	struct vmw_dma_buffer *dx_query_mob;
1348	struct ttm_bo_device *bdev = bo->bdev;
1349	struct vmw_private *dev_priv;
1350
1351
1352	dev_priv = container_of(bdev, struct vmw_private, bdev);
1353
1354	mutex_lock(&dev_priv->binding_mutex);
1355
1356	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1357	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1358		mutex_unlock(&dev_priv->binding_mutex);
1359		return;
1360	}
1361
1362	/* If BO is being moved from MOB to system memory */
1363	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1364		struct vmw_fence_obj *fence;
1365
1366		(void) vmw_query_readback_all(dx_query_mob);
1367		mutex_unlock(&dev_priv->binding_mutex);
1368
1369		/* Create a fence and attach the BO to it */
1370		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1371		vmw_fence_single_bo(bo, fence);
1372
1373		if (fence != NULL)
1374			vmw_fence_obj_unreference(&fence);
1375
1376		(void) ttm_bo_wait(bo, false, false);
1377	} else
1378		mutex_unlock(&dev_priv->binding_mutex);
1379
1380}
1381
1382/**
1383 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1384 *
1385 * @res:            The resource being queried.
1386 */
1387bool vmw_resource_needs_backup(const struct vmw_resource *res)
1388{
1389	return res->func->needs_backup;
1390}
1391
1392/**
1393 * vmw_resource_evict_type - Evict all resources of a specific type
1394 *
1395 * @dev_priv:       Pointer to a device private struct
1396 * @type:           The resource type to evict
1397 *
1398 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1399 * try to evict all evictable resources of a specific type.
1400 */
1401static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1402				    enum vmw_res_type type)
1403{
1404	struct list_head *lru_list = &dev_priv->res_lru[type];
1405	struct vmw_resource *evict_res;
1406	unsigned err_count = 0;
1407	int ret;
1408
1409	do {
1410		write_lock(&dev_priv->resource_lock);
1411
1412		if (list_empty(lru_list))
1413			goto out_unlock;
1414
1415		evict_res = vmw_resource_reference(
1416			list_first_entry(lru_list, struct vmw_resource,
1417					 lru_head));
1418		list_del_init(&evict_res->lru_head);
1419		write_unlock(&dev_priv->resource_lock);
1420
1421		ret = vmw_resource_do_evict(evict_res, false);
1422		if (unlikely(ret != 0)) {
1423			write_lock(&dev_priv->resource_lock);
1424			list_add_tail(&evict_res->lru_head, lru_list);
1425			write_unlock(&dev_priv->resource_lock);
1426			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1427				vmw_resource_unreference(&evict_res);
1428				return;
1429			}
1430		}
1431
1432		vmw_resource_unreference(&evict_res);
1433	} while (1);
1434
1435out_unlock:
1436	write_unlock(&dev_priv->resource_lock);
1437}
1438
1439/**
1440 * vmw_resource_evict_all - Evict all evictable resources
1441 *
1442 * @dev_priv:       Pointer to a device private struct
1443 *
1444 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1445 * evict all evictable resources. In particular this means that all
1446 * guest-backed resources that are registered with the device are
1447 * evicted and the OTable becomes clean.
1448 */
1449void vmw_resource_evict_all(struct vmw_private *dev_priv)
1450{
1451	enum vmw_res_type type;
1452
1453	mutex_lock(&dev_priv->cmdbuf_mutex);
1454
1455	for (type = 0; type < vmw_res_max; ++type)
1456		vmw_resource_evict_type(dev_priv, type);
1457
1458	mutex_unlock(&dev_priv->cmdbuf_mutex);
1459}
1460
1461/**
1462 * vmw_resource_pin - Add a pin reference on a resource
1463 *
1464 * @res: The resource to add a pin reference on
1465 *
1466 * This function adds a pin reference, and if needed validates the resource.
1467 * Having a pin reference means that the resource can never be evicted, and
1468 * its id will never change as long as there is a pin reference.
1469 * This function returns 0 on success and a negative error code on failure.
1470 */
1471int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1472{
1473	struct ttm_operation_ctx ctx = { interruptible, false };
1474	struct vmw_private *dev_priv = res->dev_priv;
1475	int ret;
1476
1477	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1478	mutex_lock(&dev_priv->cmdbuf_mutex);
1479	ret = vmw_resource_reserve(res, interruptible, false);
1480	if (ret)
1481		goto out_no_reserve;
1482
1483	if (res->pin_count == 0) {
1484		struct vmw_dma_buffer *vbo = NULL;
1485
1486		if (res->backup) {
1487			vbo = res->backup;
1488
1489			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1490			if (!vbo->pin_count) {
1491				ret = ttm_bo_validate
1492					(&vbo->base,
1493					 res->func->backup_placement,
1494					 &ctx);
1495				if (ret) {
1496					ttm_bo_unreserve(&vbo->base);
1497					goto out_no_validate;
1498				}
1499			}
1500
1501			/* Do we really need to pin the MOB as well? */
1502			vmw_bo_pin_reserved(vbo, true);
1503		}
1504		ret = vmw_resource_validate(res);
1505		if (vbo)
1506			ttm_bo_unreserve(&vbo->base);
1507		if (ret)
1508			goto out_no_validate;
1509	}
1510	res->pin_count++;
1511
1512out_no_validate:
1513	vmw_resource_unreserve(res, false, NULL, 0UL);
1514out_no_reserve:
1515	mutex_unlock(&dev_priv->cmdbuf_mutex);
1516	ttm_write_unlock(&dev_priv->reservation_sem);
1517
1518	return ret;
1519}
1520
1521/**
1522 * vmw_resource_unpin - Remove a pin reference from a resource
1523 *
1524 * @res: The resource to remove a pin reference from
1525 *
1526 * Having a pin reference means that the resource can never be evicted, and
1527 * its id will never change as long as there is a pin reference.
1528 */
1529void vmw_resource_unpin(struct vmw_resource *res)
1530{
1531	struct vmw_private *dev_priv = res->dev_priv;
1532	int ret;
1533
1534	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
1535	mutex_lock(&dev_priv->cmdbuf_mutex);
1536
1537	ret = vmw_resource_reserve(res, false, true);
1538	WARN_ON(ret);
1539
1540	WARN_ON(res->pin_count == 0);
1541	if (--res->pin_count == 0 && res->backup) {
1542		struct vmw_dma_buffer *vbo = res->backup;
1543
1544		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1545		vmw_bo_pin_reserved(vbo, false);
1546		ttm_bo_unreserve(&vbo->base);
1547	}
1548
1549	vmw_resource_unreserve(res, false, NULL, 0UL);
1550
1551	mutex_unlock(&dev_priv->cmdbuf_mutex);
1552	ttm_read_unlock(&dev_priv->reservation_sem);
1553}
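
/*
 * Pin/unpin usage sketch: a pin reference guarantees a stable device id
 * and prevents eviction for as long as it is held. Hypothetical caller,
 * illustration only.
 */
#if 0
static int vmw_example_pin_cycle(struct vmw_resource *res)
{
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;

	/* res->id is now stable and the resource cannot be evicted. */

	vmw_resource_unpin(res);
	return 0;
}
#endif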
1554
1555/**
1556 * vmw_res_type - Return the resource type
1557 *
1558 * @res: Pointer to the resource
1559 */
1560enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1561{
1562	return res->func->res_type;
1563}