v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_binding.h"
  31#include "vmwgfx_bo.h"
  32#include "vmwgfx_drv.h"
  33#include "vmwgfx_resource_priv.h"
  34
  35#define VMW_RES_EVICT_ERR_COUNT 10
  36
  37/**
  38 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  39 * @res: The resource
  40 */
  41void vmw_resource_mob_attach(struct vmw_resource *res)
  42{
  43	struct vmw_bo *gbo = res->guest_memory_bo;
  44	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
  45
  46	dma_resv_assert_held(gbo->tbo.base.resv);
  47	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  48		res->func->prio;
  49
  50	while (*new) {
  51		struct vmw_resource *this =
  52			container_of(*new, struct vmw_resource, mob_node);
  53
  54		parent = *new;
  55		new = (res->guest_memory_offset < this->guest_memory_offset) ?
  56			&((*new)->rb_left) : &((*new)->rb_right);
  57	}
  58
  59	rb_link_node(&res->mob_node, parent, new);
  60	rb_insert_color(&res->mob_node, &gbo->res_tree);
  61
  62	vmw_bo_prio_add(gbo, res->used_prio);
  63}
  64
  65/**
  66 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  67 * @res: The resource
  68 */
  69void vmw_resource_mob_detach(struct vmw_resource *res)
  70{
  71	struct vmw_bo *gbo = res->guest_memory_bo;
  72
  73	dma_resv_assert_held(gbo->tbo.base.resv);
  74	if (vmw_resource_mob_attached(res)) {
  75		rb_erase(&res->mob_node, &gbo->res_tree);
  76		RB_CLEAR_NODE(&res->mob_node);
  77		vmw_bo_prio_del(gbo, res->used_prio);
  78	}
  79}
  80
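/*
 * Editor's note: a minimal sketch (not upstream code) of the locking
 * contract for the two helpers above. Both require the backing bo's
 * reservation to be held; "example_reattach" is a hypothetical caller.
 */
static inline int example_reattach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;
	int ret;

	ret = ttm_bo_reserve(&gbo->tbo, true, false, NULL);
	if (ret)
		return ret;
	vmw_resource_mob_detach(res);	/* no-op if not attached */
	vmw_resource_mob_attach(res);	/* re-insert at the current offset */
	ttm_bo_unreserve(&gbo->tbo);
	return 0;
}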
  81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  82{
  83	kref_get(&res->kref);
  84	return res;
  85}
  86
  87struct vmw_resource *
  88vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  89{
  90	return kref_get_unless_zero(&res->kref) ? res : NULL;
  91}
  92
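/*
 * Editor's note: illustrative refcounting pattern (hypothetical caller,
 * not upstream code). The _unless_doomed variant is the safe choice when
 * the final reference may be dropped concurrently.
 */
static inline void example_ref_pattern(struct vmw_resource *candidate)
{
	struct vmw_resource *res =
		vmw_resource_reference_unless_doomed(candidate);

	if (!res)
		return;			/* already on its way down */
	/* ... use res ... */
	vmw_resource_unreference(&res);	/* also NULLs the pointer */
}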
  93/**
  94 * vmw_resource_release_id - release a resource id to the id manager.
  95 *
  96 * @res: Pointer to the resource.
  97 *
  98 * Release the resource id to the resource id manager and set it to -1
  99 */
 100void vmw_resource_release_id(struct vmw_resource *res)
 101{
 102	struct vmw_private *dev_priv = res->dev_priv;
 103	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 104
 105	spin_lock(&dev_priv->resource_lock);
 106	if (res->id != -1)
 107		idr_remove(idr, res->id);
 108	res->id = -1;
 109	spin_unlock(&dev_priv->resource_lock);
 110}
 111
 112static void vmw_resource_release(struct kref *kref)
 113{
 114	struct vmw_resource *res =
 115	    container_of(kref, struct vmw_resource, kref);
 116	struct vmw_private *dev_priv = res->dev_priv;
 117	int id;
 118	int ret;
 119	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 120
 121	spin_lock(&dev_priv->resource_lock);
 122	list_del_init(&res->lru_head);
 123	spin_unlock(&dev_priv->resource_lock);
 124	if (res->guest_memory_bo) {
 125		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
 126
 127		ret = ttm_bo_reserve(bo, false, false, NULL);
 128		BUG_ON(ret);
 129		if (vmw_resource_mob_attached(res) &&
 130		    res->func->unbind != NULL) {
 131			struct ttm_validate_buffer val_buf;
 132
 133			val_buf.bo = bo;
 134			val_buf.num_shared = 0;
 135			res->func->unbind(res, false, &val_buf);
 136		}
  137	res->guest_memory_dirty = false;
 138		vmw_resource_mob_detach(res);
 139		if (res->dirty)
 140			res->func->dirty_free(res);
 141		if (res->coherent)
 142			vmw_bo_dirty_release(res->guest_memory_bo);
 143		ttm_bo_unreserve(bo);
 144		vmw_user_bo_unref(&res->guest_memory_bo);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	spin_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	spin_unlock(&dev_priv->resource_lock);
 164}
 165
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
 168	struct vmw_resource *res = *p_res;
 169
 170	*p_res = NULL;
 171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
 174
 175/**
  176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
  180 * Allocate the lowest free resource id from the id manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	spin_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	spin_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
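/*
 * Editor's note: sketch of the id lifecycle (hypothetical flow, not
 * upstream code). A resource created with delay_id == true receives its
 * device id on first validation and drops it again on destruction.
 */
static inline int example_id_cycle(struct vmw_resource *res)
{
	int ret = vmw_resource_alloc_id(res);	/* requires res->id == -1 */

	if (ret)
		return ret;			/* -ENOMEM if the idr is full */
	/* ... make the id known to the device ... */
	vmw_resource_release_id(res);		/* res->id is -1 again */
	return 0;
}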
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @delay_id:       Boolean whether to defer device id allocation until
 209 *                  the first validation.
 210 * @res_free:       Resource destructor.
 211 * @func:           Resource function table.
 212 */
 213int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 214		      bool delay_id,
 215		      void (*res_free) (struct vmw_resource *res),
 216		      const struct vmw_res_func *func)
 217{
 218	kref_init(&res->kref);
 219	res->hw_destroy = NULL;
 220	res->res_free = res_free;
 221	res->dev_priv = dev_priv;
 222	res->func = func;
 223	RB_CLEAR_NODE(&res->mob_node);
 224	INIT_LIST_HEAD(&res->lru_head);
 225	INIT_LIST_HEAD(&res->binding_head);
 226	res->id = -1;
 227	res->guest_memory_bo = NULL;
 228	res->guest_memory_offset = 0;
 229	res->guest_memory_dirty = false;
 230	res->res_dirty = false;
 231	res->coherent = false;
 232	res->used_prio = 3;
 233	res->dirty = NULL;
 234	if (delay_id)
 235		return 0;
 236	else
 237		return vmw_resource_alloc_id(res);
 238}
 239
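/*
 * Editor's note: sketch of how a resource type would call
 * vmw_resource_init() (hypothetical; "example_res_free" and
 * "example_res_func" stand in for a real type's destructor and
 * function table).
 */
static int example_resource_setup(struct vmw_private *dev_priv,
				  struct vmw_resource *res,
				  void (*example_res_free)(struct vmw_resource *),
				  const struct vmw_res_func *example_res_func)
{
	/* delay_id == true defers device id allocation to first validation */
	return vmw_resource_init(dev_priv, res, true, example_res_free,
				 example_res_func);
}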
 240
 241/**
 242 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 243 * TTM user-space handle and perform basic type checks
 244 *
 245 * @dev_priv:     Pointer to a device private struct
 246 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 247 * @handle:       The TTM user-space handle
 248 * @converter:    Pointer to an object describing the resource type
 249 * @p_res:        On successful return the location pointed to will contain
 250 *                a pointer to a refcounted struct vmw_resource.
 251 *
 252 * If the handle can't be found or is associated with an incorrect resource
 253 * type, -EINVAL will be returned.
 254 */
 255int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 256				    struct ttm_object_file *tfile,
 257				    uint32_t handle,
 258				    const struct vmw_user_resource_conv
 259				    *converter,
 260				    struct vmw_resource **p_res)
 261{
 262	struct ttm_base_object *base;
 263	struct vmw_resource *res;
 264	int ret = -EINVAL;
 265
 266	base = ttm_base_object_lookup(tfile, handle);
 267	if (unlikely(!base))
 268		return -EINVAL;
 269
 270	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 271		goto out_bad_resource;
 272
 273	res = converter->base_obj_to_res(base);
 274	kref_get(&res->kref);
 275
 276	*p_res = res;
 277	ret = 0;
 278
 279out_bad_resource:
 280	ttm_base_object_unref(&base);
 281
 282	return ret;
 283}
 284
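/*
 * Editor's note: hypothetical caller of the lookup above (not upstream
 * code). On success *p_res carries a reference that must be dropped with
 * vmw_resource_unreference().
 */
static int example_lookup(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile, uint32_t handle,
			  const struct vmw_user_resource_conv *converter)
{
	struct vmw_resource *res;
	int ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
						  converter, &res);

	if (ret)
		return ret;	/* -EINVAL: unknown handle or wrong type */
	/* ... use res ... */
	vmw_resource_unreference(&res);
	return 0;
}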
 285/*
  286 * Helper function that looks up either a surface or a bo.
  287 *
  288 * The pointers pointed at by out_surf and out_buf need to be NULL.
 289 */
 290int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 291			   struct drm_file *filp,
 292			   uint32_t handle,
 293			   struct vmw_surface **out_surf,
 294			   struct vmw_bo **out_buf)
 295{
 296	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 297	struct vmw_resource *res;
 298	int ret;
 299
 300	BUG_ON(*out_surf || *out_buf);
 301
 302	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 303					      user_surface_converter,
 304					      &res);
 305	if (!ret) {
 306		*out_surf = vmw_res_to_srf(res);
 307		return 0;
 308	}
 309
 310	*out_surf = NULL;
 311	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 312	return ret;
 313}
 314
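/*
 * Editor's note: sketch of the two-way lookup contract (hypothetical
 * caller, not upstream code). Both out pointers must be NULL on entry;
 * exactly one of them is set on success.
 */
static int example_surf_or_bo(struct vmw_private *dev_priv,
			      struct drm_file *filp, uint32_t handle)
{
	struct vmw_surface *surf = NULL;
	struct vmw_bo *buf = NULL;
	int ret = vmw_user_lookup_handle(dev_priv, filp, handle,
					 &surf, &buf);

	if (ret)
		return ret;
	/* surf != NULL means the handle named a surface; otherwise buf
	 * is set. Use and then unreference whichever was returned. */
	return 0;
}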
 315/**
 316 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 317 *
 318 * @res:            The resource for which to allocate a gbo buffer.
 319 * @interruptible:  Whether any sleeps during allocation should be
 320 *                  performed while interruptible.
 321 */
 322static int vmw_resource_buf_alloc(struct vmw_resource *res,
 323				  bool interruptible)
 324{
 325	unsigned long size = PFN_ALIGN(res->guest_memory_size);
 326	struct vmw_bo *gbo;
 327	struct vmw_bo_params bo_params = {
 328		.domain = res->func->domain,
 329		.busy_domain = res->func->busy_domain,
 330		.bo_type = ttm_bo_type_device,
 331		.size = res->guest_memory_size,
 332		.pin = false
 333	};
 334	int ret;
 335
 336	if (likely(res->guest_memory_bo)) {
 337		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
 338		return 0;
 339	}
 340
 341	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
 342	if (unlikely(ret != 0))
 343		goto out_no_bo;
 344
 345	res->guest_memory_bo = gbo;
 346
 347out_no_bo:
 348	return ret;
 349}
 350
 351/**
 352 * vmw_resource_do_validate - Make a resource up-to-date and visible
 353 *                            to the device.
 354 *
 355 * @res:            The resource to make visible to the device.
 356 * @val_buf:        Information about a buffer possibly
 357 *                  containing backup data if a bind operation is needed.
 358 * @dirtying:       Transfer dirty regions.
 359 *
 360 * On hardware resource shortage, this function returns -EBUSY and
 361 * should be retried once resources have been freed up.
 362 */
 363static int vmw_resource_do_validate(struct vmw_resource *res,
 364				    struct ttm_validate_buffer *val_buf,
 365				    bool dirtying)
 366{
 367	int ret = 0;
 368	const struct vmw_res_func *func = res->func;
 369
 370	if (unlikely(res->id == -1)) {
 371		ret = func->create(res);
 372		if (unlikely(ret != 0))
 373			return ret;
 374	}
 375
 376	if (func->bind &&
 377	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
 378	      val_buf->bo) ||
 379	     (!func->needs_guest_memory && val_buf->bo))) {
 380		ret = func->bind(res, val_buf);
 381		if (unlikely(ret != 0))
 382			goto out_bind_failed;
 383		if (func->needs_guest_memory)
 384			vmw_resource_mob_attach(res);
 385	}
 386
 387	/*
 388	 * Handle the case where the backup mob is marked coherent but
 389	 * the resource isn't.
 390	 */
 391	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 392	    !res->coherent) {
 393		if (res->guest_memory_bo->dirty && !res->dirty) {
 394			ret = func->dirty_alloc(res);
 395			if (ret)
 396				return ret;
 397		} else if (!res->guest_memory_bo->dirty && res->dirty) {
 398			func->dirty_free(res);
 399		}
 400	}
 401
 402	/*
 403	 * Transfer the dirty regions to the resource and update
 404	 * the resource.
 405	 */
 406	if (res->dirty) {
 407		if (dirtying && !res->res_dirty) {
 408			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
 409			pgoff_t end = __KERNEL_DIV_ROUND_UP
 410				(res->guest_memory_offset + res->guest_memory_size,
 411				 PAGE_SIZE);
 412
 413			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
 414		}
 415
 416		vmw_bo_dirty_transfer_to_res(res);
 417		return func->dirty_sync(res);
 418	}
 419
 420	return 0;
 421
 422out_bind_failed:
 423	func->destroy(res);
 424
 425	return ret;
 426}
 427
 428/**
 429 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 430 * command submission.
 431 *
 432 * @res:               Pointer to the struct vmw_resource to unreserve.
 433 * @dirty_set:         Change dirty status of the resource.
 434 * @dirty:             When changing dirty status indicates the new status.
  435 * @switch_guest_memory: Guest memory buffer has been switched.
  436 * @new_guest_memory_bo: Pointer to the new guest memory buffer if command
  437 *                     submission switched buffers. May be NULL.
 438 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 439 *
 440 * Currently unreserving a resource means putting it back on the device's
 441 * resource lru list, so that it can be evicted if necessary.
 442 */
 443void vmw_resource_unreserve(struct vmw_resource *res,
 444			    bool dirty_set,
 445			    bool dirty,
 446			    bool switch_guest_memory,
 447			    struct vmw_bo *new_guest_memory_bo,
 448			    unsigned long new_guest_memory_offset)
 449{
 450	struct vmw_private *dev_priv = res->dev_priv;
 451
 452	if (!list_empty(&res->lru_head))
 453		return;
 454
 455	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
 456		if (res->guest_memory_bo) {
 457			vmw_resource_mob_detach(res);
 458			if (res->coherent)
 459				vmw_bo_dirty_release(res->guest_memory_bo);
 460			vmw_user_bo_unref(&res->guest_memory_bo);
 461		}
 462
 463		if (new_guest_memory_bo) {
 464			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 465
 466			/*
 467			 * The validation code should already have added a
 468			 * dirty tracker here.
 469			 */
 470			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
 471
 472			vmw_resource_mob_attach(res);
 473		} else {
 474			res->guest_memory_bo = NULL;
 475		}
 476	} else if (switch_guest_memory && res->coherent) {
 477		vmw_bo_dirty_release(res->guest_memory_bo);
 478	}
 479
 480	if (switch_guest_memory)
 481		res->guest_memory_offset = new_guest_memory_offset;
 482
 483	if (dirty_set)
 484		res->res_dirty = dirty;
 485
 486	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 487		return;
 488
 489	spin_lock(&dev_priv->resource_lock);
 490	list_add_tail(&res->lru_head,
 491		      &res->dev_priv->res_lru[res->func->res_type]);
 492	spin_unlock(&dev_priv->resource_lock);
 493}
 494
 495/**
 496 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 497 *                             for a resource and in that case, allocate
 498 *                             one, reserve and validate it.
 499 *
 500 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 501 * @res:            The resource for which to allocate a backup buffer.
 502 * @interruptible:  Whether any sleeps during allocation should be
 503 *                  performed while interruptible.
 504 * @val_buf:        On successful return contains data about the
 505 *                  reserved and validated backup buffer.
 506 */
 507static int
 508vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 509			  struct vmw_resource *res,
 510			  bool interruptible,
 511			  struct ttm_validate_buffer *val_buf)
 512{
 513	struct ttm_operation_ctx ctx = { true, false };
 514	struct list_head val_list;
 515	bool guest_memory_dirty = false;
 516	int ret;
 517
 518	if (unlikely(!res->guest_memory_bo)) {
 519		ret = vmw_resource_buf_alloc(res, interruptible);
 520		if (unlikely(ret != 0))
 521			return ret;
 522	}
 523
 524	INIT_LIST_HEAD(&val_list);
 525	ttm_bo_get(&res->guest_memory_bo->tbo);
 526	val_buf->bo = &res->guest_memory_bo->tbo;
 527	val_buf->num_shared = 0;
 528	list_add_tail(&val_buf->head, &val_list);
 529	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 530	if (unlikely(ret != 0))
 531		goto out_no_reserve;
 532
 533	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
 534		return 0;
 535
 536	guest_memory_dirty = res->guest_memory_dirty;
 537	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
 538			     res->func->busy_domain);
 539	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
 540			      &res->guest_memory_bo->placement,
 541			      &ctx);
 542
 543	if (unlikely(ret != 0))
 544		goto out_no_validate;
 545
 546	return 0;
 547
 548out_no_validate:
 549	ttm_eu_backoff_reservation(ticket, &val_list);
 550out_no_reserve:
 551	ttm_bo_put(val_buf->bo);
 552	val_buf->bo = NULL;
 553	if (guest_memory_dirty)
 554		vmw_user_bo_unref(&res->guest_memory_bo);
 555
 556	return ret;
 557}
 558
 559/*
 560 * vmw_resource_reserve - Reserve a resource for command submission
 561 *
 562 * @res:            The resource to reserve.
 563 *
  564 * This function takes the resource off the LRU list and makes sure
 565 * a guest memory buffer is present for guest-backed resources.
 566 * However, the buffer may not be bound to the resource at this
 567 * point.
 568 *
 569 */
 570int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 571			 bool no_guest_memory)
 572{
 573	struct vmw_private *dev_priv = res->dev_priv;
 574	int ret;
 575
 576	spin_lock(&dev_priv->resource_lock);
 577	list_del_init(&res->lru_head);
 578	spin_unlock(&dev_priv->resource_lock);
 579
 580	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
 581	    !no_guest_memory) {
 582		ret = vmw_resource_buf_alloc(res, interruptible);
 583		if (unlikely(ret != 0)) {
 584			DRM_ERROR("Failed to allocate a guest memory buffer "
  585				  "of size %lu bytes.\n",
 586				  (unsigned long) res->guest_memory_size);
 587			return ret;
 588		}
 589	}
 590
 591	return 0;
 592}
 593
 594/**
 595 * vmw_resource_backoff_reservation - Unreserve and unreference a
 596 *                                    guest memory buffer
  597 *
 598 * @ticket:         The ww acquire ctx used for reservation.
 599 * @val_buf:        Guest memory buffer information.
 600 */
 601static void
 602vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 603				 struct ttm_validate_buffer *val_buf)
 604{
 605	struct list_head val_list;
 606
 607	if (likely(val_buf->bo == NULL))
 608		return;
 609
 610	INIT_LIST_HEAD(&val_list);
 611	list_add_tail(&val_buf->head, &val_list);
 612	ttm_eu_backoff_reservation(ticket, &val_list);
 613	ttm_bo_put(val_buf->bo);
 614	val_buf->bo = NULL;
 615}
 616
 617/**
 618 * vmw_resource_do_evict - Evict a resource, and transfer its data
 619 *                         to a backup buffer.
 620 *
 621 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 622 * @res:            The resource to evict.
 623 * @interruptible:  Whether to wait interruptible.
 624 */
 625static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 626				 struct vmw_resource *res, bool interruptible)
 627{
 628	struct ttm_validate_buffer val_buf;
 629	const struct vmw_res_func *func = res->func;
 630	int ret;
 631
 632	BUG_ON(!func->may_evict);
 633
 634	val_buf.bo = NULL;
 635	val_buf.num_shared = 0;
 636	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 637	if (unlikely(ret != 0))
 638		return ret;
 639
 640	if (unlikely(func->unbind != NULL &&
 641		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
 642		ret = func->unbind(res, res->res_dirty, &val_buf);
 643		if (unlikely(ret != 0))
 644			goto out_no_unbind;
 645		vmw_resource_mob_detach(res);
 646	}
 647	ret = func->destroy(res);
 648	res->guest_memory_dirty = true;
 649	res->res_dirty = false;
 650out_no_unbind:
 651	vmw_resource_backoff_reservation(ticket, &val_buf);
 652
 653	return ret;
 654}
 655
 656
 657/**
 658 * vmw_resource_validate - Make a resource up-to-date and visible
 659 *                         to the device.
 660 * @res: The resource to make visible to the device.
 661 * @intr: Perform waits interruptible if possible.
 662 * @dirtying: Pending GPU operation will dirty the resource
 663 *
  664 * On successful return, any guest memory buffer pointed to by
  665 * @res->guest_memory_bo will be reserved and validated.
 666 * On hardware resource shortage, this function will repeatedly evict
 667 * resources of the same type until the validation succeeds.
 668 *
 669 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 670 * on failure.
 671 */
 672int vmw_resource_validate(struct vmw_resource *res, bool intr,
 673			  bool dirtying)
 674{
 675	int ret;
 676	struct vmw_resource *evict_res;
 677	struct vmw_private *dev_priv = res->dev_priv;
 678	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 679	struct ttm_validate_buffer val_buf;
 680	unsigned err_count = 0;
 681
 682	if (!res->func->create)
 683		return 0;
 684
 685	val_buf.bo = NULL;
 686	val_buf.num_shared = 0;
 687	if (res->guest_memory_bo)
 688		val_buf.bo = &res->guest_memory_bo->tbo;
 689	do {
 690		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 691		if (likely(ret != -EBUSY))
 692			break;
 693
 694		spin_lock(&dev_priv->resource_lock);
 695		if (list_empty(lru_list) || !res->func->may_evict) {
  696		DRM_ERROR("Out of device resources "
 697				  "for %s.\n", res->func->type_name);
 698			ret = -EBUSY;
 699			spin_unlock(&dev_priv->resource_lock);
 700			break;
 701		}
 702
 703		evict_res = vmw_resource_reference
 704			(list_first_entry(lru_list, struct vmw_resource,
 705					  lru_head));
 706		list_del_init(&evict_res->lru_head);
 707
 708		spin_unlock(&dev_priv->resource_lock);
 709
 710		/* Trylock backup buffers with a NULL ticket. */
 711		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 712		if (unlikely(ret != 0)) {
 713			spin_lock(&dev_priv->resource_lock);
 714			list_add_tail(&evict_res->lru_head, lru_list);
 715			spin_unlock(&dev_priv->resource_lock);
 716			if (ret == -ERESTARTSYS ||
 717			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 718				vmw_resource_unreference(&evict_res);
 719				goto out_no_validate;
 720			}
 721		}
 722
 723		vmw_resource_unreference(&evict_res);
 724	} while (1);
 725
 726	if (unlikely(ret != 0))
 727		goto out_no_validate;
 728	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 729		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 730		vmw_user_bo_unref(&res->guest_memory_bo);
 731	}
 732
 733	return 0;
 734
 735out_no_validate:
 736	return ret;
 737}
 738
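/*
 * Editor's note: sketch of the reserve/validate/unreserve bracket the
 * functions above implement (hypothetical caller, not upstream code; the
 * real validation code additionally reserves the backing buffer object).
 */
static int example_make_visible(struct vmw_resource *res)
{
	int ret = vmw_resource_reserve(res, true, false);

	if (ret)
		return ret;
	ret = vmw_resource_validate(res, true, false);
	/* No dirty or guest memory changes; just requeue on the LRU list. */
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
	return ret;
}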
 739
 740/**
  741 * vmw_resource_unbind_list - Unbind all resources attached to a mob
 742 *
 743 * @vbo: Pointer to the current backing MOB.
 744 *
 745 * Evicts the Guest Backed hardware resource if the backup
 746 * buffer is being moved out of MOB memory.
 747 * Note that this function will not race with the resource
 748 * validation code, since resource validation and eviction
 749 * both require the backup buffer to be reserved.
 750 */
 751void vmw_resource_unbind_list(struct vmw_bo *vbo)
 752{
 753	struct ttm_validate_buffer val_buf = {
 754		.bo = &vbo->tbo,
 755		.num_shared = 0
 756	};
 757
 758	dma_resv_assert_held(vbo->tbo.base.resv);
 759	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 760		struct rb_node *node = vbo->res_tree.rb_node;
 761		struct vmw_resource *res =
 762			container_of(node, struct vmw_resource, mob_node);
 763
 764		if (!WARN_ON_ONCE(!res->func->unbind))
 765			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 766
  767		res->guest_memory_dirty = true;
 768		res->res_dirty = false;
 769		vmw_resource_mob_detach(res);
 770	}
 771
 772	(void) ttm_bo_wait(&vbo->tbo, false, false);
 773}
 774
 775
 776/**
 777 * vmw_query_readback_all - Read back cached query states
 778 *
 779 * @dx_query_mob: Buffer containing the DX query MOB
 780 *
 781 * Read back cached states from the device if they exist.  This function
 782 * assumes binding_mutex is held.
 783 */
 784int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
 785{
 786	struct vmw_resource *dx_query_ctx;
 787	struct vmw_private *dev_priv;
 788	struct {
 789		SVGA3dCmdHeader header;
 790		SVGA3dCmdDXReadbackAllQuery body;
 791	} *cmd;
 792
 793
 794	/* No query bound, so do nothing */
 795	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 796		return 0;
 797
 798	dx_query_ctx = dx_query_mob->dx_query_ctx;
 799	dev_priv     = dx_query_ctx->dev_priv;
 800
 801	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 802	if (unlikely(cmd == NULL))
 803		return -ENOMEM;
 804
 805	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 806	cmd->header.size = sizeof(cmd->body);
 807	cmd->body.cid    = dx_query_ctx->id;
 808
 809	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 810
  811	/* Triggers a rebind the next time the affected context is bound */
 812	dx_query_mob->dx_query_ctx = NULL;
 813
 814	return 0;
 815}
 816
 817
 818
 819/**
 820 * vmw_query_move_notify - Read back cached query states
 821 *
 822 * @bo: The TTM buffer object about to move.
 823 * @old_mem: The memory region @bo is moving from.
 824 * @new_mem: The memory region @bo is moving to.
 825 *
 826 * Called before the query MOB is swapped out to read back cached query
 827 * states from the device.
 828 */
 829void vmw_query_move_notify(struct ttm_buffer_object *bo,
 830			   struct ttm_resource *old_mem,
 831			   struct ttm_resource *new_mem)
 832{
 833	struct vmw_bo *dx_query_mob;
 834	struct ttm_device *bdev = bo->bdev;
 835	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
 836
 837	mutex_lock(&dev_priv->binding_mutex);
 838
 839	/* If BO is being moved from MOB to system memory */
 840	if (old_mem &&
 841	    new_mem->mem_type == TTM_PL_SYSTEM &&
 842	    old_mem->mem_type == VMW_PL_MOB) {
 843		struct vmw_fence_obj *fence;
 844
 845		dx_query_mob = to_vmw_bo(&bo->base);
 846		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 847			mutex_unlock(&dev_priv->binding_mutex);
 848			return;
 849		}
 850
 851		(void) vmw_query_readback_all(dx_query_mob);
 852		mutex_unlock(&dev_priv->binding_mutex);
 853
 854		/* Create a fence and attach the BO to it */
 855		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 856		vmw_bo_fence_single(bo, fence);
 857
 858		if (fence != NULL)
 859			vmw_fence_obj_unreference(&fence);
 860
 861		(void) ttm_bo_wait(bo, false, false);
 862	} else
 863		mutex_unlock(&dev_priv->binding_mutex);
 864}
 865
 866/**
 867 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 868 *
 869 * @res:            The resource being queried.
 870 */
 871bool vmw_resource_needs_backup(const struct vmw_resource *res)
 872{
 873	return res->func->needs_guest_memory;
 874}
 875
 876/**
 877 * vmw_resource_evict_type - Evict all resources of a specific type
 878 *
 879 * @dev_priv:       Pointer to a device private struct
 880 * @type:           The resource type to evict
 881 *
 882 * To avoid thrashing starvation or as part of the hibernation sequence,
 883 * try to evict all evictable resources of a specific type.
 884 */
 885static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 886				    enum vmw_res_type type)
 887{
 888	struct list_head *lru_list = &dev_priv->res_lru[type];
 889	struct vmw_resource *evict_res;
 890	unsigned err_count = 0;
 891	int ret;
 892	struct ww_acquire_ctx ticket;
 893
 894	do {
 895		spin_lock(&dev_priv->resource_lock);
 896
 897		if (list_empty(lru_list))
 898			goto out_unlock;
 899
 900		evict_res = vmw_resource_reference(
 901			list_first_entry(lru_list, struct vmw_resource,
 902					 lru_head));
 903		list_del_init(&evict_res->lru_head);
 904		spin_unlock(&dev_priv->resource_lock);
 905
 906		/* Wait lock backup buffers with a ticket. */
 907		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 908		if (unlikely(ret != 0)) {
 909			spin_lock(&dev_priv->resource_lock);
 910			list_add_tail(&evict_res->lru_head, lru_list);
 911			spin_unlock(&dev_priv->resource_lock);
 912			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 913				vmw_resource_unreference(&evict_res);
 914				return;
 915			}
 916		}
 917
 918		vmw_resource_unreference(&evict_res);
 919	} while (1);
 920
 921out_unlock:
 922	spin_unlock(&dev_priv->resource_lock);
 923}
 924
 925/**
 926 * vmw_resource_evict_all - Evict all evictable resources
 927 *
 928 * @dev_priv:       Pointer to a device private struct
 929 *
 930 * To avoid thrashing starvation or as part of the hibernation sequence,
 931 * evict all evictable resources. In particular this means that all
 932 * guest-backed resources that are registered with the device are
 933 * evicted and the OTable becomes clean.
 934 */
 935void vmw_resource_evict_all(struct vmw_private *dev_priv)
 936{
 937	enum vmw_res_type type;
 938
 939	mutex_lock(&dev_priv->cmdbuf_mutex);
 940
 941	for (type = 0; type < vmw_res_max; ++type)
 942		vmw_resource_evict_type(dev_priv, type);
 943
 944	mutex_unlock(&dev_priv->cmdbuf_mutex);
 945}
 946
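/*
 * Editor's note: hypothetical hibernation-path usage of the helper
 * above (not upstream code); vmw_resource_evict_all() takes
 * cmdbuf_mutex itself, so the caller must not hold it.
 */
static void example_prepare_hibernation(struct vmw_private *dev_priv)
{
	vmw_resource_evict_all(dev_priv);	/* leaves the OTables clean */
}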
 947/*
 948 * vmw_resource_pin - Add a pin reference on a resource
 949 *
 950 * @res: The resource to add a pin reference on
 951 *
 952 * This function adds a pin reference, and if needed validates the resource.
 953 * Having a pin reference means that the resource can never be evicted, and
 954 * its id will never change as long as there is a pin reference.
 955 * This function returns 0 on success and a negative error code on failure.
 956 */
 957int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 958{
 959	struct ttm_operation_ctx ctx = { interruptible, false };
 960	struct vmw_private *dev_priv = res->dev_priv;
 961	int ret;
 962
 963	mutex_lock(&dev_priv->cmdbuf_mutex);
 964	ret = vmw_resource_reserve(res, interruptible, false);
 965	if (ret)
 966		goto out_no_reserve;
 967
 968	if (res->pin_count == 0) {
 969		struct vmw_bo *vbo = NULL;
 970
 971		if (res->guest_memory_bo) {
 972			vbo = res->guest_memory_bo;
 973
 974			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
 975			if (ret)
 976				goto out_no_validate;
 977			if (!vbo->tbo.pin_count) {
 978				vmw_bo_placement_set(vbo,
 979						     res->func->domain,
 980						     res->func->busy_domain);
 981				ret = ttm_bo_validate
 982					(&vbo->tbo,
 983					 &vbo->placement,
 984					 &ctx);
 985				if (ret) {
 986					ttm_bo_unreserve(&vbo->tbo);
 987					goto out_no_validate;
 988				}
 989			}
 990
 991			/* Do we really need to pin the MOB as well? */
 992			vmw_bo_pin_reserved(vbo, true);
 993		}
 994		ret = vmw_resource_validate(res, interruptible, true);
 995		if (vbo)
 996			ttm_bo_unreserve(&vbo->tbo);
 997		if (ret)
 998			goto out_no_validate;
 999	}
1000	res->pin_count++;
1001
1002out_no_validate:
1003	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1004out_no_reserve:
1005	mutex_unlock(&dev_priv->cmdbuf_mutex);
1006
1007	return ret;
1008}
1009
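/*
 * Editor's note: illustrative pin/unpin bracket (hypothetical caller,
 * not upstream code). While pinned, the resource cannot be evicted and
 * res->id stays stable, so it can be handed to the device long-term.
 */
static int example_pin_for_device(struct vmw_resource *res)
{
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;	/* may be -ERESTARTSYS if interrupted */
	/* ... program the device with res->id ... */
	vmw_resource_unpin(res);
	return 0;
}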
1010/**
1011 * vmw_resource_unpin - Remove a pin reference from a resource
1012 *
1013 * @res: The resource to remove a pin reference from
1014 *
1015 * Having a pin reference means that the resource can never be evicted, and
1016 * its id will never change as long as there is a pin reference.
1017 */
1018void vmw_resource_unpin(struct vmw_resource *res)
1019{
1020	struct vmw_private *dev_priv = res->dev_priv;
1021	int ret;
1022
1023	mutex_lock(&dev_priv->cmdbuf_mutex);
1024
1025	ret = vmw_resource_reserve(res, false, true);
1026	WARN_ON(ret);
1027
1028	WARN_ON(res->pin_count == 0);
1029	if (--res->pin_count == 0 && res->guest_memory_bo) {
1030		struct vmw_bo *vbo = res->guest_memory_bo;
1031
1032		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
1033		vmw_bo_pin_reserved(vbo, false);
1034		ttm_bo_unreserve(&vbo->tbo);
1035	}
1036
1037	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1038
1039	mutex_unlock(&dev_priv->cmdbuf_mutex);
1040}
1041
1042/**
1043 * vmw_res_type - Return the resource type
1044 *
1045 * @res: Pointer to the resource
1046 */
1047enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1048{
1049	return res->func->res_type;
1050}
1051
1052/**
1053 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1054 * sequential range of touched backing store memory.
1055 * @res: The resource.
1056 * @start: The first page touched.
1057 * @end: The last page touched + 1.
1058 */
1059void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1060			       pgoff_t end)
1061{
1062	if (res->dirty)
1063		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1064					   end << PAGE_SHIFT);
1065}
1066
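/*
 * Editor's note: worked example of the page arithmetic above
 * (hypothetical caller, not upstream code). A write touching bytes
 * [0x1800, 0x2100) spans pages 1 and 2, giving start = 1 and end = 3.
 */
static void example_mark_dirty(struct vmw_resource *res,
			       unsigned long byte_start,
			       unsigned long byte_end)
{
	pgoff_t start = byte_start >> PAGE_SHIFT;
	pgoff_t end = __KERNEL_DIV_ROUND_UP(byte_end, PAGE_SIZE);

	vmw_resource_dirty_update(res, start, end);
}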
1067/**
1068 * vmw_resources_clean - Clean resources intersecting a mob range
1069 * @vbo: The mob buffer object
1070 * @start: The mob page offset starting the range
1071 * @end: The mob page offset ending the range
1072 * @num_prefault: Returns how many pages including the first have been
1073 * cleaned and are ok to prefault
1074 */
1075int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1076			pgoff_t end, pgoff_t *num_prefault)
1077{
1078	struct rb_node *cur = vbo->res_tree.rb_node;
1079	struct vmw_resource *found = NULL;
1080	unsigned long res_start = start << PAGE_SHIFT;
1081	unsigned long res_end = end << PAGE_SHIFT;
1082	unsigned long last_cleaned = 0;
1083
1084	/*
1085	 * Find the resource with lowest backup_offset that intersects the
1086	 * range.
1087	 */
1088	while (cur) {
1089		struct vmw_resource *cur_res =
1090			container_of(cur, struct vmw_resource, mob_node);
1091
1092		if (cur_res->guest_memory_offset >= res_end) {
1093			cur = cur->rb_left;
1094		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
1095			   res_start) {
1096			cur = cur->rb_right;
1097		} else {
1098			found = cur_res;
1099			cur = cur->rb_left;
1100			/* Continue to look for resources with lower offsets */
1101		}
1102	}
1103
1104	/*
1105	 * In order of increasing guest_memory_offset, clean dirty resources
1106	 * intersecting the range.
1107	 */
1108	while (found) {
1109		if (found->res_dirty) {
1110			int ret;
1111
1112			if (!found->func->clean)
1113				return -EINVAL;
1114
1115			ret = found->func->clean(found);
1116			if (ret)
1117				return ret;
1118
1119			found->res_dirty = false;
1120		}
1121		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
1122		cur = rb_next(&found->mob_node);
1123		if (!cur)
1124			break;
1125
1126		found = container_of(cur, struct vmw_resource, mob_node);
1127		if (found->guest_memory_offset >= res_end)
1128			break;
1129	}
1130
1131	/*
1132	 * Set number of pages allowed prefaulting and fence the buffer object
1133	 */
1134	*num_prefault = 1;
1135	if (last_cleaned > res_start) {
1136		struct ttm_buffer_object *bo = &vbo->tbo;
1137
1138		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1139						      PAGE_SIZE);
1140		vmw_bo_fence_single(bo, NULL);
1141	}
1142
1143	return 0;
1144}
v3.15
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
 
 
 
 
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
  34
  35#define VMW_RES_EVICT_ERR_COUNT 10
  36
  37struct vmw_user_dma_buffer {
  38	struct ttm_prime_object prime;
  39	struct vmw_dma_buffer dma;
  40};
  41
  42struct vmw_bo_user_rep {
  43	uint32_t handle;
  44	uint64_t map_handle;
  45};
  46
  47struct vmw_stream {
  48	struct vmw_resource res;
  49	uint32_t stream_id;
  50};
  51
  52struct vmw_user_stream {
  53	struct ttm_base_object base;
  54	struct vmw_stream stream;
  55};
  56
  57
  58static uint64_t vmw_user_stream_size;
  59
  60static const struct vmw_res_func vmw_stream_func = {
  61	.res_type = vmw_res_stream,
  62	.needs_backup = false,
  63	.may_evict = false,
  64	.type_name = "video streams",
  65	.backup_placement = NULL,
  66	.create = NULL,
  67	.destroy = NULL,
  68	.bind = NULL,
  69	.unbind = NULL
  70};
  71
  72static inline struct vmw_dma_buffer *
  73vmw_dma_buffer(struct ttm_buffer_object *bo)
  74{
  75	return container_of(bo, struct vmw_dma_buffer, base);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  76}
  77
  78static inline struct vmw_user_dma_buffer *
  79vmw_user_dma_buffer(struct ttm_buffer_object *bo)
 
 
 
  80{
  81	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  82	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
 
 
 
 
 
 
  83}
  84
  85struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  86{
  87	kref_get(&res->kref);
  88	return res;
  89}
  90
  91struct vmw_resource *
  92vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  93{
  94	return kref_get_unless_zero(&res->kref) ? res : NULL;
  95}
  96
  97/**
  98 * vmw_resource_release_id - release a resource id to the id manager.
  99 *
 100 * @res: Pointer to the resource.
 101 *
 102 * Release the resource id to the resource id manager and set it to -1
 103 */
 104void vmw_resource_release_id(struct vmw_resource *res)
 105{
 106	struct vmw_private *dev_priv = res->dev_priv;
 107	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 108
 109	write_lock(&dev_priv->resource_lock);
 110	if (res->id != -1)
 111		idr_remove(idr, res->id);
 112	res->id = -1;
 113	write_unlock(&dev_priv->resource_lock);
 114}
 115
 116static void vmw_resource_release(struct kref *kref)
 117{
 118	struct vmw_resource *res =
 119	    container_of(kref, struct vmw_resource, kref);
 120	struct vmw_private *dev_priv = res->dev_priv;
 121	int id;
 
 122	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 123
 124	res->avail = false;
 125	list_del_init(&res->lru_head);
 126	write_unlock(&dev_priv->resource_lock);
 127	if (res->backup) {
 128		struct ttm_buffer_object *bo = &res->backup->base;
 129
 130		ttm_bo_reserve(bo, false, false, false, 0);
 131		if (!list_empty(&res->mob_head) &&
 
 132		    res->func->unbind != NULL) {
 133			struct ttm_validate_buffer val_buf;
 134
 135			val_buf.bo = bo;
 
 136			res->func->unbind(res, false, &val_buf);
 137		}
 138		res->backup_dirty = false;
 139		list_del_init(&res->mob_head);
 
 
 
 
 140		ttm_bo_unreserve(bo);
 141		vmw_dmabuf_unreference(&res->backup);
 142	}
 143
 144	if (likely(res->hw_destroy != NULL)) {
 145		res->hw_destroy(res);
 146		mutex_lock(&dev_priv->binding_mutex);
 147		vmw_context_binding_res_list_kill(&res->binding_head);
 148		mutex_unlock(&dev_priv->binding_mutex);
 
 149	}
 150
 151	id = res->id;
 152	if (res->res_free != NULL)
 153		res->res_free(res);
 154	else
 155		kfree(res);
 156
 157	write_lock(&dev_priv->resource_lock);
 158
 159	if (id != -1)
 160		idr_remove(idr, id);
 
 161}
 162
 163void vmw_resource_unreference(struct vmw_resource **p_res)
 164{
 165	struct vmw_resource *res = *p_res;
 166	struct vmw_private *dev_priv = res->dev_priv;
 167
 168	*p_res = NULL;
 169	write_lock(&dev_priv->resource_lock);
 170	kref_put(&res->kref, vmw_resource_release);
 171	write_unlock(&dev_priv->resource_lock);
 172}
 173
 174
 175/**
 176 * vmw_resource_alloc_id - release a resource id to the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
 180 * Allocate the lowest free resource from the resource manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	write_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	write_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @obj_type:       Resource object type.
 209 * @delay_id:       Boolean whether to defer device id allocation until
 210 *                  the first validation.
 211 * @res_free:       Resource destructor.
 212 * @func:           Resource function table.
 213 */
 214int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 215		      bool delay_id,
 216		      void (*res_free) (struct vmw_resource *res),
 217		      const struct vmw_res_func *func)
 218{
 219	kref_init(&res->kref);
 220	res->hw_destroy = NULL;
 221	res->res_free = res_free;
 222	res->avail = false;
 223	res->dev_priv = dev_priv;
 224	res->func = func;
 
 225	INIT_LIST_HEAD(&res->lru_head);
 226	INIT_LIST_HEAD(&res->mob_head);
 227	INIT_LIST_HEAD(&res->binding_head);
 228	res->id = -1;
 229	res->backup = NULL;
 230	res->backup_offset = 0;
 231	res->backup_dirty = false;
 232	res->res_dirty = false;
 
 
 
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239/**
 240 * vmw_resource_activate
 241 *
 242 * @res:        Pointer to the newly created resource
 243 * @hw_destroy: Destroy function. NULL if none.
 244 *
 245 * Activate a resource after the hardware has been made aware of it.
 246 * Set tye destroy function to @destroy. Typically this frees the
 247 * resource and destroys the hardware resources associated with it.
 248 * Activate basically means that the function vmw_resource_lookup will
 249 * find it.
 250 */
 251void vmw_resource_activate(struct vmw_resource *res,
 252			   void (*hw_destroy) (struct vmw_resource *))
 253{
 254	struct vmw_private *dev_priv = res->dev_priv;
 255
 256	write_lock(&dev_priv->resource_lock);
 257	res->avail = true;
 258	res->hw_destroy = hw_destroy;
 259	write_unlock(&dev_priv->resource_lock);
 260}
 261
 262struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 263					 struct idr *idr, int id)
 264{
 265	struct vmw_resource *res;
 266
 267	read_lock(&dev_priv->resource_lock);
 268	res = idr_find(idr, id);
 269	if (res && res->avail)
 270		kref_get(&res->kref);
 271	else
 272		res = NULL;
 273	read_unlock(&dev_priv->resource_lock);
 274
 275	if (unlikely(res == NULL))
 276		return NULL;
 277
 278	return res;
 279}
 280
 281/**
 282 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 283 * TTM user-space handle and perform basic type checks
 284 *
 285 * @dev_priv:     Pointer to a device private struct
 286 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 287 * @handle:       The TTM user-space handle
 288 * @converter:    Pointer to an object describing the resource type
 289 * @p_res:        On successful return the location pointed to will contain
 290 *                a pointer to a refcounted struct vmw_resource.
 291 *
 292 * If the handle can't be found or is associated with an incorrect resource
 293 * type, -EINVAL will be returned.
 294 */
 295int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 296				    struct ttm_object_file *tfile,
 297				    uint32_t handle,
 298				    const struct vmw_user_resource_conv
 299				    *converter,
 300				    struct vmw_resource **p_res)
 301{
 302	struct ttm_base_object *base;
 303	struct vmw_resource *res;
 304	int ret = -EINVAL;
 305
 306	base = ttm_base_object_lookup(tfile, handle);
 307	if (unlikely(base == NULL))
 308		return -EINVAL;
 309
 310	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 311		goto out_bad_resource;
 312
 313	res = converter->base_obj_to_res(base);
 314
 315	read_lock(&dev_priv->resource_lock);
 316	if (!res->avail || res->res_free != converter->res_free) {
 317		read_unlock(&dev_priv->resource_lock);
 318		goto out_bad_resource;
 319	}
 320
 321	kref_get(&res->kref);
 322	read_unlock(&dev_priv->resource_lock);
 323
 324	*p_res = res;
 325	ret = 0;
 326
 327out_bad_resource:
 328	ttm_base_object_unref(&base);
 329
 330	return ret;
 331}
 332
 333/**
 334 * Helper function that looks either a surface or dmabuf.
 335 *
 336 * The pointer this pointed at by out_surf and out_buf needs to be null.
 337 */
 338int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 339			   struct ttm_object_file *tfile,
 340			   uint32_t handle,
 341			   struct vmw_surface **out_surf,
 342			   struct vmw_dma_buffer **out_buf)
 343{
 
 344	struct vmw_resource *res;
 345	int ret;
 346
 347	BUG_ON(*out_surf || *out_buf);
 348
 349	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 350					      user_surface_converter,
 351					      &res);
 352	if (!ret) {
 353		*out_surf = vmw_res_to_srf(res);
 354		return 0;
 355	}
 356
 357	*out_surf = NULL;
 358	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
 359	return ret;
 360}
 361
 362/**
 363 * Buffer management.
 364 */
 365
 366/**
 367 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 368 *
 369 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 370 * @size: The requested buffer size.
 371 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 372 */
 373static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 374				  bool user)
 375{
 376	static size_t struct_size, user_struct_size;
 377	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 378	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 379
 380	if (unlikely(struct_size == 0)) {
 381		size_t backend_size = ttm_round_pot(vmw_tt_size);
 382
 383		struct_size = backend_size +
 384			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 385		user_struct_size = backend_size +
 386			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 387	}
 388
 389	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 390		page_array_size +=
 391			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 392
 393	return ((user) ? user_struct_size : struct_size) +
 394		page_array_size;
 395}
 396
 397void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 398{
 399	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 400
 401	kfree(vmw_bo);
 402}
 403
 404static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 405{
 406	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 407
 408	ttm_prime_object_kfree(vmw_user_bo, prime);
 409}
 410
 411int vmw_dmabuf_init(struct vmw_private *dev_priv,
 412		    struct vmw_dma_buffer *vmw_bo,
 413		    size_t size, struct ttm_placement *placement,
 414		    bool interruptible,
 415		    void (*bo_free) (struct ttm_buffer_object *bo))
 416{
 417	struct ttm_bo_device *bdev = &dev_priv->bdev;
 418	size_t acc_size;
 419	int ret;
 420	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 421
 422	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 423
 424	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 425	memset(vmw_bo, 0, sizeof(*vmw_bo));
 426
 427	INIT_LIST_HEAD(&vmw_bo->res_list);
 428
 429	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 430			  ttm_bo_type_device, placement,
 431			  0, interruptible,
 432			  NULL, acc_size, NULL, bo_free);
 433	return ret;
 434}
 435
 436static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 437{
 438	struct vmw_user_dma_buffer *vmw_user_bo;
 439	struct ttm_base_object *base = *p_base;
 440	struct ttm_buffer_object *bo;
 441
 442	*p_base = NULL;
 443
 444	if (unlikely(base == NULL))
 445		return;
 446
 447	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 448				   prime.base);
 449	bo = &vmw_user_bo->dma.base;
 450	ttm_bo_unref(&bo);
 451}
 452
 453static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 454					    enum ttm_ref_type ref_type)
 455{
 456	struct vmw_user_dma_buffer *user_bo;
 457	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 458
 459	switch (ref_type) {
 460	case TTM_REF_SYNCCPU_WRITE:
 461		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 462		break;
 463	default:
 464		BUG();
 465	}
 466}
 467
 468/**
 469 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 470 *
 471 * @dev_priv: Pointer to a struct device private.
 472 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 473 * object.
 474 * @size: Size of the dma buffer.
 475 * @shareable: Boolean whether the buffer is shareable with other open files.
 476 * @handle: Pointer to where the handle value should be assigned.
 477 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 478 * should be assigned.
 479 */
 480int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 481			  struct ttm_object_file *tfile,
 482			  uint32_t size,
 483			  bool shareable,
 484			  uint32_t *handle,
 485			  struct vmw_dma_buffer **p_dma_buf)
 486{
 487	struct vmw_user_dma_buffer *user_bo;
 488	struct ttm_buffer_object *tmp;
 489	int ret;
 490
 491	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 492	if (unlikely(user_bo == NULL)) {
 493		DRM_ERROR("Failed to allocate a buffer.\n");
 494		return -ENOMEM;
 495	}
 496
 497	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 498			      (dev_priv->has_mob) ?
 499			      &vmw_sys_placement :
 500			      &vmw_vram_sys_placement, true,
 501			      &vmw_user_dmabuf_destroy);
 502	if (unlikely(ret != 0))
 503		return ret;
 504
 505	tmp = ttm_bo_reference(&user_bo->dma.base);
 506	ret = ttm_prime_object_init(tfile,
 507				    size,
 508				    &user_bo->prime,
 509				    shareable,
 510				    ttm_buffer_type,
 511				    &vmw_user_dmabuf_release,
 512				    &vmw_user_dmabuf_ref_obj_release);
 513	if (unlikely(ret != 0)) {
 514		ttm_bo_unref(&tmp);
 515		goto out_no_base_object;
 516	}
 517
 518	*p_dma_buf = &user_bo->dma;
 519	*handle = user_bo->prime.base.hash.key;
 520
 521out_no_base_object:
 522	return ret;
 523}
 524
 525/**
 526 * vmw_user_dmabuf_verify_access - verify access permissions on this
 527 * buffer object.
 528 *
 529 * @bo: Pointer to the buffer object being accessed
 530 * @tfile: Identifying the caller.
 531 */
 532int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 533				  struct ttm_object_file *tfile)
 534{
 535	struct vmw_user_dma_buffer *vmw_user_bo;
 536
 537	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 538		return -EPERM;
 539
 540	vmw_user_bo = vmw_user_dma_buffer(bo);
 541
 542	/* Check that the caller has opened the object. */
 543	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 544		return 0;
 545
 546	DRM_ERROR("Could not grant buffer access.\n");
 547	return -EPERM;
 548}
 549
 550/**
 551 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 552 * access, idling previous GPU operations on the buffer and optionally
 553 * blocking it for further command submissions.
 554 *
 555 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 556 * @tfile: Identifying the caller.
 557 * @flags: Flags indicating how the grab should be performed.
 558 *
 559 * A blocking grab will be automatically released when @tfile is closed.
 560 */
 561static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 562					struct ttm_object_file *tfile,
 563					uint32_t flags)
 564{
 565	struct ttm_buffer_object *bo = &user_bo->dma.base;
 566	bool existed;
 567	int ret;
 568
 569	if (flags & drm_vmw_synccpu_allow_cs) {
 570		struct ttm_bo_device *bdev = bo->bdev;
 571
 572		spin_lock(&bdev->fence_lock);
 573		ret = ttm_bo_wait(bo, false, true,
 574				  !!(flags & drm_vmw_synccpu_dontblock));
 575		spin_unlock(&bdev->fence_lock);
 576		return ret;
 577	}
 578
 579	ret = ttm_bo_synccpu_write_grab
 580		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 581	if (unlikely(ret != 0))
 582		return ret;
 583
 584	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 585				 TTM_REF_SYNCCPU_WRITE, &existed);
 586	if (ret != 0 || existed)
 587		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 588
 589	return ret;
 590}
 591
 592/**
 593 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 594 * and unblock command submission on the buffer if blocked.
 595 *
 596 * @handle: Handle identifying the buffer object.
 597 * @tfile: Identifying the caller.
 598 * @flags: Flags indicating the type of release.
 599 */
 600static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 601					   struct ttm_object_file *tfile,
 602					   uint32_t flags)
 603{
 604	if (!(flags & drm_vmw_synccpu_allow_cs))
 605		return ttm_ref_object_base_unref(tfile, handle,
 606						 TTM_REF_SYNCCPU_WRITE);
 607
 608	return 0;
 609}
 610
 611/**
 612 * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
 613 * functionality.
 614 *
 615 * @dev: Identifies the drm device.
 616 * @data: Pointer to the ioctl argument.
 617 * @file_priv: Identifies the caller.
 618 *
 619 * This function checks the ioctl arguments for validity and calls the
 620 * relevant synccpu functions.
 621 */
 622int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 623				  struct drm_file *file_priv)
 624{
 625	struct drm_vmw_synccpu_arg *arg =
 626		(struct drm_vmw_synccpu_arg *) data;
 627	struct vmw_dma_buffer *dma_buf;
 628	struct vmw_user_dma_buffer *user_bo;
 629	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 630	int ret;
 631
 632	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 633	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 634			       drm_vmw_synccpu_dontblock |
 635			       drm_vmw_synccpu_allow_cs)) != 0) {
 636		DRM_ERROR("Illegal synccpu flags.\n");
 637		return -EINVAL;
 638	}
 639
 640	switch (arg->op) {
 641	case drm_vmw_synccpu_grab:
 642		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
 643		if (unlikely(ret != 0))
 644			return ret;
 645
 646		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 647				       dma);
 648		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 649		vmw_dmabuf_unreference(&dma_buf);
 650		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 651			     ret != -EBUSY)) {
 652			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 653				  (unsigned int) arg->handle);
 654			return ret;
 655		}
 656		break;
 657	case drm_vmw_synccpu_release:
 658		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 659						      arg->flags);
 660		if (unlikely(ret != 0)) {
 661			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 662				  (unsigned int) arg->handle);
 663			return ret;
 664		}
 665		break;
 666	default:
 667		DRM_ERROR("Invalid synccpu operation.\n");
 668		return -EINVAL;
 669	}
 670
 671	return 0;
 672}
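
/*
 * Illustrative user-space sketch of the synccpu ioctl above, assuming
 * libdrm's drmCommandWrite() and the definitions from vmwgfx_drm.h;
 * do_cpu_access() is a hypothetical placeholder.
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	if (ret == 0) {
 *		do_cpu_access();		(buffer is now idle for CPU use)
 *		arg.op = drm_vmw_synccpu_release;
 *		drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	}
 */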
 673
 674int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 675			   struct drm_file *file_priv)
 676{
 677	struct vmw_private *dev_priv = vmw_priv(dev);
 678	union drm_vmw_alloc_dmabuf_arg *arg =
 679	    (union drm_vmw_alloc_dmabuf_arg *)data;
 680	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 681	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 682	struct vmw_dma_buffer *dma_buf;
 683	uint32_t handle;
 684	int ret;
 685
 686	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 687	if (unlikely(ret != 0))
 688		return ret;
 689
 690	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 691				    req->size, false, &handle, &dma_buf);
 692	if (unlikely(ret != 0))
 693		goto out_no_dmabuf;
 694
 695	rep->handle = handle;
 696	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 697	rep->cur_gmr_id = handle;
 698	rep->cur_gmr_offset = 0;
 699
 700	vmw_dmabuf_unreference(&dma_buf);
 701
 702out_no_dmabuf:
 703	ttm_read_unlock(&dev_priv->reservation_sem);
 704
 705	return ret;
 706}
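
/*
 * Illustrative user-space sketch of the alloc ioctl above: allocate a
 * buffer, then map it through the returned map_handle. drmCommandWriteRead()
 * is libdrm; the 64 KiB size is an arbitrary example value.
 *
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = 64 * 1024;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF,
 *				  &arg, sizeof(arg));
 *	if (ret == 0)
 *		ptr = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 */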
 707
 708int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 709			   struct drm_file *file_priv)
 710{
 711	struct drm_vmw_unref_dmabuf_arg *arg =
 712	    (struct drm_vmw_unref_dmabuf_arg *)data;
 713
 714	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 715					 arg->handle,
 716					 TTM_REF_USAGE);
 717}
 718
 719int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 720			   uint32_t handle, struct vmw_dma_buffer **out)
 721{
 722	struct vmw_user_dma_buffer *vmw_user_bo;
 723	struct ttm_base_object *base;
 724
 725	base = ttm_base_object_lookup(tfile, handle);
 726	if (unlikely(base == NULL)) {
 727		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 728		       (unsigned long)handle);
 729		return -ESRCH;
 730	}
 731
 732	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 733		ttm_base_object_unref(&base);
 734		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 735		       (unsigned long)handle);
 736		return -EINVAL;
 737	}
 738
 739	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 740				   prime.base);
 741	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 742	ttm_base_object_unref(&base);
 743	*out = &vmw_user_bo->dma;
 744
 745	return 0;
 746}
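
/*
 * Typical in-kernel use of the lookup above (a minimal sketch; the synccpu
 * ioctl earlier in this file is a real caller): the returned buffer carries
 * a reference that must be dropped when done.
 *
 *	struct vmw_dma_buffer *dma_buf;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use dma_buf->base ...
 *	vmw_dmabuf_unreference(&dma_buf);
 */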
 747
 748int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 749			      struct vmw_dma_buffer *dma_buf,
 750			      uint32_t *handle)
 751{
 752	struct vmw_user_dma_buffer *user_bo;
 753
 754	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 755		return -EINVAL;
 756
 757	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 758
 759	*handle = user_bo->prime.base.hash.key;
 760	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 761				  TTM_REF_USAGE, NULL);
 762}
 763
 764/*
 765 * Stream management
 766 */
 767
 768static void vmw_stream_destroy(struct vmw_resource *res)
 769{
 770	struct vmw_private *dev_priv = res->dev_priv;
 771	struct vmw_stream *stream;
 772	int ret;
 773
 774	DRM_INFO("%s: unref\n", __func__);
 775	stream = container_of(res, struct vmw_stream, res);
 776
 777	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
 778	WARN_ON(ret != 0);
 779}
 780
 781static int vmw_stream_init(struct vmw_private *dev_priv,
 782			   struct vmw_stream *stream,
 783			   void (*res_free) (struct vmw_resource *res))
 784{
 785	struct vmw_resource *res = &stream->res;
 786	int ret;
 787
 788	ret = vmw_resource_init(dev_priv, res, false, res_free,
 789				&vmw_stream_func);
 790
 791	if (unlikely(ret != 0)) {
 792		if (res_free == NULL)
 793			kfree(stream);
 794		else
 795			res_free(&stream->res);
 796		return ret;
 797	}
 798
 799	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
 800	if (ret) {
 801		vmw_resource_unreference(&res);
 802		return ret;
 803	}
 804
 805	DRM_INFO("%s: claimed\n", __func__);
 806
 807	vmw_resource_activate(&stream->res, vmw_stream_destroy);
 808	return 0;
 809}
 810
 811static void vmw_user_stream_free(struct vmw_resource *res)
 812{
 813	struct vmw_user_stream *stream =
 814	    container_of(res, struct vmw_user_stream, stream.res);
 815	struct vmw_private *dev_priv = res->dev_priv;
 816
 817	ttm_base_object_kfree(stream, base);
 818	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 819			    vmw_user_stream_size);
 820}
 821
 822/**
 823 * vmw_user_stream_base_release - Called when user space has dropped its last
 824 * reference on the base object; releases the base object's reference on the resource.
 825 */
 826
 827static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
 828{
 829	struct ttm_base_object *base = *p_base;
 830	struct vmw_user_stream *stream =
 831	    container_of(base, struct vmw_user_stream, base);
 832	struct vmw_resource *res = &stream->stream.res;
 833
 834	*p_base = NULL;
 835	vmw_resource_unreference(&res);
 836}
 837
 838int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 839			   struct drm_file *file_priv)
 840{
 841	struct vmw_private *dev_priv = vmw_priv(dev);
 842	struct vmw_resource *res;
 843	struct vmw_user_stream *stream;
 844	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 845	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 846	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 847	int ret = 0;
 848
 849
 850	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 851	if (unlikely(res == NULL))
 852		return -EINVAL;
 853
 854	if (res->res_free != &vmw_user_stream_free) {
 855		ret = -EINVAL;
 856		goto out;
 857	}
 858
 859	stream = container_of(res, struct vmw_user_stream, stream.res);
 860	if (stream->base.tfile != tfile) {
 861		ret = -EINVAL;
 862		goto out;
 863	}
 864
 865	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
 866out:
 867	vmw_resource_unreference(&res);
 868	return ret;
 869}
 870
 871int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 872			   struct drm_file *file_priv)
 873{
 874	struct vmw_private *dev_priv = vmw_priv(dev);
 875	struct vmw_user_stream *stream;
 876	struct vmw_resource *res;
 877	struct vmw_resource *tmp;
 878	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 879	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 880	int ret;
 881
 882	/*
 883	 * Approximate idr memory usage with 128 bytes. It will be limited
 884	 * by the maximum number of streams anyway.
 885	 */
 886
 887	if (unlikely(vmw_user_stream_size == 0))
 888		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 889
 890	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 891	if (unlikely(ret != 0))
 892		return ret;
 893
 894	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 895				   vmw_user_stream_size,
 896				   false, true);
 897	if (unlikely(ret != 0)) {
 898		if (ret != -ERESTARTSYS)
 899			DRM_ERROR("Out of graphics memory for stream"
 900				  " creation.\n");
 901		goto out_unlock;
 902	}
 903
 904
 905	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 906	if (unlikely(stream == NULL)) {
 907		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 908				    vmw_user_stream_size);
 909		ret = -ENOMEM;
 910		goto out_unlock;
 911	}
 912
 913	res = &stream->stream.res;
 914	stream->base.shareable = false;
 915	stream->base.tfile = NULL;
 916
 917	/*
 918	 * From here on, the destructor takes over resource freeing.
 919	 */
 920
 921	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 922	if (unlikely(ret != 0))
 923		goto out_unlock;
 924
 925	tmp = vmw_resource_reference(res);
 926	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
 927				   &vmw_user_stream_base_release, NULL);
 928
 929	if (unlikely(ret != 0)) {
 930		vmw_resource_unreference(&tmp);
 931		goto out_err;
 932	}
 933
 934	arg->stream_id = res->id;
 935out_err:
 936	vmw_resource_unreference(&res);
 937out_unlock:
 938	ttm_read_unlock(&dev_priv->reservation_sem);
 939	return ret;
 940}
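
/*
 * Illustrative user-space sketch of the stream ioctls, assuming the
 * DRM_VMW_CLAIM_STREAM and DRM_VMW_UNREF_STREAM command indices from
 * vmwgfx_drm.h: claim a stream id, use it for overlays, then drop it.
 *
 *	struct drm_vmw_stream_arg arg = { 0 };
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_CLAIM_STREAM,
 *				  &arg, sizeof(arg));
 *	if (ret == 0) {
 *		... use arg.stream_id with the overlay ioctl ...
 *		drmCommandWrite(fd, DRM_VMW_UNREF_STREAM,
 *				&arg, sizeof(arg));
 *	}
 */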
 941
 942int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 943			   struct ttm_object_file *tfile,
 944			   uint32_t *inout_id, struct vmw_resource **out)
 945{
 946	struct vmw_user_stream *stream;
 947	struct vmw_resource *res;
 948	int ret;
 949
 950	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
 951				  *inout_id);
 952	if (unlikely(res == NULL))
 953		return -EINVAL;
 954
 955	if (res->res_free != &vmw_user_stream_free) {
 956		ret = -EINVAL;
 957		goto err_ref;
 958	}
 959
 960	stream = container_of(res, struct vmw_user_stream, stream.res);
 961	if (stream->base.tfile != tfile) {
 962		ret = -EPERM;
 963		goto err_ref;
 964	}
 965
 966	*inout_id = stream->stream.stream_id;
 967	*out = res;
 968	return 0;
 969err_ref:
 970	vmw_resource_unreference(&res);
 971	return ret;
 972}
 973
 974
 975/**
 976 * vmw_dumb_create - Create a dumb kms buffer
 977 *
 978 * @file_priv: Pointer to a struct drm_file identifying the caller.
 979 * @dev: Pointer to the drm device.
 980 * @args: Pointer to a struct drm_mode_create_dumb structure
 981 *
 982 * This is a driver callback for the core drm create_dumb functionality.
 983 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 984 * that the arguments have a different format.
 985 */
 986int vmw_dumb_create(struct drm_file *file_priv,
 987		    struct drm_device *dev,
 988		    struct drm_mode_create_dumb *args)
 989{
 990	struct vmw_private *dev_priv = vmw_priv(dev);
 991	struct vmw_dma_buffer *dma_buf;
 992	int ret;
 993
 994	args->pitch = args->width * ((args->bpp + 7) / 8);
 995	args->size = args->pitch * args->height;
 996
 997	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 998	if (unlikely(ret != 0))
 999		return ret;
1000
1001	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1002				    args->size, false, &args->handle,
1003				    &dma_buf);
1004	if (unlikely(ret != 0))
1005		goto out_no_dmabuf;
1006
1007	vmw_dmabuf_unreference(&dma_buf);
1008out_no_dmabuf:
1009	ttm_read_unlock(&dev_priv->reservation_sem);
1010	return ret;
1011}
1012
1013/**
1014 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1015 *
1016 * @file_priv: Pointer to a struct drm_file identifying the caller.
1017 * @dev: Pointer to the drm device.
1018 * @handle: Handle identifying the dumb buffer.
1019 * @offset: The address space offset returned.
1020 *
1021 * This is a driver callback for the core drm dumb_map_offset functionality.
1022 */
1023int vmw_dumb_map_offset(struct drm_file *file_priv,
1024			struct drm_device *dev, uint32_t handle,
1025			uint64_t *offset)
1026{
1027	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1028	struct vmw_dma_buffer *out_buf;
1029	int ret;
1030
1031	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
1032	if (ret != 0)
1033		return -EINVAL;
1034
1035	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1036	vmw_dmabuf_unreference(&out_buf);
1037	return 0;
1038}
1039
1040/**
1041 * vmw_dumb_destroy - Destroy a dumb buffer
1042 *
1043 * @file_priv: Pointer to a struct drm_file identifying the caller.
1044 * @dev: Pointer to the drm device.
1045 * @handle: Handle identifying the dumb buffer.
1046 *
1047 * This is a driver callback for the core drm dumb_destroy functionality.
1048 */
1049int vmw_dumb_destroy(struct drm_file *file_priv,
1050		     struct drm_device *dev,
1051		     uint32_t handle)
1052{
1053	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1054					 handle, TTM_REF_USAGE);
1055}
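
/*
 * Illustrative user-space sketch tying the three dumb-buffer callbacks
 * above together via the standard DRM ioctls (sizes are example-only):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	struct drm_mode_destroy_dumb destroy = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 *	... draw ...
 *	destroy.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
 */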
1056
1057/**
1058 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1059 *
1060 * @res:            The resource for which to allocate a backup buffer.
1061 * @interruptible:  Whether any sleeps during allocation should be
1062 *                  performed while interruptible.
1063 */
1064static int vmw_resource_buf_alloc(struct vmw_resource *res,
1065				  bool interruptible)
1066{
1067	unsigned long size =
1068		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
1069	struct vmw_dma_buffer *backup;
1070	int ret;
1071
1072	if (likely(res->backup)) {
1073		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1074		return 0;
1075	}
1076
1077	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1078	if (unlikely(backup == NULL))
1079		return -ENOMEM;
1080
1081	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1082			      res->func->backup_placement,
1083			      interruptible,
1084			      &vmw_dmabuf_bo_free);
1085	if (unlikely(ret != 0))
1086		goto out_no_dmabuf;
1087
1088	res->backup = backup;
1089
1090out_no_dmabuf:
1091	return ret;
1092}
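
/*
 * Note on the size rounding above: (x + PAGE_SIZE - 1) & PAGE_MASK is the
 * usual PAGE_ALIGN(x). With 4 KiB pages, a backup_size of 5000 bytes rounds
 * up to 8192 (two pages), while 4096 stays 4096.
 */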
1093
1094/**
1095 * vmw_resource_do_validate - Make a resource up-to-date and visible
1096 *                            to the device.
1097 *
1098 * @res:            The resource to make visible to the device.
1099 * @val_buf:        Information about a buffer possibly
1100 *                  containing backup data if a bind operation is needed.
1101 *
1102 * On hardware resource shortage, this function returns -EBUSY and
1103 * should be retried once resources have been freed up.
1104 */
1105static int vmw_resource_do_validate(struct vmw_resource *res,
1106				    struct ttm_validate_buffer *val_buf)
1107{
1108	int ret = 0;
1109	const struct vmw_res_func *func = res->func;
1110
1111	if (unlikely(res->id == -1)) {
1112		ret = func->create(res);
1113		if (unlikely(ret != 0))
1114			return ret;
1115	}
1116
1117	if (func->bind &&
1118	    ((func->needs_backup && list_empty(&res->mob_head) &&
1119	      val_buf->bo != NULL) ||
1120	     (!func->needs_backup && val_buf->bo != NULL))) {
1121		ret = func->bind(res, val_buf);
1122		if (unlikely(ret != 0))
1123			goto out_bind_failed;
1124		if (func->needs_backup)
1125			list_add_tail(&res->mob_head, &res->backup->res_list);
1126	}
1127
1128	/*
1129	 * Only do this on write operations, and move to
1130	 * vmw_resource_unreserve if it can be called after
1131	 * backup buffers have been unreserved. Otherwise
1132	 * sort out locking.
1133	 */
1134	res->res_dirty = true;
1135
1136	return 0;
1137
1138out_bind_failed:
1139	func->destroy(res);
1140
1141	return ret;
1142}
1143
1144/**
1145 * vmw_resource_unreserve - Unreserve a resource previously reserved for
1146 * command submission.
1147 *
1148 * @res:               Pointer to the struct vmw_resource to unreserve.
1149 * @new_backup:        Pointer to new backup buffer if command submission
1150 *                     switched.
1151 * @new_backup_offset: New backup offset if @new_backup is !NULL.
1152 *
1153 * Currently unreserving a resource means putting it back on the device's
1154 * resource LRU list, so that it can be evicted if necessary.
1155 */
1156void vmw_resource_unreserve(struct vmw_resource *res,
1157			    struct vmw_dma_buffer *new_backup,
1158			    unsigned long new_backup_offset)
1159{
1160	struct vmw_private *dev_priv = res->dev_priv;
1161
1162	if (!list_empty(&res->lru_head))
1163		return;
1164
1165	if (new_backup && new_backup != res->backup) {
1166
1167		if (res->backup) {
1168			lockdep_assert_held(&res->backup->base.resv->lock.base);
1169			list_del_init(&res->mob_head);
1170			vmw_dmabuf_unreference(&res->backup);
1171		}
1172
1173		res->backup = vmw_dmabuf_reference(new_backup);
1174		lockdep_assert_held(&new_backup->base.resv->lock.base);
1175		list_add_tail(&res->mob_head, &new_backup->res_list);
1176	}
1177	if (new_backup)
1178		res->backup_offset = new_backup_offset;
1179
1180	if (!res->func->may_evict || res->id == -1)
1181		return;
1182
1183	write_lock(&dev_priv->resource_lock);
1184	list_add_tail(&res->lru_head,
1185		      &res->dev_priv->res_lru[res->func->res_type]);
1186	write_unlock(&dev_priv->resource_lock);
1187}
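
/*
 * A minimal sketch of the reserve/unreserve pairing (the real callers live
 * in the command submission path): passing NULL/0 keeps the current backup
 * buffer and offset.
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... validate and submit commands referencing res ...
 *	vmw_resource_unreserve(res, NULL, 0);
 */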
1188
1189/**
1190 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1191 *                             for a resource, and if so allocate one and
1192 *                             then reserve and validate it.
1193 *
1194 * @res:            The resource for which to allocate a backup buffer.
1195 * @interruptible:  Whether any sleeps during allocation should be
1196 *                  performed while interruptible.
1197 * @val_buf:        On successful return contains data about the
1198 *                  reserved and validated backup buffer.
1199 */
1200static int
1201vmw_resource_check_buffer(struct vmw_resource *res,
1202			  bool interruptible,
1203			  struct ttm_validate_buffer *val_buf)
1204{
1205	struct list_head val_list;
1206	bool backup_dirty = false;
1207	int ret;
1208
1209	if (unlikely(res->backup == NULL)) {
1210		ret = vmw_resource_buf_alloc(res, interruptible);
1211		if (unlikely(ret != 0))
1212			return ret;
1213	}
1214
1215	INIT_LIST_HEAD(&val_list);
1216	val_buf->bo = ttm_bo_reference(&res->backup->base);
1217	list_add_tail(&val_buf->head, &val_list);
1218	ret = ttm_eu_reserve_buffers(NULL, &val_list);
1219	if (unlikely(ret != 0))
1220		goto out_no_reserve;
1221
1222	if (res->func->needs_backup && list_empty(&res->mob_head))
1223		return 0;
1224
1225	backup_dirty = res->backup_dirty;
1226	ret = ttm_bo_validate(&res->backup->base,
1227			      res->func->backup_placement,
1228			      true, false);
1229
1230	if (unlikely(ret != 0))
1231		goto out_no_validate;
1232
1233	return 0;
1234
1235out_no_validate:
1236	ttm_eu_backoff_reservation(NULL, &val_list);
1237out_no_reserve:
1238	ttm_bo_unref(&val_buf->bo);
1239	if (backup_dirty)
1240		vmw_dmabuf_unreference(&res->backup);
1241
1242	return ret;
1243}
1244
1245/**
1246 * vmw_resource_reserve - Reserve a resource for command submission
1247 *
1248 * @res:            The resource to reserve.
1249 * @no_backup:      If true, do not allocate a backup buffer for the resource.
1250 * This function takes the resource off the LRU list and makes sure
1251 * a backup buffer is present for guest-backed resources. However,
1252 * the buffer may not be bound to the resource at this point.
1253 *
1254 */
1255int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1256{
1257	struct vmw_private *dev_priv = res->dev_priv;
1258	int ret;
1259
1260	write_lock(&dev_priv->resource_lock);
1261	list_del_init(&res->lru_head);
1262	write_unlock(&dev_priv->resource_lock);
1263
1264	if (res->func->needs_backup && res->backup == NULL &&
1265	    !no_backup) {
1266		ret = vmw_resource_buf_alloc(res, true);
1267		if (unlikely(ret != 0))
1268			return ret;
1269	}
1270
1271	return 0;
1272}
1273
1274/**
1275 * vmw_resource_backoff_reservation - Unreserve and unreference a
1276 *                                    backup buffer
1277 *
1278 * @val_buf:        Backup buffer information.
1279 */
1280static void
1281vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1282{
1283	struct list_head val_list;
1284
1285	if (likely(val_buf->bo == NULL))
1286		return;
1287
1288	INIT_LIST_HEAD(&val_list);
1289	list_add_tail(&val_buf->head, &val_list);
1290	ttm_eu_backoff_reservation(NULL, &val_list);
1291	ttm_bo_unref(&val_buf->bo);
1292}
1293
1294/**
1295 * vmw_resource_do_evict - Evict a resource, and transfer its data
1296 *                         to a backup buffer.
1297 *
1298 * @res:            The resource to evict.
1299 * @interruptible:  Whether to wait interruptible.
1300 */
1301int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1302{
1303	struct ttm_validate_buffer val_buf;
1304	const struct vmw_res_func *func = res->func;
1305	int ret;
1306
1307	BUG_ON(!func->may_evict);
1308
1309	val_buf.bo = NULL;
1310	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1311	if (unlikely(ret != 0))
1312		return ret;
1313
1314	if (unlikely(func->unbind != NULL &&
1315		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1316		ret = func->unbind(res, res->res_dirty, &val_buf);
1317		if (unlikely(ret != 0))
1318			goto out_no_unbind;
1319		list_del_init(&res->mob_head);
1320	}
1321	ret = func->destroy(res);
1322	res->backup_dirty = true;
1323	res->res_dirty = false;
1324out_no_unbind:
1325	vmw_resource_backoff_reservation(&val_buf);
1326
1327	return ret;
1328}
1329
1330
1331/**
1332 * vmw_resource_validate - Make a resource up-to-date and visible
1333 *                         to the device.
1334 *
1335 * @res:            The resource to make visible to the device.
1336 *
1337 * On successful return, any backup DMA buffer pointed to by @res->backup will
1338 * be reserved and validated.
1339 * On hardware resource shortage, this function will repeatedly evict
1340 * resources of the same type until the validation succeeds.
1341 */
1342int vmw_resource_validate(struct vmw_resource *res)
1343{
1344	int ret;
1345	struct vmw_resource *evict_res;
1346	struct vmw_private *dev_priv = res->dev_priv;
1347	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1348	struct ttm_validate_buffer val_buf;
1349	unsigned err_count = 0;
1350
1351	if (likely(!res->func->may_evict))
1352		return 0;
1353
1354	val_buf.bo = NULL;
1355	if (res->backup)
1356		val_buf.bo = &res->backup->base;
1357	do {
1358		ret = vmw_resource_do_validate(res, &val_buf);
1359		if (likely(ret != -EBUSY))
1360			break;
1361
1362		write_lock(&dev_priv->resource_lock);
1363		if (list_empty(lru_list) || !res->func->may_evict) {
1364			DRM_ERROR("Out of device device resources "
1365				  "for %s.\n", res->func->type_name);
1366			ret = -EBUSY;
1367			write_unlock(&dev_priv->resource_lock);
1368			break;
1369		}
1370
1371		evict_res = vmw_resource_reference
1372			(list_first_entry(lru_list, struct vmw_resource,
1373					  lru_head));
1374		list_del_init(&evict_res->lru_head);
1375
1376		write_unlock(&dev_priv->resource_lock);
1377
1378		ret = vmw_resource_do_evict(evict_res, true);
1379		if (unlikely(ret != 0)) {
1380			write_lock(&dev_priv->resource_lock);
1381			list_add_tail(&evict_res->lru_head, lru_list);
1382			write_unlock(&dev_priv->resource_lock);
1383			if (ret == -ERESTARTSYS ||
1384			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1385				vmw_resource_unreference(&evict_res);
1386				goto out_no_validate;
1387			}
1388		}
1389
1390		vmw_resource_unreference(&evict_res);
1391	} while (1);
1392
1393	if (unlikely(ret != 0))
1394		goto out_no_validate;
1395	else if (!res->func->needs_backup && res->backup) {
1396		list_del_init(&res->mob_head);
1397		vmw_dmabuf_unreference(&res->backup);
1398	}
1399
1400	return 0;
1401
1402out_no_validate:
1403	return ret;
1404}
1405
1406/**
1407 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1408 *                       object without unreserving it.
1409 *
1410 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1411 * @fence:          Pointer to the fence. If NULL, this function will
1412 *                  insert a fence into the command stream.
1413 *
1414 * Contrary to the ttm_eu version of this function, it takes only
1415 * a single buffer object instead of a list, and it also doesn't
1416 * unreserve the buffer object, which needs to be done separately.
1417 */
1418void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1419			 struct vmw_fence_obj *fence)
1420{
1421	struct ttm_bo_device *bdev = bo->bdev;
1422	struct ttm_bo_driver *driver = bdev->driver;
1423	struct vmw_fence_obj *old_fence_obj;
1424	struct vmw_private *dev_priv =
1425		container_of(bdev, struct vmw_private, bdev);
1426
1427	if (fence == NULL)
1428		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1429	else
1430		driver->sync_obj_ref(fence);
1431
1432	spin_lock(&bdev->fence_lock);
1433
1434	old_fence_obj = bo->sync_obj;
1435	bo->sync_obj = fence;
1436
1437	spin_unlock(&bdev->fence_lock);
1438
1439	if (old_fence_obj)
1440		vmw_fence_obj_unreference(&old_fence_obj);
1441}
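
/*
 * A minimal usage sketch for the helper above: the buffer must already be
 * reserved by the caller, and unreserving is a separate step.
 *
 *	(bo reserved by the caller)
 *	vmw_fence_single_bo(bo, fence);	(fence == NULL inserts a new fence)
 *	ttm_bo_unreserve(bo);
 */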
1442
1443/**
1444 * vmw_resource_move_notify - TTM move_notify_callback
1445 *
1446 * @bo:             The TTM buffer object about to move.
1447 * @mem:            The struct ttm_mem_reg indicating to what memory
1448 *                  region the move is taking place.
1449 *
1450 * Evicts the Guest Backed hardware resource if the backup
1451 * buffer is being moved out of MOB memory.
1452 * Note that this function should not race with the resource
1453 * validation code as long as it accesses only members of struct
1454 * resource that remain static while bo::res is !NULL and
1455 * while we have @bo reserved. struct resource::backup is *not* a
1456 * static member. The resource validation code will take care
1457 * to set @bo::res to NULL, while having @bo reserved when the
1458 * buffer is no longer bound to the resource, so @bo::res can be
1459 * used to determine whether there is a need to unbind and whether
1460 * it is safe to unbind.
1461 */
1462void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1463			      struct ttm_mem_reg *mem)
1464{
1465	struct vmw_dma_buffer *dma_buf;
1466
1467	if (mem == NULL)
1468		return;
1469
1470	if (bo->destroy != vmw_dmabuf_bo_free &&
1471	    bo->destroy != vmw_user_dmabuf_destroy)
1472		return;
1473
1474	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1475
1476	if (mem->mem_type != VMW_PL_MOB) {
1477		struct vmw_resource *res, *n;
1478		struct ttm_bo_device *bdev = bo->bdev;
1479		struct ttm_validate_buffer val_buf;
1480
1481		val_buf.bo = bo;
1482
1483		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1484
1485			if (unlikely(res->func->unbind == NULL))
1486				continue;
1487
1488			(void) res->func->unbind(res, true, &val_buf);
1489			res->backup_dirty = true;
1490			res->res_dirty = false;
1491			list_del_init(&res->mob_head);
1492		}
1493
1494		spin_lock(&bdev->fence_lock);
1495		(void) ttm_bo_wait(bo, false, false, false);
1496		spin_unlock(&bdev->fence_lock);
1497	}
1498}
1499
1500/**
1501 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1502 *
1503 * @res:            The resource being queried.
1504 */
1505bool vmw_resource_needs_backup(const struct vmw_resource *res)
1506{
1507	return res->func->needs_backup;
1508}
1509
1510/**
1511 * vmw_resource_evict_type - Evict all resources of a specific type
1512 *
1513 * @dev_priv:       Pointer to a device private struct
1514 * @type:           The resource type to evict
1515 *
1516 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1517 * try to evict all evictable resources of a specific type.
1518 */
1519static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1520				    enum vmw_res_type type)
1521{
1522	struct list_head *lru_list = &dev_priv->res_lru[type];
1523	struct vmw_resource *evict_res;
1524	unsigned err_count = 0;
1525	int ret;
1526
1527	do {
1528		write_lock(&dev_priv->resource_lock);
1529
1530		if (list_empty(lru_list))
1531			goto out_unlock;
1532
1533		evict_res = vmw_resource_reference(
1534			list_first_entry(lru_list, struct vmw_resource,
1535					 lru_head));
1536		list_del_init(&evict_res->lru_head);
1537		write_unlock(&dev_priv->resource_lock);
1538
1539		ret = vmw_resource_do_evict(evict_res, false);
1540		if (unlikely(ret != 0)) {
1541			write_lock(&dev_priv->resource_lock);
1542			list_add_tail(&evict_res->lru_head, lru_list);
1543			write_unlock(&dev_priv->resource_lock);
1544			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1545				vmw_resource_unreference(&evict_res);
1546				return;
1547			}
1548		}
1549
1550		vmw_resource_unreference(&evict_res);
1551	} while (1);
1552
1553out_unlock:
1554	write_unlock(&dev_priv->resource_lock);
1555}
1556
1557/**
1558 * vmw_resource_evict_all - Evict all evictable resources
1559 *
1560 * @dev_priv:       Pointer to a device private struct
1561 *
1562 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1563 * evict all evictable resources. In particular this means that all
1564 * guest-backed resources that are registered with the device are
1565 * evicted and the OTable becomes clean.
1566 */
1567void vmw_resource_evict_all(struct vmw_private *dev_priv)
1568{
1569	enum vmw_res_type type;
1570
1571	mutex_lock(&dev_priv->cmdbuf_mutex);
1572
1573	for (type = 0; type < vmw_res_max; ++type)
1574		vmw_resource_evict_type(dev_priv, type);
1575
1576	mutex_unlock(&dev_priv->cmdbuf_mutex);
1577}