v6.13.7
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28
  29#include <drm/ttm/ttm_placement.h>
  30
  31#include "vmwgfx_binding.h"
  32#include "vmwgfx_bo.h"
  33#include "vmwgfx_drv.h"
  34#include "vmwgfx_resource_priv.h"
  35
  36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38/**
  39 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  40 * @res: The resource
  41 */
  42void vmw_resource_mob_attach(struct vmw_resource *res)
  43{
  44	struct vmw_bo *gbo = res->guest_memory_bo;
  45	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
  46
  47	dma_resv_assert_held(gbo->tbo.base.resv);
  48	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  49		res->func->prio;
  50
  51	while (*new) {
  52		struct vmw_resource *this =
  53			container_of(*new, struct vmw_resource, mob_node);
  54
  55		parent = *new;
  56		new = (res->guest_memory_offset < this->guest_memory_offset) ?
  57			&((*new)->rb_left) : &((*new)->rb_right);
  58	}
  59
  60	rb_link_node(&res->mob_node, parent, new);
  61	rb_insert_color(&res->mob_node, &gbo->res_tree);
  62	vmw_bo_del_detached_resource(gbo, res);
  63
  64	vmw_bo_prio_add(gbo, res->used_prio);
  65}
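The attach helper above is the standard <linux/rbtree.h> insertion walk, keyed the way the driver keys on guest_memory_offset. A minimal sketch of the same pattern on a hypothetical node type (demo_node and its fields are illustrative, not part of this file):

#include <linux/rbtree.h>

struct demo_node {
	struct rb_node node;
	unsigned long offset;		/* sort key, cf. guest_memory_offset */
};

static void demo_insert(struct rb_root *root, struct demo_node *dn)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct demo_node *this =
			rb_entry(*new, struct demo_node, node);

		parent = *new;
		new = (dn->offset < this->offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&dn->node, parent, new);	/* hook into the found slot */
	rb_insert_color(&dn->node, root);	/* rebalance and recolor */
}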
  66
  67/**
  68 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  69 * @res: The resource
  70 */
  71void vmw_resource_mob_detach(struct vmw_resource *res)
  72{
  73	struct vmw_bo *gbo = res->guest_memory_bo;
  74
  75	dma_resv_assert_held(gbo->tbo.base.resv);
  76	if (vmw_resource_mob_attached(res)) {
  77		rb_erase(&res->mob_node, &gbo->res_tree);
  78		RB_CLEAR_NODE(&res->mob_node);
  79		vmw_bo_prio_del(gbo, res->used_prio);
  80	}
  81}
  82
  83struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  84{
  85	kref_get(&res->kref);
  86	return res;
  87}
  88
  89struct vmw_resource *
  90vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  91{
  92	return kref_get_unless_zero(&res->kref) ? res : NULL;
  93}
  94
  95/**
  96 * vmw_resource_release_id - release a resource id to the id manager.
  97 *
  98 * @res: Pointer to the resource.
  99 *
 100 * Release the resource id to the resource id manager and set it to -1
 101 */
 102void vmw_resource_release_id(struct vmw_resource *res)
 103{
 104	struct vmw_private *dev_priv = res->dev_priv;
 105	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 106
 107	spin_lock(&dev_priv->resource_lock);
 108	if (res->id != -1)
 109		idr_remove(idr, res->id);
 110	res->id = -1;
 111	spin_unlock(&dev_priv->resource_lock);
 112}
 113
 114static void vmw_resource_release(struct kref *kref)
 115{
 116	struct vmw_resource *res =
 117	    container_of(kref, struct vmw_resource, kref);
 118	struct vmw_private *dev_priv = res->dev_priv;
 119	int id;
 120	int ret;
 121	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 122
 123	spin_lock(&dev_priv->resource_lock);
 124	list_del_init(&res->lru_head);
 125	spin_unlock(&dev_priv->resource_lock);
 126	if (res->guest_memory_bo) {
 127		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
 128
 129		ret = ttm_bo_reserve(bo, false, false, NULL);
 130		BUG_ON(ret);
 131		if (vmw_resource_mob_attached(res) &&
 132		    res->func->unbind != NULL) {
 133			struct ttm_validate_buffer val_buf;
 134
 135			val_buf.bo = bo;
 136			val_buf.num_shared = 0;
 137			res->func->unbind(res, false, &val_buf);
 138		}
 139		res->guest_memory_dirty = false;
 140		vmw_resource_mob_detach(res);
 141		if (res->dirty)
 142			res->func->dirty_free(res);
 143		if (res->coherent)
 144			vmw_bo_dirty_release(res->guest_memory_bo);
 145		ttm_bo_unreserve(bo);
 146		vmw_user_bo_unref(&res->guest_memory_bo);
 147	}
 148
 149	if (likely(res->hw_destroy != NULL)) {
 150		mutex_lock(&dev_priv->binding_mutex);
 151		vmw_binding_res_list_kill(&res->binding_head);
 152		mutex_unlock(&dev_priv->binding_mutex);
 153		res->hw_destroy(res);
 154	}
 155
 156	id = res->id;
 157	if (res->res_free != NULL)
 158		res->res_free(res);
 159	else
 160		kfree(res);
 161
 162	spin_lock(&dev_priv->resource_lock);
 163	if (id != -1)
 164		idr_remove(idr, id);
 165	spin_unlock(&dev_priv->resource_lock);
 166}
 167
 168void vmw_resource_unreference(struct vmw_resource **p_res)
 169{
 170	struct vmw_resource *res = *p_res;
 171
 172	*p_res = NULL;
 173	kref_put(&res->kref, vmw_resource_release);
 174}
 175
 176
 177/**
 178 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 179 *
 180 * @res: Pointer to the resource.
 181 *
 182 * Allocate the lowest free resource id from the id manager, and set
 183 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 184 */
 185int vmw_resource_alloc_id(struct vmw_resource *res)
 186{
 187	struct vmw_private *dev_priv = res->dev_priv;
 188	int ret;
 189	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 190
 191	BUG_ON(res->id != -1);
 192
 193	idr_preload(GFP_KERNEL);
 194	spin_lock(&dev_priv->resource_lock);
 195
 196	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 197	if (ret >= 0)
 198		res->id = ret;
 199
 200	spin_unlock(&dev_priv->resource_lock);
 201	idr_preload_end();
 202	return ret < 0 ? ret : 0;
 203}
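The function above uses the standard idr preload pattern: idr_preload() preallocates with GFP_KERNEL while sleeping is still allowed, so the GFP_NOWAIT idr_alloc() under the spinlock cannot sleep. A minimal sketch with an illustrative idr and lock (not from this driver):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

static int demo_alloc_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills the per-cpu cache */
	spin_lock(&demo_lock);
	/* start = 1, end = 0 means "no upper bound" */
	id = idr_alloc(&demo_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&demo_lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}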
 204
 205/**
 206 * vmw_resource_init - initialize a struct vmw_resource
 207 *
 208 * @dev_priv:       Pointer to a device private struct.
 209 * @res:            The struct vmw_resource to initialize.
 210 * @delay_id:       Boolean whether to defer device id allocation until
 211 *                  the first validation.
 212 * @res_free:       Resource destructor.
 213 * @func:           Resource function table.
 214 */
 215int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 216		      bool delay_id,
 217		      void (*res_free) (struct vmw_resource *res),
 218		      const struct vmw_res_func *func)
 219{
 220	kref_init(&res->kref);
 221	res->hw_destroy = NULL;
 222	res->res_free = res_free;
 223	res->dev_priv = dev_priv;
 224	res->func = func;
 225	RB_CLEAR_NODE(&res->mob_node);
 226	INIT_LIST_HEAD(&res->lru_head);
 227	INIT_LIST_HEAD(&res->binding_head);
 228	res->id = -1;
 229	res->guest_memory_bo = NULL;
 230	res->guest_memory_offset = 0;
 231	res->guest_memory_dirty = false;
 232	res->res_dirty = false;
 233	res->coherent = false;
 234	res->used_prio = 3;
 235	res->dirty = NULL;
 236	if (delay_id)
 237		return 0;
 238	else
 239		return vmw_resource_alloc_id(res);
 240}
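Callers typically pass delay_id = true so that the device id is only allocated when the resource is first validated. A hedged usage sketch; my_res_free and my_res_func are placeholders, not symbols from this driver:

static void my_res_free(struct vmw_resource *res)	/* illustrative */
{
	kfree(res);
}

static const struct vmw_res_func my_res_func;		/* illustrative */

static int demo_init(struct vmw_private *dev_priv, struct vmw_resource *res)
{
	/* res->id stays -1 until the first validation allocates one */
	return vmw_resource_init(dev_priv, res, true, my_res_free,
				 &my_res_func);
}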
 241
 242
 243/**
 244 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 245 * TTM user-space handle and perform basic type checks
 246 *
 247 * @dev_priv:     Pointer to a device private struct
 248 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 249 * @handle:       The TTM user-space handle
 250 * @converter:    Pointer to an object describing the resource type
 251 * @p_res:        On successful return the location pointed to will contain
 252 *                a pointer to a refcounted struct vmw_resource.
 253 *
 254 * If the handle can't be found or is associated with an incorrect resource
 255 * type, -EINVAL will be returned.
 256 */
 257int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 258				    struct ttm_object_file *tfile,
 259				    uint32_t handle,
 260				    const struct vmw_user_resource_conv
 261				    *converter,
 262				    struct vmw_resource **p_res)
 263{
 264	struct ttm_base_object *base;
 265	struct vmw_resource *res;
 266	int ret = -EINVAL;
 267
 268	base = ttm_base_object_lookup(tfile, handle);
 269	if (unlikely(!base))
 270		return -EINVAL;
 271
 272	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 273		goto out_bad_resource;
 274
 275	res = converter->base_obj_to_res(base);
 276	kref_get(&res->kref);
 277
 278	*p_res = res;
 279	ret = 0;
 280
 281out_bad_resource:
 282	ttm_base_object_unref(&base);
 283
 284	return ret;
 285}
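A sketch of the typical caller pattern: the lookup returns a referenced resource that must be dropped with vmw_resource_unreference(). The converter argument stands for a concrete vmw_user_resource_conv such as user_surface_converter; demo_use_handle is illustrative:

static int demo_use_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile, uint32_t handle,
			   const struct vmw_user_resource_conv *converter)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      converter, &res);
	if (ret)
		return ret;

	/* ... use res ... */

	vmw_resource_unreference(&res);	/* drops the ref and NULLs res */
	return 0;
}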
 286
 287/*
 288 * Helper function that looks up either a surface or a bo.
 289 *
 290 * The pointers pointed at by @uo->surface and @uo->buffer need to be NULL.
 291 */
 292int vmw_user_object_lookup(struct vmw_private *dev_priv,
 293			   struct drm_file *filp,
 294			   u32 handle,
 295			   struct vmw_user_object *uo)
 296{
 297	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 298	struct vmw_resource *res;
 299	int ret;
 300
 301	WARN_ON(uo->surface || uo->buffer);
 302
 303	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 304					      user_surface_converter,
 305					      &res);
 306	if (!ret) {
 307		uo->surface = vmw_res_to_srf(res);
 308		return 0;
 309	}
 310
 311	uo->surface = NULL;
 312	ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
 313	if (!ret && !uo->buffer->is_dumb) {
 314		uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
 315							    uo->buffer,
 316							    handle);
 317		if (uo->surface)
 318			vmw_user_bo_unref(&uo->buffer);
 319	}
 320
 321	return ret;
 322}
 323
 324/**
 325 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 326 *
 327 * @res:            The resource for which to allocate a gbo buffer.
 328 * @interruptible:  Whether any sleeps during allocation should be
 329 *                  performed while interruptible.
 330 */
 331static int vmw_resource_buf_alloc(struct vmw_resource *res,
 332				  bool interruptible)
 333{
 334	unsigned long size = PFN_ALIGN(res->guest_memory_size);
 335	struct vmw_bo *gbo;
 336	struct vmw_bo_params bo_params = {
 337		.domain = res->func->domain,
 338		.busy_domain = res->func->busy_domain,
 339		.bo_type = ttm_bo_type_device,
 340		.size = res->guest_memory_size,
 341		.pin = false
 342	};
 343	int ret;
 344
 345	if (likely(res->guest_memory_bo)) {
 346		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
 347		return 0;
 348	}
 349
 350	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
 351	if (unlikely(ret != 0))
 352		goto out_no_bo;
 353
 354	res->guest_memory_bo = gbo;
 355
 356out_no_bo:
 357	return ret;
 358}
 359
 360/**
 361 * vmw_resource_do_validate - Make a resource up-to-date and visible
 362 *                            to the device.
 363 *
 364 * @res:            The resource to make visible to the device.
 365 * @val_buf:        Information about a buffer possibly
 366 *                  containing backup data if a bind operation is needed.
 367 * @dirtying:       Transfer dirty regions.
 368 *
 369 * On hardware resource shortage, this function returns -EBUSY and
 370 * should be retried once resources have been freed up.
 371 */
 372static int vmw_resource_do_validate(struct vmw_resource *res,
 373				    struct ttm_validate_buffer *val_buf,
 374				    bool dirtying)
 375{
 376	int ret = 0;
 377	const struct vmw_res_func *func = res->func;
 378
 379	if (unlikely(res->id == -1)) {
 380		ret = func->create(res);
 381		if (unlikely(ret != 0))
 382			return ret;
 383	}
 384
 385	if (func->bind &&
 386	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
 387	      val_buf->bo) ||
 388	     (!func->needs_guest_memory && val_buf->bo))) {
 389		ret = func->bind(res, val_buf);
 390		if (unlikely(ret != 0))
 391			goto out_bind_failed;
 392		if (func->needs_guest_memory)
 393			vmw_resource_mob_attach(res);
 394	}
 395
 396	/*
 397	 * Handle the case where the backup mob is marked coherent but
 398	 * the resource isn't.
 399	 */
 400	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 401	    !res->coherent) {
 402		if (res->guest_memory_bo->dirty && !res->dirty) {
 403			ret = func->dirty_alloc(res);
 404			if (ret)
 405				return ret;
 406		} else if (!res->guest_memory_bo->dirty && res->dirty) {
 407			func->dirty_free(res);
 408		}
 409	}
 410
 411	/*
 412	 * Transfer the dirty regions to the resource and update
 413	 * the resource.
 414	 */
 415	if (res->dirty) {
 416		if (dirtying && !res->res_dirty) {
 417			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
 418			pgoff_t end = __KERNEL_DIV_ROUND_UP
 419				(res->guest_memory_offset + res->guest_memory_size,
 420				 PAGE_SIZE);
 421
 422			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
 423		}
 424
 425		vmw_bo_dirty_transfer_to_res(res);
 426		return func->dirty_sync(res);
 427	}
 428
 429	return 0;
 430
 431out_bind_failed:
 432	func->destroy(res);
 433
 434	return ret;
 435}
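The dirtying branch above converts a byte range into a right-open page range before unmapping it for dirty tracking. A worked example (illustrative numbers, assuming 4 KiB pages):

/*
 * guest_memory_offset = 0x3000, guest_memory_size = 0x2800:
 *   start = 0x3000 >> PAGE_SHIFT                          = 3
 *   end   = __KERNEL_DIV_ROUND_UP(0x3000 + 0x2800, 0x1000) = 6
 * so pages 3, 4 and 5 are passed to vmw_bo_dirty_unmap().
 */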
 436
 437/**
 438 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 439 * command submission.
 440 *
 441 * @res:               Pointer to the struct vmw_resource to unreserve.
 442 * @dirty_set:         Change dirty status of the resource.
 443 * @dirty:             When changing dirty status indicates the new status.
 444 * @switch_guest_memory: Guest memory buffer has been switched.
 445 * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
 446 *                     switched. May be NULL.
 447 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 448 *
 449 * Currently unreserving a resource means putting it back on the device's
 450 * resource lru list, so that it can be evicted if necessary.
 451 */
 452void vmw_resource_unreserve(struct vmw_resource *res,
 453			    bool dirty_set,
 454			    bool dirty,
 455			    bool switch_guest_memory,
 456			    struct vmw_bo *new_guest_memory_bo,
 457			    unsigned long new_guest_memory_offset)
 458{
 459	struct vmw_private *dev_priv = res->dev_priv;
 460
 461	if (!list_empty(&res->lru_head))
 462		return;
 463
 464	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
 465		if (res->guest_memory_bo) {
 466			vmw_resource_mob_detach(res);
 467			if (res->coherent)
 468				vmw_bo_dirty_release(res->guest_memory_bo);
 469			vmw_user_bo_unref(&res->guest_memory_bo);
 470		}
 471
 472		if (new_guest_memory_bo) {
 473			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 474
 475			/*
 476			 * The validation code should already have added a
 477			 * dirty tracker here.
 478			 */
 479			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
 480
 481			vmw_resource_mob_attach(res);
 482		} else {
 483			res->guest_memory_bo = NULL;
 484		}
 485	} else if (switch_guest_memory && res->coherent) {
 486		vmw_bo_dirty_release(res->guest_memory_bo);
 487	}
 488
 489	if (switch_guest_memory)
 490		res->guest_memory_offset = new_guest_memory_offset;
 491
 492	if (dirty_set)
 493		res->res_dirty = dirty;
 494
 495	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 496		return;
 497
 498	spin_lock(&dev_priv->resource_lock);
 499	list_add_tail(&res->lru_head,
 500		      &res->dev_priv->res_lru[res->func->res_type]);
 501	spin_unlock(&dev_priv->resource_lock);
 502}
 503
 504/**
 505 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 506 *                             for a resource and in that case, allocate
 507 *                             one, reserve and validate it.
 508 *
 509 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 510 * @res:            The resource for which to allocate a backup buffer.
 511 * @interruptible:  Whether any sleeps during allocation should be
 512 *                  performed while interruptible.
 513 * @val_buf:        On successful return contains data about the
 514 *                  reserved and validated backup buffer.
 515 */
 516static int
 517vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 518			  struct vmw_resource *res,
 519			  bool interruptible,
 520			  struct ttm_validate_buffer *val_buf)
 521{
 522	struct ttm_operation_ctx ctx = { true, false };
 523	struct list_head val_list;
 524	bool guest_memory_dirty = false;
 525	int ret;
 526
 527	if (unlikely(!res->guest_memory_bo)) {
 528		ret = vmw_resource_buf_alloc(res, interruptible);
 529		if (unlikely(ret != 0))
 530			return ret;
 531	}
 532
 533	INIT_LIST_HEAD(&val_list);
 534	ttm_bo_get(&res->guest_memory_bo->tbo);
 535	val_buf->bo = &res->guest_memory_bo->tbo;
 536	val_buf->num_shared = 0;
 537	list_add_tail(&val_buf->head, &val_list);
 538	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 539	if (unlikely(ret != 0))
 540		goto out_no_reserve;
 541
 542	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
 543		return 0;
 544
 545	guest_memory_dirty = res->guest_memory_dirty;
 546	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
 547			     res->func->busy_domain);
 548	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
 549			      &res->guest_memory_bo->placement,
 550			      &ctx);
 551
 552	if (unlikely(ret != 0))
 553		goto out_no_validate;
 554
 555	return 0;
 556
 557out_no_validate:
 558	ttm_eu_backoff_reservation(ticket, &val_list);
 559out_no_reserve:
 560	ttm_bo_put(val_buf->bo);
 561	val_buf->bo = NULL;
 562	if (guest_memory_dirty)
 563		vmw_user_bo_unref(&res->guest_memory_bo);
 564
 565	return ret;
 566}
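The reserve/backoff pairing above is the generic ttm_execbuf_util pattern: put the buffer on a private list, reserve the whole list, and back off the same list when done or on failure. A minimal sketch (demo_reserve_one is illustrative; ticket may be NULL for trylock semantics, as in the eviction path later in this file):

#include <drm/ttm/ttm_execbuf_util.h>

static int demo_reserve_one(struct ww_acquire_ctx *ticket,
			    struct ttm_buffer_object *bo)
{
	struct ttm_validate_buffer val_buf = { .bo = bo, .num_shared = 0 };
	struct list_head val_list;
	int ret;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf.head, &val_list);

	ret = ttm_eu_reserve_buffers(ticket, &val_list, true, NULL);
	if (ret)
		return ret;

	/* ... validate the reserved buffer ... */

	ttm_eu_backoff_reservation(ticket, &val_list);	/* drop reservations */
	return 0;
}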
 567
 568/*
 569 * vmw_resource_reserve - Reserve a resource for command submission
 570 *
 571 * @res:            The resource to reserve.
 572 *
 573 * This function takes the resource off the LRU list and makes sure
 574 * a guest memory buffer is present for guest-backed resources.
 575 * However, the buffer may not be bound to the resource at this
 576 * point.
 577 *
 578 */
 579int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 580			 bool no_guest_memory)
 581{
 582	struct vmw_private *dev_priv = res->dev_priv;
 583	int ret;
 584
 585	spin_lock(&dev_priv->resource_lock);
 586	list_del_init(&res->lru_head);
 587	spin_unlock(&dev_priv->resource_lock);
 588
 589	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
 590	    !no_guest_memory) {
 591		ret = vmw_resource_buf_alloc(res, interruptible);
 592		if (unlikely(ret != 0)) {
 593			DRM_ERROR("Failed to allocate a guest memory buffer "
 594			  "of size %lu bytes.\n",
 595				  (unsigned long) res->guest_memory_size);
 596			return ret;
 597		}
 598	}
 599
 600	return 0;
 601}
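Reserve and unreserve bracket a command submission. A hedged sketch of the calling convention (demo_submit is illustrative; the middle step stands for validation plus command emission):

static int demo_submit(struct vmw_resource *res)
{
	int ret = vmw_resource_reserve(res, true, false);

	if (ret)
		return ret;

	/* ... validate res and emit commands referencing res->id ... */

	/* no dirty change, no guest memory switch */
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
	return 0;
}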
 602
 603/**
 604 * vmw_resource_backoff_reservation - Unreserve and unreference a
 605 *                                    guest memory buffer
 606 *
 607 * @ticket:         The ww acquire ctx used for reservation.
 608 * @val_buf:        Guest memory buffer information.
 609 */
 610static void
 611vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 612				 struct ttm_validate_buffer *val_buf)
 613{
 614	struct list_head val_list;
 615
 616	if (likely(val_buf->bo == NULL))
 617		return;
 618
 619	INIT_LIST_HEAD(&val_list);
 620	list_add_tail(&val_buf->head, &val_list);
 621	ttm_eu_backoff_reservation(ticket, &val_list);
 622	ttm_bo_put(val_buf->bo);
 623	val_buf->bo = NULL;
 624}
 625
 626/**
 627 * vmw_resource_do_evict - Evict a resource, and transfer its data
 628 *                         to a backup buffer.
 629 *
 630 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 631 * @res:            The resource to evict.
 632 * @interruptible:  Whether to wait interruptible.
 633 */
 634static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 635				 struct vmw_resource *res, bool interruptible)
 636{
 637	struct ttm_validate_buffer val_buf;
 638	const struct vmw_res_func *func = res->func;
 639	int ret;
 640
 641	BUG_ON(!func->may_evict);
 642
 643	val_buf.bo = NULL;
 644	val_buf.num_shared = 0;
 645	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 646	if (unlikely(ret != 0))
 647		return ret;
 648
 649	if (unlikely(func->unbind != NULL &&
 650		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
 651		ret = func->unbind(res, res->res_dirty, &val_buf);
 652		if (unlikely(ret != 0))
 653			goto out_no_unbind;
 654		vmw_resource_mob_detach(res);
 655	}
 656	ret = func->destroy(res);
 657	res->guest_memory_dirty = true;
 658	res->res_dirty = false;
 659out_no_unbind:
 660	vmw_resource_backoff_reservation(ticket, &val_buf);
 661
 662	return ret;
 663}
 664
 665
 666/**
 667 * vmw_resource_validate - Make a resource up-to-date and visible
 668 *                         to the device.
 669 * @res: The resource to make visible to the device.
 670 * @intr: Perform waits interruptible if possible.
 671 * @dirtying: Pending GPU operation will dirty the resource
 672 *
 673 * On successful return, any guest memory buffer pointed to by
 674 * @res->guest_memory_bo will be reserved and validated.
 675 * On hardware resource shortage, this function will repeatedly evict
 676 * resources of the same type until the validation succeeds.
 677 *
 678 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 679 * on failure.
 680 */
 681int vmw_resource_validate(struct vmw_resource *res, bool intr,
 682			  bool dirtying)
 683{
 684	int ret;
 685	struct vmw_resource *evict_res;
 686	struct vmw_private *dev_priv = res->dev_priv;
 687	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 688	struct ttm_validate_buffer val_buf;
 689	unsigned err_count = 0;
 690
 691	if (!res->func->create)
 692		return 0;
 693
 694	val_buf.bo = NULL;
 695	val_buf.num_shared = 0;
 696	if (res->guest_memory_bo)
 697		val_buf.bo = &res->guest_memory_bo->tbo;
 698	do {
 699		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 700		if (likely(ret != -EBUSY))
 701			break;
 702
 703		spin_lock(&dev_priv->resource_lock);
 704		if (list_empty(lru_list) || !res->func->may_evict) {
 705			DRM_ERROR("Out of device resources "
 706				  "for %s.\n", res->func->type_name);
 707			ret = -EBUSY;
 708			spin_unlock(&dev_priv->resource_lock);
 709			break;
 710		}
 711
 712		evict_res = vmw_resource_reference
 713			(list_first_entry(lru_list, struct vmw_resource,
 714					  lru_head));
 715		list_del_init(&evict_res->lru_head);
 716
 717		spin_unlock(&dev_priv->resource_lock);
 718
 719		/* Trylock backup buffers with a NULL ticket. */
 720		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 721		if (unlikely(ret != 0)) {
 722			spin_lock(&dev_priv->resource_lock);
 723			list_add_tail(&evict_res->lru_head, lru_list);
 724			spin_unlock(&dev_priv->resource_lock);
 725			if (ret == -ERESTARTSYS ||
 726			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 727				vmw_resource_unreference(&evict_res);
 728				goto out_no_validate;
 729			}
 730		}
 731
 732		vmw_resource_unreference(&evict_res);
 733	} while (1);
 734
 735	if (unlikely(ret != 0))
 736		goto out_no_validate;
 737	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 738		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 739		vmw_user_bo_unref(&res->guest_memory_bo);
 740	}
 741
 742	return 0;
 743
 744out_no_validate:
 745	return ret;
 746}
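Stripped to its control flow, the loop above retries validation while evicting least-recently-used resources of the same type, and gives up on a signal or after VMW_RES_EVICT_ERR_COUNT failed evictions. A minimal sketch with stub helpers standing in for the driver calls (all names illustrative):

#include <linux/errno.h>

static int demo_try_validate(void) { return 0; } /* cf. vmw_resource_do_validate() */
static int demo_evict_one(void) { return 0; }    /* cf. vmw_resource_do_evict() */

static int demo_validate_loop(void)
{
	unsigned int err_count = 0;
	int ret;

	do {
		ret = demo_try_validate();
		if (ret != -EBUSY)
			break;			/* success, or a hard error */

		ret = demo_evict_one();		/* try to free device resources */
		if (ret && (ret == -ERESTARTSYS ||
			    ++err_count > 10))	/* cf. VMW_RES_EVICT_ERR_COUNT */
			break;
	} while (1);

	return ret;
}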
 747
 748
 749/**
 750 * vmw_resource_unbind_list
 751 *
 752 * @vbo: Pointer to the current backing MOB.
 753 *
 754 * Evicts the Guest Backed hardware resource if the backup
 755 * buffer is being moved out of MOB memory.
 756 * Note that this function will not race with the resource
 757 * validation code, since resource validation and eviction
 758 * both require the backup buffer to be reserved.
 759 */
 760void vmw_resource_unbind_list(struct vmw_bo *vbo)
 761{
 762	struct ttm_validate_buffer val_buf = {
 763		.bo = &vbo->tbo,
 764		.num_shared = 0
 765	};
 766
 767	dma_resv_assert_held(vbo->tbo.base.resv);
 768	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 769		struct rb_node *node = vbo->res_tree.rb_node;
 770		struct vmw_resource *res =
 771			container_of(node, struct vmw_resource, mob_node);
 772
 773		if (!WARN_ON_ONCE(!res->func->unbind))
 774			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 775
 776		res->guest_memory_dirty = true;
 777		res->res_dirty = false;
 778		vmw_resource_mob_detach(res);
 779	}
 780
 781	(void) ttm_bo_wait(&vbo->tbo, false, false);
 782}
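The drain loop above repeatedly detaches whatever currently sits at the tree root until the tree is empty. The same shape, sketched on the illustrative demo_node type from the insertion sketch earlier in this listing:

static void demo_drain(struct rb_root *root)
{
	while (!RB_EMPTY_ROOT(root)) {
		struct demo_node *dn =
			rb_entry(root->rb_node, struct demo_node, node);

		/* what vmw_resource_mob_detach() does for each node */
		rb_erase(&dn->node, root);
		RB_CLEAR_NODE(&dn->node);
	}
}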
 783
 784
 785/**
 786 * vmw_query_readback_all - Read back cached query states
 787 *
 788 * @dx_query_mob: Buffer containing the DX query MOB
 789 *
 790 * Read back cached states from the device if they exist.  This function
 791 * assumes binding_mutex is held.
 792 */
 793int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
 794{
 795	struct vmw_resource *dx_query_ctx;
 796	struct vmw_private *dev_priv;
 797	struct {
 798		SVGA3dCmdHeader header;
 799		SVGA3dCmdDXReadbackAllQuery body;
 800	} *cmd;
 801
 802
 803	/* No query bound, so do nothing */
 804	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 805		return 0;
 806
 807	dx_query_ctx = dx_query_mob->dx_query_ctx;
 808	dev_priv     = dx_query_ctx->dev_priv;
 809
 810	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 811	if (unlikely(cmd == NULL))
 812		return -ENOMEM;
 813
 814	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 815	cmd->header.size = sizeof(cmd->body);
 816	cmd->body.cid    = dx_query_ctx->id;
 817
 818	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 819
 820	/* Triggers a rebind the next time the affected context is bound */
 821	dx_query_mob->dx_query_ctx = NULL;
 822
 823	return 0;
 824}
 825
 826
 827
 828/**
 829 * vmw_query_move_notify - Read back cached query states
 830 *
 831 * @bo: The TTM buffer object about to move.
 832 * @old_mem: The memory region @bo is moving from.
 833 * @new_mem: The memory region @bo is moving to.
 834 *
 835 * Called before the query MOB is swapped out to read back cached query
 836 * states from the device.
 837 */
 838void vmw_query_move_notify(struct ttm_buffer_object *bo,
 839			   struct ttm_resource *old_mem,
 840			   struct ttm_resource *new_mem)
 841{
 842	struct vmw_bo *dx_query_mob;
 843	struct ttm_device *bdev = bo->bdev;
 844	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
 845
 846	mutex_lock(&dev_priv->binding_mutex);
 847
 848	/* If BO is being moved from MOB to system memory */
 849	if (old_mem &&
 850	    new_mem->mem_type == TTM_PL_SYSTEM &&
 851	    old_mem->mem_type == VMW_PL_MOB) {
 852		struct vmw_fence_obj *fence;
 853
 854		dx_query_mob = to_vmw_bo(&bo->base);
 855		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 856			mutex_unlock(&dev_priv->binding_mutex);
 857			return;
 858		}
 859
 860		(void) vmw_query_readback_all(dx_query_mob);
 861		mutex_unlock(&dev_priv->binding_mutex);
 862
 863		/* Create a fence and attach the BO to it */
 864		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 865		vmw_bo_fence_single(bo, fence);
 866
 867		if (fence != NULL)
 868			vmw_fence_obj_unreference(&fence);
 869
 870		(void) ttm_bo_wait(bo, false, false);
 871	} else
 872		mutex_unlock(&dev_priv->binding_mutex);
 873}
 874
 875/**
 876 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 877 *
 878 * @res:            The resource being queried.
 879 */
 880bool vmw_resource_needs_backup(const struct vmw_resource *res)
 881{
 882	return res->func->needs_guest_memory;
 883}
 884
 885/**
 886 * vmw_resource_evict_type - Evict all resources of a specific type
 887 *
 888 * @dev_priv:       Pointer to a device private struct
 889 * @type:           The resource type to evict
 890 *
 891 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 892 * try to evict all evictable resources of a specific type.
 893 */
 894static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 895				    enum vmw_res_type type)
 896{
 897	struct list_head *lru_list = &dev_priv->res_lru[type];
 898	struct vmw_resource *evict_res;
 899	unsigned err_count = 0;
 900	int ret;
 901	struct ww_acquire_ctx ticket;
 902
 903	do {
 904		spin_lock(&dev_priv->resource_lock);
 905
 906		if (list_empty(lru_list))
 907			goto out_unlock;
 908
 909		evict_res = vmw_resource_reference(
 910			list_first_entry(lru_list, struct vmw_resource,
 911					 lru_head));
 912		list_del_init(&evict_res->lru_head);
 913		spin_unlock(&dev_priv->resource_lock);
 914
 915		/* Lock backup buffers with a ticket, waiting if necessary. */
 916		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 917		if (unlikely(ret != 0)) {
 918			spin_lock(&dev_priv->resource_lock);
 919			list_add_tail(&evict_res->lru_head, lru_list);
 920			spin_unlock(&dev_priv->resource_lock);
 921			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 922				vmw_resource_unreference(&evict_res);
 923				return;
 924			}
 925		}
 926
 927		vmw_resource_unreference(&evict_res);
 928	} while (1);
 929
 930out_unlock:
 931	spin_unlock(&dev_priv->resource_lock);
 932}
 933
 934/**
 935 * vmw_resource_evict_all - Evict all evictable resources
 936 *
 937 * @dev_priv:       Pointer to a device private struct
 938 *
 939 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 940 * evict all evictable resources. In particular this means that all
 941 * guest-backed resources that are registered with the device are
 942 * evicted and the OTable becomes clean.
 943 */
 944void vmw_resource_evict_all(struct vmw_private *dev_priv)
 945{
 946	enum vmw_res_type type;
 947
 948	mutex_lock(&dev_priv->cmdbuf_mutex);
 949
 950	for (type = 0; type < vmw_res_max; ++type)
 951		vmw_resource_evict_type(dev_priv, type);
 952
 953	mutex_unlock(&dev_priv->cmdbuf_mutex);
 954}
 955
 956/*
 957 * vmw_resource_pin - Add a pin reference on a resource
 958 *
 959 * @res: The resource to add a pin reference on
 960 *
 961 * This function adds a pin reference, and if needed validates the resource.
 962 * Having a pin reference means that the resource can never be evicted, and
 963 * its id will never change as long as there is a pin reference.
 964 * This function returns 0 on success and a negative error code on failure.
 965 */
 966int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 967{
 968	struct ttm_operation_ctx ctx = { interruptible, false };
 969	struct vmw_private *dev_priv = res->dev_priv;
 970	int ret;
 971
 972	mutex_lock(&dev_priv->cmdbuf_mutex);
 973	ret = vmw_resource_reserve(res, interruptible, false);
 974	if (ret)
 975		goto out_no_reserve;
 976
 977	if (res->pin_count == 0) {
 978		struct vmw_bo *vbo = NULL;
 979
 980		if (res->guest_memory_bo) {
 981			vbo = res->guest_memory_bo;
 982
 983			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
 984			if (ret)
 985				goto out_no_validate;
 986			if (!vbo->tbo.pin_count) {
 987				vmw_bo_placement_set(vbo,
 988						     res->func->domain,
 989						     res->func->busy_domain);
 990				ret = ttm_bo_validate
 991					(&vbo->tbo,
 992					 &vbo->placement,
 993					 &ctx);
 994				if (ret) {
 995					ttm_bo_unreserve(&vbo->tbo);
 996					goto out_no_validate;
 997				}
 998			}
 999
1000			/* Do we really need to pin the MOB as well? */
1001			vmw_bo_pin_reserved(vbo, true);
1002		}
1003		ret = vmw_resource_validate(res, interruptible, true);
1004		if (vbo)
1005			ttm_bo_unreserve(&vbo->tbo);
1006		if (ret)
1007			goto out_no_validate;
1008	}
1009	res->pin_count++;
1010
1011out_no_validate:
1012	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1013out_no_reserve:
1014	mutex_unlock(&dev_priv->cmdbuf_mutex);
1015
1016	return ret;
1017}
1018
1019/**
1020 * vmw_resource_unpin - Remove a pin reference from a resource
1021 *
1022 * @res: The resource to remove a pin reference from
1023 *
1024 * Having a pin reference means that the resource can never be evicted, and
1025 * its id will never change as long as there is a pin reference.
1026 */
1027void vmw_resource_unpin(struct vmw_resource *res)
1028{
1029	struct vmw_private *dev_priv = res->dev_priv;
1030	int ret;
1031
1032	mutex_lock(&dev_priv->cmdbuf_mutex);
1033
1034	ret = vmw_resource_reserve(res, false, true);
1035	WARN_ON(ret);
1036
1037	WARN_ON(res->pin_count == 0);
1038	if (--res->pin_count == 0 && res->guest_memory_bo) {
1039		struct vmw_bo *vbo = res->guest_memory_bo;
1040
1041		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
1042		vmw_bo_pin_reserved(vbo, false);
1043		ttm_bo_unreserve(&vbo->tbo);
1044	}
1045
1046	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1047
1048	mutex_unlock(&dev_priv->cmdbuf_mutex);
1049}
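Pin and unpin form a bracket that keeps the resource validated and its id stable for the duration of an operation. A hedged usage sketch (demo_with_pinned is illustrative, not a real caller):

static int demo_with_pinned(struct vmw_resource *res)
{
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;

	/* ... res cannot be evicted and res->id stays stable here ... */

	vmw_resource_unpin(res);
	return 0;
}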
1050
1051/**
1052 * vmw_res_type - Return the resource type
1053 *
1054 * @res: Pointer to the resource
1055 */
1056enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1057{
1058	return res->func->res_type;
1059}
1060
1061/**
1062 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1063 * sequential range of touched backing store memory.
1064 * @res: The resource.
1065 * @start: The first page touched.
1066 * @end: The last page touched + 1.
1067 */
1068void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1069			       pgoff_t end)
1070{
1071	if (res->dirty)
1072		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1073					   end << PAGE_SHIFT);
1074}
1075
1076int vmw_resource_clean(struct vmw_resource *res)
1077{
1078	int ret = 0;
1079
1080	if (res->res_dirty) {
1081		if (!res->func->clean)
1082			return -EINVAL;
1083
1084		ret = res->func->clean(res);
1085		if (ret)
1086			return ret;
1087		res->res_dirty = false;
1088	}
1089	return ret;
1090}
1091
1092/**
1093 * vmw_resources_clean - Clean resources intersecting a mob range
1094 * @vbo: The mob buffer object
1095 * @start: The mob page offset starting the range
1096 * @end: The mob page offset ending the range
1097 * @num_prefault: Returns how many pages including the first have been
1098 * cleaned and are ok to prefault
1099 */
1100int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1101			pgoff_t end, pgoff_t *num_prefault)
1102{
1103	struct rb_node *cur = vbo->res_tree.rb_node;
1104	struct vmw_resource *found = NULL;
1105	unsigned long res_start = start << PAGE_SHIFT;
1106	unsigned long res_end = end << PAGE_SHIFT;
1107	unsigned long last_cleaned = 0;
1108	int ret;
1109
1110	/*
1111	 * Find the resource with lowest backup_offset that intersects the
1112	 * range.
1113	 */
1114	while (cur) {
1115		struct vmw_resource *cur_res =
1116			container_of(cur, struct vmw_resource, mob_node);
1117
1118		if (cur_res->guest_memory_offset >= res_end) {
1119			cur = cur->rb_left;
1120		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
1121			   res_start) {
1122			cur = cur->rb_right;
1123		} else {
1124			found = cur_res;
1125			cur = cur->rb_left;
1126			/* Continue to look for resources with lower offsets */
1127		}
1128	}
1129
1130	/*
1131	 * In order of increasing guest_memory_offset, clean dirty resources
1132	 * intersecting the range.
1133	 */
1134	while (found) {
1135		ret = vmw_resource_clean(found);
1136		if (ret)
1137			return ret;
1138		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
1139		cur = rb_next(&found->mob_node);
1140		if (!cur)
1141			break;
1142
1143		found = container_of(cur, struct vmw_resource, mob_node);
1144		if (found->guest_memory_offset >= res_end)
1145			break;
1146	}
1147
1148	/*
1149	 * Set number of pages allowed prefaulting and fence the buffer object
1150	 */
1151	*num_prefault = 1;
1152	if (last_cleaned > res_start) {
1153		struct ttm_buffer_object *bo = &vbo->tbo;
1154
1155		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1156						      PAGE_SIZE);
1157		vmw_bo_fence_single(bo, NULL);
1158	}
1159
1160	return 0;
1161}
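The search above is the classic "leftmost intersecting node" walk: keep descending left even after a hit, so the intersection with the lowest offset wins. A self-contained sketch on an illustrative span type (demo_span and demo_find_first are not driver symbols):

#include <linux/rbtree.h>

struct demo_span {
	struct rb_node node;
	unsigned long offset;
	unsigned long size;
};

static struct demo_span *demo_find_first(struct rb_root *root,
					 unsigned long start,
					 unsigned long end)
{
	struct rb_node *cur = root->rb_node;
	struct demo_span *found = NULL;

	while (cur) {
		struct demo_span *ds =
			rb_entry(cur, struct demo_span, node);

		if (ds->offset >= end)
			cur = cur->rb_left;		/* starts past range */
		else if (ds->offset + ds->size <= start)
			cur = cur->rb_right;		/* ends before range */
		else {
			found = ds;			/* intersects ... */
			cur = cur->rb_left;		/* ... look for lower */
		}
	}

	return found;
}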
v4.10.11
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
  34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38struct vmw_user_dma_buffer {
  39	struct ttm_prime_object prime;
  40	struct vmw_dma_buffer dma;
  41};
  42
  43struct vmw_bo_user_rep {
  44	uint32_t handle;
  45	uint64_t map_handle;
  46};
  47
  48struct vmw_stream {
  49	struct vmw_resource res;
  50	uint32_t stream_id;
  51};
  52
  53struct vmw_user_stream {
  54	struct ttm_base_object base;
  55	struct vmw_stream stream;
  56};
  57
  58
  59static uint64_t vmw_user_stream_size;
  60
  61static const struct vmw_res_func vmw_stream_func = {
  62	.res_type = vmw_res_stream,
  63	.needs_backup = false,
  64	.may_evict = false,
  65	.type_name = "video streams",
  66	.backup_placement = NULL,
  67	.create = NULL,
  68	.destroy = NULL,
  69	.bind = NULL,
  70	.unbind = NULL
  71};
  72
  73static inline struct vmw_dma_buffer *
  74vmw_dma_buffer(struct ttm_buffer_object *bo)
  75{
  76	return container_of(bo, struct vmw_dma_buffer, base);
  77}
  78
  79static inline struct vmw_user_dma_buffer *
  80vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  81{
  82	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  83	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  84}
  85
  86struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  87{
  88	kref_get(&res->kref);
  89	return res;
  90}
  91
  92struct vmw_resource *
  93vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  94{
  95	return kref_get_unless_zero(&res->kref) ? res : NULL;
  96}
  97
  98/**
  99 * vmw_resource_release_id - release a resource id to the id manager.
 100 *
 101 * @res: Pointer to the resource.
 102 *
 103 * Release the resource id to the resource id manager and set it to -1
 104 */
 105void vmw_resource_release_id(struct vmw_resource *res)
 106{
 107	struct vmw_private *dev_priv = res->dev_priv;
 108	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 109
 110	write_lock(&dev_priv->resource_lock);
 111	if (res->id != -1)
 112		idr_remove(idr, res->id);
 113	res->id = -1;
 114	write_unlock(&dev_priv->resource_lock);
 115}
 116
 117static void vmw_resource_release(struct kref *kref)
 118{
 119	struct vmw_resource *res =
 120	    container_of(kref, struct vmw_resource, kref);
 121	struct vmw_private *dev_priv = res->dev_priv;
 122	int id;
 123	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 124
 125	write_lock(&dev_priv->resource_lock);
 126	res->avail = false;
 127	list_del_init(&res->lru_head);
 128	write_unlock(&dev_priv->resource_lock);
 129	if (res->backup) {
 130		struct ttm_buffer_object *bo = &res->backup->base;
 131
 132		ttm_bo_reserve(bo, false, false, NULL);
 133		if (!list_empty(&res->mob_head) &&
 134		    res->func->unbind != NULL) {
 135			struct ttm_validate_buffer val_buf;
 136
 137			val_buf.bo = bo;
 138			val_buf.shared = false;
 139			res->func->unbind(res, false, &val_buf);
 140		}
 141		res->backup_dirty = false;
 142		list_del_init(&res->mob_head);
 143		ttm_bo_unreserve(bo);
 144		vmw_dmabuf_unreference(&res->backup);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	write_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	write_unlock(&dev_priv->resource_lock);
 164}
 165
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
 168	struct vmw_resource *res = *p_res;
 169
 170	*p_res = NULL;
 171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
 174
 175/**
 176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
 180 * Allocate the lowest free resource id from the id manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	write_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	write_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 209 * @delay_id:       Boolean whether to defer device id allocation until
 210 *                  the first validation.
 211 * @res_free:       Resource destructor.
 212 * @func:           Resource function table.
 213 */
 214int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 215		      bool delay_id,
 216		      void (*res_free) (struct vmw_resource *res),
 217		      const struct vmw_res_func *func)
 218{
 219	kref_init(&res->kref);
 220	res->hw_destroy = NULL;
 221	res->res_free = res_free;
 222	res->avail = false;
 223	res->dev_priv = dev_priv;
 224	res->func = func;
 225	INIT_LIST_HEAD(&res->lru_head);
 226	INIT_LIST_HEAD(&res->mob_head);
 227	INIT_LIST_HEAD(&res->binding_head);
 228	res->id = -1;
 229	res->backup = NULL;
 230	res->backup_offset = 0;
 231	res->backup_dirty = false;
 232	res->res_dirty = false;
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239/**
 240 * vmw_resource_activate
 241 *
 242 * @res:        Pointer to the newly created resource
 243 * @hw_destroy: Destroy function. NULL if none.
 244 *
 245 * Activate a resource after the hardware has been made aware of it.
 246 * Set the destroy function to @hw_destroy. Typically this frees the
 247 * resource and destroys the hardware resources associated with it.
 248 * Activate basically means that the function vmw_resource_lookup will
 249 * find it.
 250 */
 251void vmw_resource_activate(struct vmw_resource *res,
 252			   void (*hw_destroy) (struct vmw_resource *))
 253{
 254	struct vmw_private *dev_priv = res->dev_priv;
 255
 256	write_lock(&dev_priv->resource_lock);
 257	res->avail = true;
 258	res->hw_destroy = hw_destroy;
 259	write_unlock(&dev_priv->resource_lock);
 260}
 261
 262static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 263						struct idr *idr, int id)
 264{
 265	struct vmw_resource *res;
 266
 267	read_lock(&dev_priv->resource_lock);
 268	res = idr_find(idr, id);
 269	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 270		res = NULL;
 271
 272	read_unlock(&dev_priv->resource_lock);
 273
 274	if (unlikely(res == NULL))
 275		return NULL;
 276
 277	return res;
 278}
 279
 280/**
 281 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 282 * TTM user-space handle and perform basic type checks
 283 *
 284 * @dev_priv:     Pointer to a device private struct
 285 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 286 * @handle:       The TTM user-space handle
 287 * @converter:    Pointer to an object describing the resource type
 288 * @p_res:        On successful return the location pointed to will contain
 289 *                a pointer to a refcounted struct vmw_resource.
 290 *
 291 * If the handle can't be found or is associated with an incorrect resource
 292 * type, -EINVAL will be returned.
 293 */
 294int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 295				    struct ttm_object_file *tfile,
 296				    uint32_t handle,
 297				    const struct vmw_user_resource_conv
 298				    *converter,
 299				    struct vmw_resource **p_res)
 300{
 301	struct ttm_base_object *base;
 302	struct vmw_resource *res;
 303	int ret = -EINVAL;
 304
 305	base = ttm_base_object_lookup(tfile, handle);
 306	if (unlikely(base == NULL))
 307		return -EINVAL;
 308
 309	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 310		goto out_bad_resource;
 311
 312	res = converter->base_obj_to_res(base);
 313
 314	read_lock(&dev_priv->resource_lock);
 315	if (!res->avail || res->res_free != converter->res_free) {
 316		read_unlock(&dev_priv->resource_lock);
 317		goto out_bad_resource;
 318	}
 319
 320	kref_get(&res->kref);
 321	read_unlock(&dev_priv->resource_lock);
 322
 323	*p_res = res;
 324	ret = 0;
 325
 326out_bad_resource:
 327	ttm_base_object_unref(&base);
 328
 329	return ret;
 330}
 331
 332/**
 333 * Helper function that looks up either a surface or a dmabuf.
 334 *
 335 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 336 */
 337int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 338			   struct ttm_object_file *tfile,
 339			   uint32_t handle,
 340			   struct vmw_surface **out_surf,
 341			   struct vmw_dma_buffer **out_buf)
 342{
 343	struct vmw_resource *res;
 344	int ret;
 345
 346	BUG_ON(*out_surf || *out_buf);
 347
 348	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 349					      user_surface_converter,
 350					      &res);
 351	if (!ret) {
 352		*out_surf = vmw_res_to_srf(res);
 353		return 0;
 354	}
 355
 356	*out_surf = NULL;
 357	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 358	return ret;
 359}
 360
 361/**
 362 * Buffer management.
 363 */
 364
 365/**
 366 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 367 *
 368 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 369 * @size: The requested buffer size.
 370 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 371 */
 372static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 373				  bool user)
 374{
 375	static size_t struct_size, user_struct_size;
 376	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 377	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 378
 379	if (unlikely(struct_size == 0)) {
 380		size_t backend_size = ttm_round_pot(vmw_tt_size);
 381
 382		struct_size = backend_size +
 383			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 384		user_struct_size = backend_size +
 385			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 386	}
 387
 388	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 389		page_array_size +=
 390			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 391
 392	return ((user) ? user_struct_size : struct_size) +
 393		page_array_size;
 394}
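A worked example of the accounting arithmetic above (illustrative numbers, assuming 4 KiB pages, 8-byte pointers and an 8-byte dma_addr_t):

/*
 * size = 16 KiB  ->  num_pages = 4
 *   page_array_size = ttm_round_pot(4 * 8)  = 32 bytes
 *   coherent mode adds ttm_round_pot(4 * 8) = 32 bytes more
 * The struct and backend sizes are computed once and cached in the
 * static struct_size/user_struct_size variables.
 */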
 395
 396void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 397{
 398	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 399
 400	kfree(vmw_bo);
 401}
 402
 403static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 404{
 405	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 406
 407	ttm_prime_object_kfree(vmw_user_bo, prime);
 408}
 409
 410int vmw_dmabuf_init(struct vmw_private *dev_priv,
 411		    struct vmw_dma_buffer *vmw_bo,
 412		    size_t size, struct ttm_placement *placement,
 413		    bool interruptible,
 414		    void (*bo_free) (struct ttm_buffer_object *bo))
 415{
 416	struct ttm_bo_device *bdev = &dev_priv->bdev;
 417	size_t acc_size;
 418	int ret;
 419	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 420
 421	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 422
 423	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 424	memset(vmw_bo, 0, sizeof(*vmw_bo));
 425
 426	INIT_LIST_HEAD(&vmw_bo->res_list);
 427
 428	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 429			  ttm_bo_type_device, placement,
 430			  0, interruptible,
 431			  NULL, acc_size, NULL, NULL, bo_free);
 432	return ret;
 433}
 434
 435static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 436{
 437	struct vmw_user_dma_buffer *vmw_user_bo;
 438	struct ttm_base_object *base = *p_base;
 439	struct ttm_buffer_object *bo;
 440
 441	*p_base = NULL;
 442
 443	if (unlikely(base == NULL))
 444		return;
 445
 446	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 447				   prime.base);
 448	bo = &vmw_user_bo->dma.base;
 449	ttm_bo_unref(&bo);
 450}
 451
 452static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 453					    enum ttm_ref_type ref_type)
 454{
 455	struct vmw_user_dma_buffer *user_bo;
 456	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 457
 458	switch (ref_type) {
 459	case TTM_REF_SYNCCPU_WRITE:
 460		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 461		break;
 462	default:
 463		BUG();
 464	}
 465}
 466
 467/**
 468 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 469 *
 470 * @dev_priv: Pointer to a struct device private.
 471 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 472 * object.
 473 * @size: Size of the dma buffer.
 474 * @shareable: Boolean whether the buffer is shareable with other open files.
 475 * @handle: Pointer to where the handle value should be assigned.
 476 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 477 * should be assigned.
 478 */
 479int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 480			  struct ttm_object_file *tfile,
 481			  uint32_t size,
 482			  bool shareable,
 483			  uint32_t *handle,
 484			  struct vmw_dma_buffer **p_dma_buf,
 485			  struct ttm_base_object **p_base)
 486{
 487	struct vmw_user_dma_buffer *user_bo;
 488	struct ttm_buffer_object *tmp;
 489	int ret;
 490
 491	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 492	if (unlikely(user_bo == NULL)) {
 493		DRM_ERROR("Failed to allocate a buffer.\n");
 494		return -ENOMEM;
 495	}
 496
 497	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 498			      (dev_priv->has_mob) ?
 499			      &vmw_sys_placement :
 500			      &vmw_vram_sys_placement, true,
 501			      &vmw_user_dmabuf_destroy);
 502	if (unlikely(ret != 0))
 503		return ret;
 504
 505	tmp = ttm_bo_reference(&user_bo->dma.base);
 506	ret = ttm_prime_object_init(tfile,
 507				    size,
 508				    &user_bo->prime,
 509				    shareable,
 510				    ttm_buffer_type,
 511				    &vmw_user_dmabuf_release,
 512				    &vmw_user_dmabuf_ref_obj_release);
 513	if (unlikely(ret != 0)) {
 514		ttm_bo_unref(&tmp);
 515		goto out_no_base_object;
 516	}
 517
 518	*p_dma_buf = &user_bo->dma;
 519	if (p_base) {
 520		*p_base = &user_bo->prime.base;
 521		kref_get(&(*p_base)->refcount);
 522	}
 523	*handle = user_bo->prime.base.hash.key;
 524
 525out_no_base_object:
 526	return ret;
 527}
 528
 529/**
 530 * vmw_user_dmabuf_verify_access - verify access permissions on this
 531 * buffer object.
 532 *
 533 * @bo: Pointer to the buffer object being accessed
 534 * @tfile: Identifying the caller.
 535 */
 536int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 537				  struct ttm_object_file *tfile)
 538{
 539	struct vmw_user_dma_buffer *vmw_user_bo;
 540
 541	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 542		return -EPERM;
 543
 544	vmw_user_bo = vmw_user_dma_buffer(bo);
 545
 546	/* Check that the caller has opened the object. */
 547	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 548		return 0;
 549
 550	DRM_ERROR("Could not grant buffer access.\n");
 551	return -EPERM;
 552}
 553
 554/**
 555 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 556 * access, idling previous GPU operations on the buffer and optionally
 557 * blocking it for further command submissions.
 558 *
 559 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 560 * @tfile: Identifying the caller.
 561 * @flags: Flags indicating how the grab should be performed.
 562 *
 563 * A blocking grab will be automatically released when @tfile is closed.
 564 */
 565static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 566					struct ttm_object_file *tfile,
 567					uint32_t flags)
 568{
 569	struct ttm_buffer_object *bo = &user_bo->dma.base;
 570	bool existed;
 571	int ret;
 572
 573	if (flags & drm_vmw_synccpu_allow_cs) {
 574		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 575		long lret;
 576
 577		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
 578							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 579		if (!lret)
 580			return -EBUSY;
 581		else if (lret < 0)
 582			return lret;
 583		return 0;
 584	}
 585
 586	ret = ttm_bo_synccpu_write_grab
 587		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 588	if (unlikely(ret != 0))
 589		return ret;
 590
 591	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 592				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 593	if (ret != 0 || existed)
 594		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 595
 596	return ret;
 597}
 598
 599/**
 600 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 601 * and unblock command submission on the buffer if blocked.
 602 *
 603 * @handle: Handle identifying the buffer object.
 604 * @tfile: Identifying the caller.
 605 * @flags: Flags indicating the type of release.
 606 */
 607static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 608					   struct ttm_object_file *tfile,
 609					   uint32_t flags)
 610{
 611	if (!(flags & drm_vmw_synccpu_allow_cs))
 612		return ttm_ref_object_base_unref(tfile, handle,
 613						 TTM_REF_SYNCCPU_WRITE);
 614
 615	return 0;
 616}
 617
 618/**
  619 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 620 * functionality.
 621 *
 622 * @dev: Identifies the drm device.
 623 * @data: Pointer to the ioctl argument.
 624 * @file_priv: Identifies the caller.
 625 *
 626 * This function checks the ioctl arguments for validity and calls the
 627 * relevant synccpu functions.
 628 */
 629int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 630				  struct drm_file *file_priv)
 631{
 632	struct drm_vmw_synccpu_arg *arg =
 633		(struct drm_vmw_synccpu_arg *) data;
 634	struct vmw_dma_buffer *dma_buf;
 635	struct vmw_user_dma_buffer *user_bo;
 636	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 637	struct ttm_base_object *buffer_base;
 638	int ret;
 639
 640	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 641	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 642			       drm_vmw_synccpu_dontblock |
 643			       drm_vmw_synccpu_allow_cs)) != 0) {
 644		DRM_ERROR("Illegal synccpu flags.\n");
 645		return -EINVAL;
 646	}
 647
 648	switch (arg->op) {
 649	case drm_vmw_synccpu_grab:
 650		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
 651					     &buffer_base);
 652		if (unlikely(ret != 0))
 653			return ret;
 654
 655		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 656				       dma);
 657		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 658		vmw_dmabuf_unreference(&dma_buf);
 659		ttm_base_object_unref(&buffer_base);
 660		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 661			     ret != -EBUSY)) {
 662			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 663				  (unsigned int) arg->handle);
 664			return ret;
 665		}
 666		break;
 667	case drm_vmw_synccpu_release:
 668		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 669						      arg->flags);
 670		if (unlikely(ret != 0)) {
 671			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 672				  (unsigned int) arg->handle);
 673			return ret;
 674		}
 675		break;
 676	default:
 677		DRM_ERROR("Invalid synccpu operation.\n");
 678		return -EINVAL;
 679	}
 680
 681	return 0;
 682}
 683
 684int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 685			   struct drm_file *file_priv)
 686{
 687	struct vmw_private *dev_priv = vmw_priv(dev);
 688	union drm_vmw_alloc_dmabuf_arg *arg =
 689	    (union drm_vmw_alloc_dmabuf_arg *)data;
 690	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 691	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 692	struct vmw_dma_buffer *dma_buf;
 693	uint32_t handle;
 694	int ret;
 695
 696	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 697	if (unlikely(ret != 0))
 698		return ret;
 699
 700	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 701				    req->size, false, &handle, &dma_buf,
 702				    NULL);
 703	if (unlikely(ret != 0))
 704		goto out_no_dmabuf;
 705
 706	rep->handle = handle;
 707	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 708	rep->cur_gmr_id = handle;
 709	rep->cur_gmr_offset = 0;
 710
 711	vmw_dmabuf_unreference(&dma_buf);
 712
 713out_no_dmabuf:
 714	ttm_read_unlock(&dev_priv->reservation_sem);
 715
 716	return ret;
 717}
 718
 719int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 720			   struct drm_file *file_priv)
 721{
 722	struct drm_vmw_unref_dmabuf_arg *arg =
 723	    (struct drm_vmw_unref_dmabuf_arg *)data;
 724
 725	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 726					 arg->handle,
 727					 TTM_REF_USAGE);
 728}
 729
 730int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 731			   uint32_t handle, struct vmw_dma_buffer **out,
 732			   struct ttm_base_object **p_base)
 733{
 734	struct vmw_user_dma_buffer *vmw_user_bo;
 735	struct ttm_base_object *base;
 736
 737	base = ttm_base_object_lookup(tfile, handle);
 738	if (unlikely(base == NULL)) {
 739		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 740		       (unsigned long)handle);
 741		return -ESRCH;
 742	}
 743
 744	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 745		ttm_base_object_unref(&base);
 746		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 747		       (unsigned long)handle);
 748		return -EINVAL;
 749	}
 750
 751	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 752				   prime.base);
 753	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 754	if (p_base)
 755		*p_base = base;
 756	else
 757		ttm_base_object_unref(&base);
 758	*out = &vmw_user_bo->dma;
 759
 760	return 0;
 761}
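
/*
 * Illustrative sketch, not part of the original driver: looking up a buffer
 * by handle and dropping both references when done. example_lookup_bo() is
 * hypothetical.
 */
static int __maybe_unused example_lookup_bo(struct ttm_object_file *tfile,
					    uint32_t handle)
{
	struct vmw_dma_buffer *dma_buf;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &base);
	if (ret != 0)
		return ret;

	/* ... access dma_buf->base here ... */

	vmw_dmabuf_unreference(&dma_buf);
	ttm_base_object_unref(&base);
	return 0;
}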
 762
 763int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 764			      struct vmw_dma_buffer *dma_buf,
 765			      uint32_t *handle)
 766{
 767	struct vmw_user_dma_buffer *user_bo;
 768
 769	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 770		return -EINVAL;
 771
 772	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 773
 774	*handle = user_bo->prime.base.hash.key;
 775	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 776				  TTM_REF_USAGE, NULL, false);
 777}
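
/*
 * Illustrative sketch, not part of the original driver: publishing a handle
 * for an already-allocated user buffer. example_share_bo() is hypothetical.
 */
static int __maybe_unused example_share_bo(struct ttm_object_file *tfile,
					   struct vmw_dma_buffer *dma_buf,
					   uint32_t *handle)
{
	/* Adds a TTM_REF_USAGE reference reachable through *handle. */
	return vmw_user_dmabuf_reference(tfile, dma_buf, handle);
}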
 778
 779/*
 780 * Stream management
 781 */
 782
 783static void vmw_stream_destroy(struct vmw_resource *res)
 784{
 785	struct vmw_private *dev_priv = res->dev_priv;
 786	struct vmw_stream *stream;
 787	int ret;
 788
 789	DRM_INFO("%s: unref\n", __func__);
 790	stream = container_of(res, struct vmw_stream, res);
 791
 792	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
 793	WARN_ON(ret != 0);
 794}
 795
 796static int vmw_stream_init(struct vmw_private *dev_priv,
 797			   struct vmw_stream *stream,
 798			   void (*res_free) (struct vmw_resource *res))
 799{
 800	struct vmw_resource *res = &stream->res;
 801	int ret;
 802
 803	ret = vmw_resource_init(dev_priv, res, false, res_free,
 804				&vmw_stream_func);
 805
 806	if (unlikely(ret != 0)) {
 807		if (res_free == NULL)
 808			kfree(stream);
 809		else
 810			res_free(&stream->res);
 811		return ret;
 812	}
 813
 814	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
 815	if (ret) {
 816		vmw_resource_unreference(&res);
 817		return ret;
 818	}
 819
 820	DRM_INFO("%s: claimed\n", __func__);
 821
 822	vmw_resource_activate(&stream->res, vmw_stream_destroy);
 823	return 0;
 824}
 825
 826static void vmw_user_stream_free(struct vmw_resource *res)
 827{
 828	struct vmw_user_stream *stream =
 829	    container_of(res, struct vmw_user_stream, stream.res);
 830	struct vmw_private *dev_priv = res->dev_priv;
 831
 832	ttm_base_object_kfree(stream, base);
 833	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 834			    vmw_user_stream_size);
 835}
 836
 837/**
  838 * vmw_user_stream_base_release - Called when user space has no more
  839 * references on the base object; releases the base object's reference on the resource.
 840 */
 841
 842static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
 843{
 844	struct ttm_base_object *base = *p_base;
 845	struct vmw_user_stream *stream =
 846	    container_of(base, struct vmw_user_stream, base);
 847	struct vmw_resource *res = &stream->stream.res;
 848
 849	*p_base = NULL;
 850	vmw_resource_unreference(&res);
 851}
 852
 853int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 854			   struct drm_file *file_priv)
 855{
 856	struct vmw_private *dev_priv = vmw_priv(dev);
 857	struct vmw_resource *res;
 858	struct vmw_user_stream *stream;
 859	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 860	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 861	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 862	int ret = 0;
 863
 864
 865	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 866	if (unlikely(res == NULL))
 867		return -EINVAL;
 868
 869	if (res->res_free != &vmw_user_stream_free) {
 870		ret = -EINVAL;
 871		goto out;
 872	}
 873
 874	stream = container_of(res, struct vmw_user_stream, stream.res);
 875	if (stream->base.tfile != tfile) {
 876		ret = -EINVAL;
 877		goto out;
 878	}
 879
 880	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
 881out:
 882	vmw_resource_unreference(&res);
 883	return ret;
 884}
 885
 886int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 887			   struct drm_file *file_priv)
 888{
 889	struct vmw_private *dev_priv = vmw_priv(dev);
 890	struct vmw_user_stream *stream;
 891	struct vmw_resource *res;
 892	struct vmw_resource *tmp;
 893	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 894	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 895	int ret;
 896
 897	/*
  898	 * Approximate idr memory usage with 128 bytes. It will be limited
  899	 * by the maximum number of streams anyway.
 900	 */
 901
 902	if (unlikely(vmw_user_stream_size == 0))
 903		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 904
 905	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 906	if (unlikely(ret != 0))
 907		return ret;
 908
 909	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 910				   vmw_user_stream_size,
 911				   false, true);
 912	ttm_read_unlock(&dev_priv->reservation_sem);
 913	if (unlikely(ret != 0)) {
 914		if (ret != -ERESTARTSYS)
 915			DRM_ERROR("Out of graphics memory for stream"
 916				  " creation.\n");
 917
 918		goto out_ret;
 919	}
 920
 921	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 922	if (unlikely(stream == NULL)) {
 923		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 924				    vmw_user_stream_size);
 925		ret = -ENOMEM;
 926		goto out_ret;
 927	}
 928
 929	res = &stream->stream.res;
 930	stream->base.shareable = false;
 931	stream->base.tfile = NULL;
 932
 933	/*
 934	 * From here on, the destructor takes over resource freeing.
 935	 */
 936
 937	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 938	if (unlikely(ret != 0))
 939		goto out_ret;
 940
 941	tmp = vmw_resource_reference(res);
 942	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
 943				   &vmw_user_stream_base_release, NULL);
 944
 945	if (unlikely(ret != 0)) {
 946		vmw_resource_unreference(&tmp);
 947		goto out_err;
 948	}
 949
 950	arg->stream_id = res->id;
 951out_err:
 952	vmw_resource_unreference(&res);
 953out_ret:
 954	return ret;
 955}
 956
 957int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 958			   struct ttm_object_file *tfile,
 959			   uint32_t *inout_id, struct vmw_resource **out)
 960{
 961	struct vmw_user_stream *stream;
 962	struct vmw_resource *res;
 963	int ret;
 964
 965	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
 966				  *inout_id);
 967	if (unlikely(res == NULL))
 968		return -EINVAL;
 969
 970	if (res->res_free != &vmw_user_stream_free) {
 971		ret = -EINVAL;
 972		goto err_ref;
 973	}
 974
 975	stream = container_of(res, struct vmw_user_stream, stream.res);
 976	if (stream->base.tfile != tfile) {
 977		ret = -EPERM;
 978		goto err_ref;
 979	}
 980
 981	*inout_id = stream->stream.stream_id;
 982	*out = res;
 983	return 0;
 984err_ref:
 985	vmw_resource_unreference(&res);
 986	return ret;
 987}
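
/*
 * Illustrative sketch, not part of the original driver: translating a user
 * stream handle to the device stream id. example_stream_id() is hypothetical.
 */
static int __maybe_unused example_stream_id(struct vmw_private *dev_priv,
					    struct ttm_object_file *tfile,
					    uint32_t handle, uint32_t *stream_id)
{
	struct vmw_resource *res;
	int ret;

	*stream_id = handle;
	ret = vmw_user_stream_lookup(dev_priv, tfile, stream_id, &res);
	if (ret != 0)
		return ret;

	/* *stream_id now holds the device stream id. */
	vmw_resource_unreference(&res);
	return 0;
}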
 988
 989
 990/**
 991 * vmw_dumb_create - Create a dumb kms buffer
 992 *
 993 * @file_priv: Pointer to a struct drm_file identifying the caller.
 994 * @dev: Pointer to the drm device.
 995 * @args: Pointer to a struct drm_mode_create_dumb structure
 996 *
 997 * This is a driver callback for the core drm create_dumb functionality.
 998 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 999 * that the arguments have a different format.
1000 */
1001int vmw_dumb_create(struct drm_file *file_priv,
1002		    struct drm_device *dev,
1003		    struct drm_mode_create_dumb *args)
1004{
1005	struct vmw_private *dev_priv = vmw_priv(dev);
1006	struct vmw_dma_buffer *dma_buf;
1007	int ret;
1008
1009	args->pitch = args->width * ((args->bpp + 7) / 8);
1010	args->size = args->pitch * args->height;
1011
1012	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1013	if (unlikely(ret != 0))
1014		return ret;
1015
1016	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1017				    args->size, false, &args->handle,
1018				    &dma_buf, NULL);
1019	if (unlikely(ret != 0))
1020		goto out_no_dmabuf;
1021
1022	vmw_dmabuf_unreference(&dma_buf);
1023out_no_dmabuf:
1024	ttm_read_unlock(&dev_priv->reservation_sem);
1025	return ret;
1026}
1027
1028/**
1029 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1030 *
1031 * @file_priv: Pointer to a struct drm_file identifying the caller.
1032 * @dev: Pointer to the drm device.
1033 * @handle: Handle identifying the dumb buffer.
1034 * @offset: The address space offset returned.
1035 *
1036 * This is a driver callback for the core drm dumb_map_offset functionality.
1037 */
1038int vmw_dumb_map_offset(struct drm_file *file_priv,
1039			struct drm_device *dev, uint32_t handle,
1040			uint64_t *offset)
1041{
1042	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1043	struct vmw_dma_buffer *out_buf;
1044	int ret;
1045
1046	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1047	if (ret != 0)
1048		return -EINVAL;
1049
1050	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1051	vmw_dmabuf_unreference(&out_buf);
1052	return 0;
1053}
1054
1055/**
 1056 * vmw_dumb_destroy - Destroy a dumb buffer
1057 *
1058 * @file_priv: Pointer to a struct drm_file identifying the caller.
1059 * @dev: Pointer to the drm device.
1060 * @handle: Handle identifying the dumb buffer.
1061 *
1062 * This is a driver callback for the core drm dumb_destroy functionality.
1063 */
1064int vmw_dumb_destroy(struct drm_file *file_priv,
1065		     struct drm_device *dev,
1066		     uint32_t handle)
1067{
1068	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1069					 handle, TTM_REF_USAGE);
1070}
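
/*
 * Illustrative sketch, not part of the original driver: the dumb-buffer
 * lifecycle as the drm core drives it, shown as direct calls. The width,
 * height and bpp values and example_dumb_lifecycle() are hypothetical.
 */
static int __maybe_unused example_dumb_lifecycle(struct drm_file *file_priv,
						 struct drm_device *dev)
{
	struct drm_mode_create_dumb args = {
		.width = 640,
		.height = 480,
		.bpp = 32,
	};
	uint64_t offset;
	int ret;

	ret = vmw_dumb_create(file_priv, dev, &args);
	if (ret != 0)
		return ret;

	ret = vmw_dumb_map_offset(file_priv, dev, args.handle, &offset);
	if (ret == 0) {
		/* @offset can now be handed to user space for mmap(). */
	}

	vmw_dumb_destroy(file_priv, dev, args.handle);
	return ret;
}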
1071
1072/**
1073 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1074 *
1075 * @res:            The resource for which to allocate a backup buffer.
1076 * @interruptible:  Whether any sleeps during allocation should be
1077 *                  performed while interruptible.
1078 */
1079static int vmw_resource_buf_alloc(struct vmw_resource *res,
1080				  bool interruptible)
1081{
1082	unsigned long size =
1083		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
1084	struct vmw_dma_buffer *backup;
1085	int ret;
1086
1087	if (likely(res->backup)) {
1088		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1089		return 0;
1090	}
1091
1092	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1093	if (unlikely(backup == NULL))
1094		return -ENOMEM;
1095
1096	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1097			      res->func->backup_placement,
1098			      interruptible,
1099			      &vmw_dmabuf_bo_free);
1100	if (unlikely(ret != 0))
1101		goto out_no_dmabuf;
1102
1103	res->backup = backup;
1104
1105out_no_dmabuf:
1106	return ret;
1107}
1108
1109/**
1110 * vmw_resource_do_validate - Make a resource up-to-date and visible
1111 *                            to the device.
1112 *
1113 * @res:            The resource to make visible to the device.
1114 * @val_buf:        Information about a buffer possibly
1115 *                  containing backup data if a bind operation is needed.
1116 *
1117 * On hardware resource shortage, this function returns -EBUSY and
1118 * should be retried once resources have been freed up.
1119 */
1120static int vmw_resource_do_validate(struct vmw_resource *res,
1121				    struct ttm_validate_buffer *val_buf)
1122{
1123	int ret = 0;
1124	const struct vmw_res_func *func = res->func;
1125
1126	if (unlikely(res->id == -1)) {
1127		ret = func->create(res);
1128		if (unlikely(ret != 0))
1129			return ret;
1130	}
1131
1132	if (func->bind &&
1133	    ((func->needs_backup && list_empty(&res->mob_head) &&
1134	      val_buf->bo != NULL) ||
1135	     (!func->needs_backup && val_buf->bo != NULL))) {
1136		ret = func->bind(res, val_buf);
1137		if (unlikely(ret != 0))
1138			goto out_bind_failed;
1139		if (func->needs_backup)
1140			list_add_tail(&res->mob_head, &res->backup->res_list);
1141	}
1142
1143	/*
1144	 * Only do this on write operations, and move to
1145	 * vmw_resource_unreserve if it can be called after
1146	 * backup buffers have been unreserved. Otherwise
1147	 * sort out locking.
1148	 */
1149	res->res_dirty = true;
1150
1151	return 0;
1152
1153out_bind_failed:
1154	func->destroy(res);
1155
1156	return ret;
1157}
1158
1159/**
1160 * vmw_resource_unreserve - Unreserve a resource previously reserved for
1161 * command submission.
1162 *
1163 * @res:               Pointer to the struct vmw_resource to unreserve.
1164 * @switch_backup:     Backup buffer has been switched.
1165 * @new_backup:        Pointer to new backup buffer if command submission
1166 *                     switched. May be NULL.
1167 * @new_backup_offset: New backup offset if @switch_backup is true.
1168 *
1169 * Currently unreserving a resource means putting it back on the device's
1170 * resource lru list, so that it can be evicted if necessary.
1171 */
1172void vmw_resource_unreserve(struct vmw_resource *res,
1173			    bool switch_backup,
1174			    struct vmw_dma_buffer *new_backup,
1175			    unsigned long new_backup_offset)
1176{
1177	struct vmw_private *dev_priv = res->dev_priv;
1178
1179	if (!list_empty(&res->lru_head))
1180		return;
1181
1182	if (switch_backup && new_backup != res->backup) {
1183		if (res->backup) {
1184			lockdep_assert_held(&res->backup->base.resv->lock.base);
1185			list_del_init(&res->mob_head);
1186			vmw_dmabuf_unreference(&res->backup);
1187		}
1188
1189		if (new_backup) {
1190			res->backup = vmw_dmabuf_reference(new_backup);
1191			lockdep_assert_held(&new_backup->base.resv->lock.base);
1192			list_add_tail(&res->mob_head, &new_backup->res_list);
1193		} else {
1194			res->backup = NULL;
1195		}
1196	}
1197	if (switch_backup)
1198		res->backup_offset = new_backup_offset;
1199
1200	if (!res->func->may_evict || res->id == -1 || res->pin_count)
1201		return;
1202
1203	write_lock(&dev_priv->resource_lock);
1204	list_add_tail(&res->lru_head,
1205		      &res->dev_priv->res_lru[res->func->res_type]);
1206	write_unlock(&dev_priv->resource_lock);
1207}
1208
1209/**
1210 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1211 *                             for a resource and in that case, allocate
1212 *                             one, reserve and validate it.
1213 *
1214 * @res:            The resource for which to allocate a backup buffer.
1215 * @interruptible:  Whether any sleeps during allocation should be
1216 *                  performed while interruptible.
1217 * @val_buf:        On successful return contains data about the
1218 *                  reserved and validated backup buffer.
1219 */
1220static int
1221vmw_resource_check_buffer(struct vmw_resource *res,
1222			  bool interruptible,
1223			  struct ttm_validate_buffer *val_buf)
1224{
1225	struct list_head val_list;
1226	bool backup_dirty = false;
1227	int ret;
1228
1229	if (unlikely(res->backup == NULL)) {
1230		ret = vmw_resource_buf_alloc(res, interruptible);
1231		if (unlikely(ret != 0))
1232			return ret;
1233	}
1234
1235	INIT_LIST_HEAD(&val_list);
1236	val_buf->bo = ttm_bo_reference(&res->backup->base);
1237	val_buf->shared = false;
1238	list_add_tail(&val_buf->head, &val_list);
1239	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
1240	if (unlikely(ret != 0))
1241		goto out_no_reserve;
1242
1243	if (res->func->needs_backup && list_empty(&res->mob_head))
1244		return 0;
1245
1246	backup_dirty = res->backup_dirty;
1247	ret = ttm_bo_validate(&res->backup->base,
1248			      res->func->backup_placement,
1249			      true, false);
1250
1251	if (unlikely(ret != 0))
1252		goto out_no_validate;
1253
1254	return 0;
1255
1256out_no_validate:
1257	ttm_eu_backoff_reservation(NULL, &val_list);
1258out_no_reserve:
1259	ttm_bo_unref(&val_buf->bo);
1260	if (backup_dirty)
1261		vmw_dmabuf_unreference(&res->backup);
1262
1263	return ret;
1264}
1265
1266/**
1267 * vmw_resource_reserve - Reserve a resource for command submission
1268 *
1269 * @res:            The resource to reserve.
1270 *
 1271 * This function takes the resource off the LRU list and makes sure
1272 * a backup buffer is present for guest-backed resources. However,
1273 * the buffer may not be bound to the resource at this point.
1274 *
1275 */
1276int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1277			 bool no_backup)
1278{
1279	struct vmw_private *dev_priv = res->dev_priv;
1280	int ret;
1281
1282	write_lock(&dev_priv->resource_lock);
1283	list_del_init(&res->lru_head);
1284	write_unlock(&dev_priv->resource_lock);
1285
1286	if (res->func->needs_backup && res->backup == NULL &&
1287	    !no_backup) {
1288		ret = vmw_resource_buf_alloc(res, interruptible);
1289		if (unlikely(ret != 0)) {
1290			DRM_ERROR("Failed to allocate a backup buffer "
 1291				  "of size %lu bytes.\n",
1292				  (unsigned long) res->backup_size);
1293			return ret;
1294		}
1295	}
1296
1297	return 0;
1298}
1299
1300/**
1301 * vmw_resource_backoff_reservation - Unreserve and unreference a
1302 *                                    backup buffer
 1303 *
1304 * @val_buf:        Backup buffer information.
1305 */
1306static void
1307vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1308{
1309	struct list_head val_list;
1310
1311	if (likely(val_buf->bo == NULL))
1312		return;
1313
1314	INIT_LIST_HEAD(&val_list);
1315	list_add_tail(&val_buf->head, &val_list);
1316	ttm_eu_backoff_reservation(NULL, &val_list);
1317	ttm_bo_unref(&val_buf->bo);
1318}
1319
1320/**
1321 * vmw_resource_do_evict - Evict a resource, and transfer its data
1322 *                         to a backup buffer.
1323 *
1324 * @res:            The resource to evict.
1325 * @interruptible:  Whether to wait interruptible.
1326 */
1327static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1328{
1329	struct ttm_validate_buffer val_buf;
1330	const struct vmw_res_func *func = res->func;
1331	int ret;
1332
1333	BUG_ON(!func->may_evict);
1334
1335	val_buf.bo = NULL;
1336	val_buf.shared = false;
1337	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1338	if (unlikely(ret != 0))
1339		return ret;
1340
1341	if (unlikely(func->unbind != NULL &&
1342		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1343		ret = func->unbind(res, res->res_dirty, &val_buf);
1344		if (unlikely(ret != 0))
1345			goto out_no_unbind;
1346		list_del_init(&res->mob_head);
1347	}
1348	ret = func->destroy(res);
1349	res->backup_dirty = true;
1350	res->res_dirty = false;
1351out_no_unbind:
1352	vmw_resource_backoff_reservation(&val_buf);
1353
1354	return ret;
1355}
1356
1357
1358/**
1359 * vmw_resource_validate - Make a resource up-to-date and visible
1360 *                         to the device.
1361 *
1362 * @res:            The resource to make visible to the device.
1363 *
 1364 * On successful return, any backup DMA buffer pointed to by @res->backup will
1365 * be reserved and validated.
1366 * On hardware resource shortage, this function will repeatedly evict
1367 * resources of the same type until the validation succeeds.
1368 */
1369int vmw_resource_validate(struct vmw_resource *res)
1370{
1371	int ret;
1372	struct vmw_resource *evict_res;
1373	struct vmw_private *dev_priv = res->dev_priv;
1374	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1375	struct ttm_validate_buffer val_buf;
1376	unsigned err_count = 0;
1377
1378	if (!res->func->create)
1379		return 0;
1380
1381	val_buf.bo = NULL;
1382	val_buf.shared = false;
1383	if (res->backup)
1384		val_buf.bo = &res->backup->base;
1385	do {
1386		ret = vmw_resource_do_validate(res, &val_buf);
1387		if (likely(ret != -EBUSY))
1388			break;
1389
1390		write_lock(&dev_priv->resource_lock);
1391		if (list_empty(lru_list) || !res->func->may_evict) {
 1392			DRM_ERROR("Out of device resources "
1393				  "for %s.\n", res->func->type_name);
1394			ret = -EBUSY;
1395			write_unlock(&dev_priv->resource_lock);
1396			break;
1397		}
1398
1399		evict_res = vmw_resource_reference
1400			(list_first_entry(lru_list, struct vmw_resource,
1401					  lru_head));
1402		list_del_init(&evict_res->lru_head);
1403
1404		write_unlock(&dev_priv->resource_lock);
1405
1406		ret = vmw_resource_do_evict(evict_res, true);
1407		if (unlikely(ret != 0)) {
1408			write_lock(&dev_priv->resource_lock);
1409			list_add_tail(&evict_res->lru_head, lru_list);
1410			write_unlock(&dev_priv->resource_lock);
1411			if (ret == -ERESTARTSYS ||
1412			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1413				vmw_resource_unreference(&evict_res);
1414				goto out_no_validate;
1415			}
1416		}
1417
1418		vmw_resource_unreference(&evict_res);
1419	} while (1);
1420
1421	if (unlikely(ret != 0))
1422		goto out_no_validate;
1423	else if (!res->func->needs_backup && res->backup) {
1424		list_del_init(&res->mob_head);
1425		vmw_dmabuf_unreference(&res->backup);
1426	}
1427
1428	return 0;
1429
1430out_no_validate:
1431	return ret;
1432}
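
/*
 * Illustrative sketch, not part of the original driver: the canonical
 * reserve -> validate -> unreserve sequence, assuming the caller already
 * holds cmdbuf_mutex and has reserved the backup buffer, as
 * vmw_resource_pin() below does. example_make_resident() is hypothetical.
 */
static int __maybe_unused example_make_resident(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_reserve(res, true, false);
	if (ret != 0)
		return ret;

	ret = vmw_resource_validate(res);

	/* No backup switch: keep the current buffer and offset. */
	vmw_resource_unreserve(res, false, NULL, 0UL);
	return ret;
}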
1433
1434/**
1435 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1436 *                       object without unreserving it.
1437 *
1438 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1439 * @fence:          Pointer to the fence. If NULL, this function will
 1440 *                  insert a fence into the command stream.
1441 *
1442 * Contrary to the ttm_eu version of this function, it takes only
1443 * a single buffer object instead of a list, and it also doesn't
1444 * unreserve the buffer object, which needs to be done separately.
1445 */
1446void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1447			 struct vmw_fence_obj *fence)
1448{
1449	struct ttm_bo_device *bdev = bo->bdev;
1450
1451	struct vmw_private *dev_priv =
1452		container_of(bdev, struct vmw_private, bdev);
1453
1454	if (fence == NULL) {
1455		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1456		reservation_object_add_excl_fence(bo->resv, &fence->base);
1457		dma_fence_put(&fence->base);
1458	} else
1459		reservation_object_add_excl_fence(bo->resv, &fence->base);
1460}
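
/*
 * Illustrative sketch, not part of the original driver: fencing a reserved
 * buffer after command submission. A NULL fence makes vmw_fence_single_bo()
 * insert one itself; unreserving remains the caller's job.
 * example_fence_bo() is hypothetical.
 */
static void __maybe_unused example_fence_bo(struct ttm_buffer_object *bo)
{
	/* @bo must already be reserved by the caller. */
	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);
}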
1461
1462/**
1463 * vmw_resource_move_notify - TTM move_notify_callback
1464 *
1465 * @bo: The TTM buffer object about to move.
1466 * @mem: The struct ttm_mem_reg indicating to what memory
1467 *       region the move is taking place.
1468 *
1469 * Evicts the Guest Backed hardware resource if the backup
1470 * buffer is being moved out of MOB memory.
1471 * Note that this function should not race with the resource
1472 * validation code as long as it accesses only members of struct
 1473 * vmw_resource that remain static while bo::res is !NULL and
 1474 * while we have @bo reserved. struct vmw_resource::backup is *not* a
 1475 * static member. The resource validation code will take care
 1476 * to set @bo::res to NULL, while having @bo reserved when the
 1477 * buffer is no longer bound to the resource, so @bo::res can be
1478 * used to determine whether there is a need to unbind and whether
1479 * it is safe to unbind.
1480 */
1481void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1482			      struct ttm_mem_reg *mem)
1483{
1484	struct vmw_dma_buffer *dma_buf;
1485
1486	if (mem == NULL)
1487		return;
1488
1489	if (bo->destroy != vmw_dmabuf_bo_free &&
1490	    bo->destroy != vmw_user_dmabuf_destroy)
1491		return;
1492
1493	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1494
1495	if (mem->mem_type != VMW_PL_MOB) {
1496		struct vmw_resource *res, *n;
1497		struct ttm_validate_buffer val_buf;
1498
1499		val_buf.bo = bo;
1500		val_buf.shared = false;
1501
1502		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1503
1504			if (unlikely(res->func->unbind == NULL))
1505				continue;
1506
1507			(void) res->func->unbind(res, true, &val_buf);
1508			res->backup_dirty = true;
1509			res->res_dirty = false;
1510			list_del_init(&res->mob_head);
1511		}
1512
1513		(void) ttm_bo_wait(bo, false, false);
1514	}
1515}
1516
1517
1518
1519/**
1520 * vmw_query_readback_all - Read back cached query states
1521 *
1522 * @dx_query_mob: Buffer containing the DX query MOB
1523 *
 1524 * Read back cached states from the device if they exist. This function
 1525 * assumes that binding_mutex is held.
1526 */
1527int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1528{
1529	struct vmw_resource *dx_query_ctx;
1530	struct vmw_private *dev_priv;
1531	struct {
1532		SVGA3dCmdHeader header;
1533		SVGA3dCmdDXReadbackAllQuery body;
1534	} *cmd;
1535
1536
1537	/* No query bound, so do nothing */
1538	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1539		return 0;
1540
1541	dx_query_ctx = dx_query_mob->dx_query_ctx;
1542	dev_priv     = dx_query_ctx->dev_priv;
1543
1544	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1545	if (unlikely(cmd == NULL)) {
1546		DRM_ERROR("Failed reserving FIFO space for "
1547			  "query MOB read back.\n");
1548		return -ENOMEM;
1549	}
1550
1551	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1552	cmd->header.size = sizeof(cmd->body);
1553	cmd->body.cid    = dx_query_ctx->id;
1554
1555	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1556
 1557	/* Triggers a rebind the next time the affected context is bound */
1558	dx_query_mob->dx_query_ctx = NULL;
1559
1560	return 0;
1561}
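
/*
 * Illustrative sketch, not part of the original driver: taking binding_mutex
 * around the readback, since vmw_query_readback_all() assumes it is held.
 * example_readback() is hypothetical.
 */
static int __maybe_unused example_readback(struct vmw_private *dev_priv,
					   struct vmw_dma_buffer *dx_query_mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_query_readback_all(dx_query_mob);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}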
1562
1563
1564
1565/**
1566 * vmw_query_move_notify - Read back cached query states
1567 *
1568 * @bo: The TTM buffer object about to move.
1569 * @mem: The memory region @bo is moving to.
1570 *
1571 * Called before the query MOB is swapped out to read back cached query
1572 * states from the device.
1573 */
1574void vmw_query_move_notify(struct ttm_buffer_object *bo,
1575			   struct ttm_mem_reg *mem)
1576{
1577	struct vmw_dma_buffer *dx_query_mob;
1578	struct ttm_bo_device *bdev = bo->bdev;
1579	struct vmw_private *dev_priv;
1580
1581
1582	dev_priv = container_of(bdev, struct vmw_private, bdev);
1583
1584	mutex_lock(&dev_priv->binding_mutex);
1585
1586	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1587	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1588		mutex_unlock(&dev_priv->binding_mutex);
1589		return;
1590	}
1591
1592	/* If BO is being moved from MOB to system memory */
1593	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1594		struct vmw_fence_obj *fence;
1595
1596		(void) vmw_query_readback_all(dx_query_mob);
1597		mutex_unlock(&dev_priv->binding_mutex);
1598
1599		/* Create a fence and attach the BO to it */
1600		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1601		vmw_fence_single_bo(bo, fence);
1602
1603		if (fence != NULL)
1604			vmw_fence_obj_unreference(&fence);
1605
1606		(void) ttm_bo_wait(bo, false, false);
1607	} else
1608		mutex_unlock(&dev_priv->binding_mutex);
1609
1610}
1611
1612/**
1613 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1614 *
1615 * @res:            The resource being queried.
1616 */
1617bool vmw_resource_needs_backup(const struct vmw_resource *res)
1618{
1619	return res->func->needs_backup;
1620}
1621
1622/**
1623 * vmw_resource_evict_type - Evict all resources of a specific type
1624 *
1625 * @dev_priv:       Pointer to a device private struct
1626 * @type:           The resource type to evict
1627 *
1628 * To avoid thrashing starvation or as part of the hibernation sequence,
1629 * try to evict all evictable resources of a specific type.
1630 */
1631static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1632				    enum vmw_res_type type)
1633{
1634	struct list_head *lru_list = &dev_priv->res_lru[type];
1635	struct vmw_resource *evict_res;
1636	unsigned err_count = 0;
1637	int ret;
1638
1639	do {
1640		write_lock(&dev_priv->resource_lock);
1641
1642		if (list_empty(lru_list))
1643			goto out_unlock;
1644
1645		evict_res = vmw_resource_reference(
1646			list_first_entry(lru_list, struct vmw_resource,
1647					 lru_head));
1648		list_del_init(&evict_res->lru_head);
1649		write_unlock(&dev_priv->resource_lock);
1650
1651		ret = vmw_resource_do_evict(evict_res, false);
1652		if (unlikely(ret != 0)) {
1653			write_lock(&dev_priv->resource_lock);
1654			list_add_tail(&evict_res->lru_head, lru_list);
1655			write_unlock(&dev_priv->resource_lock);
1656			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1657				vmw_resource_unreference(&evict_res);
1658				return;
1659			}
1660		}
1661
1662		vmw_resource_unreference(&evict_res);
1663	} while (1);
1664
1665out_unlock:
1666	write_unlock(&dev_priv->resource_lock);
1667}
1668
1669/**
1670 * vmw_resource_evict_all - Evict all evictable resources
1671 *
1672 * @dev_priv:       Pointer to a device private struct
1673 *
1674 * To avoid thrashing starvation or as part of the hibernation sequence,
1675 * evict all evictable resources. In particular this means that all
1676 * guest-backed resources that are registered with the device are
1677 * evicted and the OTable becomes clean.
1678 */
1679void vmw_resource_evict_all(struct vmw_private *dev_priv)
1680{
1681	enum vmw_res_type type;
1682
1683	mutex_lock(&dev_priv->cmdbuf_mutex);
1684
1685	for (type = 0; type < vmw_res_max; ++type)
1686		vmw_resource_evict_type(dev_priv, type);
1687
1688	mutex_unlock(&dev_priv->cmdbuf_mutex);
1689}
1690
1691/**
1692 * vmw_resource_pin - Add a pin reference on a resource
1693 *
1694 * @res: The resource to add a pin reference on
1695 *
1696 * This function adds a pin reference, and if needed validates the resource.
1697 * Having a pin reference means that the resource can never be evicted, and
1698 * its id will never change as long as there is a pin reference.
1699 * This function returns 0 on success and a negative error code on failure.
1700 */
1701int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1702{
1703	struct vmw_private *dev_priv = res->dev_priv;
1704	int ret;
1705
1706	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1707	mutex_lock(&dev_priv->cmdbuf_mutex);
1708	ret = vmw_resource_reserve(res, interruptible, false);
1709	if (ret)
1710		goto out_no_reserve;
1711
1712	if (res->pin_count == 0) {
1713		struct vmw_dma_buffer *vbo = NULL;
1714
1715		if (res->backup) {
1716			vbo = res->backup;
1717
1718			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1719			if (!vbo->pin_count) {
1720				ret = ttm_bo_validate
1721					(&vbo->base,
1722					 res->func->backup_placement,
1723					 interruptible, false);
1724				if (ret) {
1725					ttm_bo_unreserve(&vbo->base);
1726					goto out_no_validate;
1727				}
1728			}
1729
1730			/* Do we really need to pin the MOB as well? */
1731			vmw_bo_pin_reserved(vbo, true);
1732		}
1733		ret = vmw_resource_validate(res);
1734		if (vbo)
1735			ttm_bo_unreserve(&vbo->base);
1736		if (ret)
1737			goto out_no_validate;
1738	}
1739	res->pin_count++;
1740
1741out_no_validate:
1742	vmw_resource_unreserve(res, false, NULL, 0UL);
1743out_no_reserve:
1744	mutex_unlock(&dev_priv->cmdbuf_mutex);
1745	ttm_write_unlock(&dev_priv->reservation_sem);
1746
1747	return ret;
1748}
1749
1750/**
1751 * vmw_resource_unpin - Remove a pin reference from a resource
1752 *
1753 * @res: The resource to remove a pin reference from
1754 *
1755 * Having a pin reference means that the resource can never be evicted, and
1756 * its id will never change as long as there is a pin reference.
1757 */
1758void vmw_resource_unpin(struct vmw_resource *res)
1759{
1760	struct vmw_private *dev_priv = res->dev_priv;
1761	int ret;
1762
1763	ttm_read_lock(&dev_priv->reservation_sem, false);
1764	mutex_lock(&dev_priv->cmdbuf_mutex);
1765
1766	ret = vmw_resource_reserve(res, false, true);
1767	WARN_ON(ret);
1768
1769	WARN_ON(res->pin_count == 0);
1770	if (--res->pin_count == 0 && res->backup) {
1771		struct vmw_dma_buffer *vbo = res->backup;
1772
1773		ttm_bo_reserve(&vbo->base, false, false, NULL);
1774		vmw_bo_pin_reserved(vbo, false);
1775		ttm_bo_unreserve(&vbo->base);
1776	}
1777
1778	vmw_resource_unreserve(res, false, NULL, 0UL);
1779
1780	mutex_unlock(&dev_priv->cmdbuf_mutex);
1781	ttm_read_unlock(&dev_priv->reservation_sem);
1782}
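
/*
 * Illustrative sketch, not part of the original driver: a pin/unpin pair
 * around a window in which the resource id must stay stable, e.g. while the
 * resource is referenced by a scanout. example_with_pinned() is hypothetical.
 */
static int __maybe_unused example_with_pinned(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_pin(res, true);
	if (ret != 0)
		return ret;

	/* @res cannot be evicted and res->id stays stable here. */

	vmw_resource_unpin(res);
	return 0;
}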
1783
1784/**
1785 * vmw_res_type - Return the resource type
1786 *
1787 * @res: Pointer to the resource
1788 */
1789enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1790{
1791	return res->func->res_type;
1792}