drivers/gpu/drm/vmwgfx/vmwgfx_resource.c (Linux v5.4)
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
   27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_resource_priv.h"
  31#include "vmwgfx_binding.h"
  32#include "vmwgfx_drv.h"
  33
  34#define VMW_RES_EVICT_ERR_COUNT 10
  35
  36/**
  37 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  38 * @res: The resource
  39 */
  40void vmw_resource_mob_attach(struct vmw_resource *res)
  41{
  42	struct vmw_buffer_object *backup = res->backup;
  43
  44	dma_resv_assert_held(res->backup->base.base.resv);
  45	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  46		res->func->prio;
  47	list_add_tail(&res->mob_head, &backup->res_list);
  48	vmw_bo_prio_add(backup, res->used_prio);
  49}
  50
  51/**
  52 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  53 * @res: The resource
  54 */
  55void vmw_resource_mob_detach(struct vmw_resource *res)
  56{
   57	struct vmw_buffer_object *backup = res->backup;
   58
  59	dma_resv_assert_held(backup->base.base.resv);
  60	if (vmw_resource_mob_attached(res)) {
  61		list_del_init(&res->mob_head);
  62		vmw_bo_prio_del(backup, res->used_prio);
  63	}
  64}
  65
  66struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  67{
  68	kref_get(&res->kref);
  69	return res;
  70}
  71
  72struct vmw_resource *
  73vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  74{
  75	return kref_get_unless_zero(&res->kref) ? res : NULL;
  76}
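
A quick illustration of the refcounting contract (editor's sketch, not part of the file): vmw_resource_reference() pairs with vmw_resource_unreference(), which also clears the caller's pointer.

	struct vmw_resource *hold = vmw_resource_reference(res);
	/* ... use the resource ... */
	vmw_resource_unreference(&hold);	/* hold is NULL afterwards */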
  77
  78/**
  79 * vmw_resource_release_id - release a resource id to the id manager.
  80 *
  81 * @res: Pointer to the resource.
  82 *
  83 * Release the resource id to the resource id manager and set it to -1
  84 */
  85void vmw_resource_release_id(struct vmw_resource *res)
  86{
  87	struct vmw_private *dev_priv = res->dev_priv;
  88	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  89
  90	spin_lock(&dev_priv->resource_lock);
  91	if (res->id != -1)
  92		idr_remove(idr, res->id);
  93	res->id = -1;
  94	spin_unlock(&dev_priv->resource_lock);
  95}
  96
  97static void vmw_resource_release(struct kref *kref)
  98{
  99	struct vmw_resource *res =
 100	    container_of(kref, struct vmw_resource, kref);
 101	struct vmw_private *dev_priv = res->dev_priv;
 102	int id;
 103	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 104
  105	spin_lock(&dev_priv->resource_lock);
 106	list_del_init(&res->lru_head);
 107	spin_unlock(&dev_priv->resource_lock);
 108	if (res->backup) {
 109		struct ttm_buffer_object *bo = &res->backup->base;
 110
 111		ttm_bo_reserve(bo, false, false, NULL);
 112		if (vmw_resource_mob_attached(res) &&
 113		    res->func->unbind != NULL) {
 114			struct ttm_validate_buffer val_buf;
 115
 116			val_buf.bo = bo;
 117			val_buf.num_shared = 0;
 118			res->func->unbind(res, false, &val_buf);
 119		}
 120		res->backup_dirty = false;
 121		vmw_resource_mob_detach(res);
 122		ttm_bo_unreserve(bo);
 123		vmw_bo_unreference(&res->backup);
 124	}
 125
 126	if (likely(res->hw_destroy != NULL)) {
 127		mutex_lock(&dev_priv->binding_mutex);
 128		vmw_binding_res_list_kill(&res->binding_head);
 129		mutex_unlock(&dev_priv->binding_mutex);
 130		res->hw_destroy(res);
 131	}
 132
 133	id = res->id;
 134	if (res->res_free != NULL)
 135		res->res_free(res);
 136	else
 137		kfree(res);
 138
 139	spin_lock(&dev_priv->resource_lock);
 140	if (id != -1)
 141		idr_remove(idr, id);
 142	spin_unlock(&dev_priv->resource_lock);
 143}
 144
 145void vmw_resource_unreference(struct vmw_resource **p_res)
 146{
 147	struct vmw_resource *res = *p_res;
 148
 149	*p_res = NULL;
 150	kref_put(&res->kref, vmw_resource_release);
 151}
 152
 153
 154/**
  155 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 156 *
 157 * @res: Pointer to the resource.
 158 *
 159 * Allocate the lowest free resource from the resource manager, and set
 160 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 161 */
 162int vmw_resource_alloc_id(struct vmw_resource *res)
 163{
 164	struct vmw_private *dev_priv = res->dev_priv;
 165	int ret;
 166	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 167
 168	BUG_ON(res->id != -1);
 169
 170	idr_preload(GFP_KERNEL);
 171	spin_lock(&dev_priv->resource_lock);
 172
 173	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 174	if (ret >= 0)
 175		res->id = ret;
 176
 177	spin_unlock(&dev_priv->resource_lock);
 178	idr_preload_end();
 179	return ret < 0 ? ret : 0;
 180}
 181
 182/**
 183 * vmw_resource_init - initialize a struct vmw_resource
 184 *
 185 * @dev_priv:       Pointer to a device private struct.
 186 * @res:            The struct vmw_resource to initialize.
  188 *
 188 * @delay_id:       Boolean whether to defer device id allocation until
 189 *                  the first validation.
 190 * @res_free:       Resource destructor.
 191 * @func:           Resource function table.
 192 */
 193int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 194		      bool delay_id,
 195		      void (*res_free) (struct vmw_resource *res),
 196		      const struct vmw_res_func *func)
 197{
 198	kref_init(&res->kref);
 199	res->hw_destroy = NULL;
  200	res->res_free = res_free;
 201	res->dev_priv = dev_priv;
 202	res->func = func;
 203	INIT_LIST_HEAD(&res->lru_head);
 204	INIT_LIST_HEAD(&res->mob_head);
 205	INIT_LIST_HEAD(&res->binding_head);
 206	res->id = -1;
 207	res->backup = NULL;
 208	res->backup_offset = 0;
 209	res->backup_dirty = false;
 210	res->res_dirty = false;
 211	res->used_prio = 3;
 212	if (delay_id)
 213		return 0;
 214	else
 215		return vmw_resource_alloc_id(res);
 216}
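
How a resource-type implementation might drive vmw_resource_init() (a sketch; my_res_func and my_res_free are hypothetical stand-ins for a real struct vmw_res_func table and destructor):

	static void my_res_free(struct vmw_resource *res)
	{
		kfree(res);	/* a real destructor frees the embedding object */
	}

	/* in a constructor, deferring device id allocation to first validation: */
	ret = vmw_resource_init(dev_priv, res, true, my_res_free, &my_res_func);
	if (ret)
		return ret;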
  217
  218
 219/**
 220 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 221 * TTM user-space handle and perform basic type checks
 222 *
 223 * @dev_priv:     Pointer to a device private struct
 224 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 225 * @handle:       The TTM user-space handle
 226 * @converter:    Pointer to an object describing the resource type
 227 * @p_res:        On successful return the location pointed to will contain
 228 *                a pointer to a refcounted struct vmw_resource.
 229 *
 230 * If the handle can't be found or is associated with an incorrect resource
 231 * type, -EINVAL will be returned.
 232 */
 233int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 234				    struct ttm_object_file *tfile,
 235				    uint32_t handle,
 236				    const struct vmw_user_resource_conv
 237				    *converter,
 238				    struct vmw_resource **p_res)
 239{
 240	struct ttm_base_object *base;
 241	struct vmw_resource *res;
 242	int ret = -EINVAL;
 243
 244	base = ttm_base_object_lookup(tfile, handle);
 245	if (unlikely(base == NULL))
 246		return -EINVAL;
 247
 248	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 249		goto out_bad_resource;
 250
  251	res = converter->base_obj_to_res(base);
  252	kref_get(&res->kref);
 253
 254	*p_res = res;
 255	ret = 0;
 256
 257out_bad_resource:
 258	ttm_base_object_unref(&base);
 259
 260	return ret;
 261}
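
Sketch of a call site, e.g. an ioctl handler resolving a user handle to a surface (user_surface_converter is a converter used later in this file; the surrounding variables are illustrative):

	struct vmw_resource *res;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter, &res);
	if (ret)
		return ret;
	/* ... use the refcounted resource ... */
	vmw_resource_unreference(&res);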
 262
  263/**
  264 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
  265 * TTM user-space handle and perform basic type checks
  266 *
  267 * @dev_priv:     Pointer to a device private struct
  268 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
  269 * @handle:       The TTM user-space handle
  270 * @converter:    Pointer to an object describing the resource type
  271 *
  272 * Unlike the function above, this one returns the resource without taking
  273 * an extra reference. If the handle can't be found, ERR_PTR(-ESRCH) is
  274 * returned; if it is associated with an incorrect resource type,
  275 * ERR_PTR(-EINVAL) is returned.
  276 */
 277struct vmw_resource *
 278vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 279				      struct ttm_object_file *tfile,
 280				      uint32_t handle,
 281				      const struct vmw_user_resource_conv
 282				      *converter)
 283{
 284	struct ttm_base_object *base;
 285
 286	base = ttm_base_object_noref_lookup(tfile, handle);
 287	if (!base)
 288		return ERR_PTR(-ESRCH);
 289
 290	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
 291		ttm_base_object_noref_release();
 292		return ERR_PTR(-EINVAL);
 293	}
 294
 295	return converter->base_obj_to_res(base);
 296}
 297
 298/**
  299 * Helper function that looks up either a surface or a buffer object.
  300 *
  301 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 302 */
 303int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 304			   struct ttm_object_file *tfile,
 305			   uint32_t handle,
 306			   struct vmw_surface **out_surf,
 307			   struct vmw_buffer_object **out_buf)
 308{
 309	struct vmw_resource *res;
 310	int ret;
 311
 312	BUG_ON(*out_surf || *out_buf);
 313
 314	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 315					      user_surface_converter,
 316					      &res);
 317	if (!ret) {
 318		*out_surf = vmw_res_to_srf(res);
 319		return 0;
 320	}
 321
 322	*out_surf = NULL;
  323	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
 324	return ret;
 325}
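
Illustrative call site: on success exactly one of the two out-pointers comes back non-NULL, so callers can branch on the kind of object the handle named.

	struct vmw_surface *surf = NULL;
	struct vmw_buffer_object *buf = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (ret)
		return ret;
	if (surf) {
		/* the handle named a surface */
	} else {
		/* the handle named a buffer object */
	}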
 326
  327/**
 328 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 329 *
 330 * @res:            The resource for which to allocate a backup buffer.
 331 * @interruptible:  Whether any sleeps during allocation should be
 332 *                  performed while interruptible.
 333 */
 334static int vmw_resource_buf_alloc(struct vmw_resource *res,
 335				  bool interruptible)
 336{
 337	unsigned long size =
 338		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
 339	struct vmw_buffer_object *backup;
 340	int ret;
 341
 342	if (likely(res->backup)) {
 343		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
 344		return 0;
 345	}
 346
 347	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
 348	if (unlikely(!backup))
 349		return -ENOMEM;
 350
 351	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
 352			      res->func->backup_placement,
 353			      interruptible,
 354			      &vmw_bo_bo_free);
 355	if (unlikely(ret != 0))
 356		goto out_no_bo;
 357
 358	res->backup = backup;
 359
 360out_no_bo:
 361	return ret;
 362}
 363
 364/**
 365 * vmw_resource_do_validate - Make a resource up-to-date and visible
 366 *                            to the device.
 367 *
 368 * @res:            The resource to make visible to the device.
 369 * @val_buf:        Information about a buffer possibly
 370 *                  containing backup data if a bind operation is needed.
 371 *
 372 * On hardware resource shortage, this function returns -EBUSY and
 373 * should be retried once resources have been freed up.
 374 */
 375static int vmw_resource_do_validate(struct vmw_resource *res,
 376				    struct ttm_validate_buffer *val_buf)
 377{
 378	int ret = 0;
 379	const struct vmw_res_func *func = res->func;
 380
 381	if (unlikely(res->id == -1)) {
 382		ret = func->create(res);
 383		if (unlikely(ret != 0))
 384			return ret;
 385	}
 386
 387	if (func->bind &&
 388	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 389	      val_buf->bo != NULL) ||
 390	     (!func->needs_backup && val_buf->bo != NULL))) {
 391		ret = func->bind(res, val_buf);
 392		if (unlikely(ret != 0))
 393			goto out_bind_failed;
 394		if (func->needs_backup)
 395			vmw_resource_mob_attach(res);
 396	}
  397
 398	return 0;
 399
 400out_bind_failed:
 401	func->destroy(res);
 402
 403	return ret;
 404}
 405
 406/**
 407 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 408 * command submission.
 409 *
 410 * @res:               Pointer to the struct vmw_resource to unreserve.
 411 * @dirty_set:         Change dirty status of the resource.
 412 * @dirty:             When changing dirty status indicates the new status.
 413 * @switch_backup:     Backup buffer has been switched.
 414 * @new_backup:        Pointer to new backup buffer if command submission
 415 *                     switched. May be NULL.
 416 * @new_backup_offset: New backup offset if @switch_backup is true.
 417 *
 418 * Currently unreserving a resource means putting it back on the device's
 419 * resource lru list, so that it can be evicted if necessary.
 420 */
 421void vmw_resource_unreserve(struct vmw_resource *res,
 422			    bool dirty_set,
 423			    bool dirty,
 424			    bool switch_backup,
 425			    struct vmw_buffer_object *new_backup,
 426			    unsigned long new_backup_offset)
 427{
 428	struct vmw_private *dev_priv = res->dev_priv;
 429
 430	if (!list_empty(&res->lru_head))
 431		return;
 432
 433	if (switch_backup && new_backup != res->backup) {
 434		if (res->backup) {
 435			vmw_resource_mob_detach(res);
  436			vmw_bo_unreference(&res->backup);
 437		}
 438
 439		if (new_backup) {
 440			res->backup = vmw_bo_reference(new_backup);
  441			vmw_resource_mob_attach(res);
 442		} else {
 443			res->backup = NULL;
 444		}
 445	}
 446	if (switch_backup)
 447		res->backup_offset = new_backup_offset;
 448
 449	if (dirty_set)
 450		res->res_dirty = dirty;
 451
 452	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 453		return;
 454
 455	spin_lock(&dev_priv->resource_lock);
 456	list_add_tail(&res->lru_head,
 457		      &res->dev_priv->res_lru[res->func->res_type]);
 458	spin_unlock(&dev_priv->resource_lock);
 459}
 460
 461/**
 462 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 463 *                             for a resource and in that case, allocate
 464 *                             one, reserve and validate it.
 465 *
  466 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 467 * @res:            The resource for which to allocate a backup buffer.
 468 * @interruptible:  Whether any sleeps during allocation should be
 469 *                  performed while interruptible.
 470 * @val_buf:        On successful return contains data about the
 471 *                  reserved and validated backup buffer.
 472 */
 473static int
 474vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 475			  struct vmw_resource *res,
 476			  bool interruptible,
 477			  struct ttm_validate_buffer *val_buf)
 478{
 479	struct ttm_operation_ctx ctx = { true, false };
 480	struct list_head val_list;
 481	bool backup_dirty = false;
 482	int ret;
 483
 484	if (unlikely(res->backup == NULL)) {
 485		ret = vmw_resource_buf_alloc(res, interruptible);
 486		if (unlikely(ret != 0))
 487			return ret;
 488	}
 489
 490	INIT_LIST_HEAD(&val_list);
 491	ttm_bo_get(&res->backup->base);
 492	val_buf->bo = &res->backup->base;
 493	val_buf->num_shared = 0;
 494	list_add_tail(&val_buf->head, &val_list);
 495	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
 496				     true);
 497	if (unlikely(ret != 0))
 498		goto out_no_reserve;
 499
 500	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 501		return 0;
 502
 503	backup_dirty = res->backup_dirty;
 504	ret = ttm_bo_validate(&res->backup->base,
 505			      res->func->backup_placement,
 506			      &ctx);
 507
 508	if (unlikely(ret != 0))
 509		goto out_no_validate;
 510
 511	return 0;
 512
 513out_no_validate:
 514	ttm_eu_backoff_reservation(ticket, &val_list);
 515out_no_reserve:
 516	ttm_bo_put(val_buf->bo);
 517	val_buf->bo = NULL;
 518	if (backup_dirty)
 519		vmw_bo_unreference(&res->backup);
 520
 521	return ret;
 522}
 523
 524/**
 525 * vmw_resource_reserve - Reserve a resource for command submission
 526 *
 527 * @res:            The resource to reserve.
 528 *
  529 * This function takes the resource off the LRU list and makes sure
 530 * a backup buffer is present for guest-backed resources. However,
 531 * the buffer may not be bound to the resource at this point.
 532 *
 533 */
 534int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 535			 bool no_backup)
 536{
 537	struct vmw_private *dev_priv = res->dev_priv;
 538	int ret;
 539
 540	spin_lock(&dev_priv->resource_lock);
 541	list_del_init(&res->lru_head);
 542	spin_unlock(&dev_priv->resource_lock);
 543
 544	if (res->func->needs_backup && res->backup == NULL &&
 545	    !no_backup) {
 546		ret = vmw_resource_buf_alloc(res, interruptible);
 547		if (unlikely(ret != 0)) {
 548			DRM_ERROR("Failed to allocate a backup buffer "
  549				  "of size %lu bytes.\n",
 550				  (unsigned long) res->backup_size);
 551			return ret;
 552		}
 553	}
 554
 555	return 0;
 556}
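
Reserve and unreserve bracket command submission; a minimal sketch of the pairing (error handling and the actual command emission elided):

	ret = vmw_resource_reserve(res, true, false);
	if (ret)
		return ret;
	/* ... validate and emit commands referencing res ... */
	vmw_resource_unreserve(res, false, false, false, NULL, 0);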
 557
 558/**
 559 * vmw_resource_backoff_reservation - Unreserve and unreference a
 560 *                                    backup buffer
  561 *
 562 * @ticket:         The ww acquire ctx used for reservation.
 563 * @val_buf:        Backup buffer information.
 564 */
 565static void
 566vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 567				 struct ttm_validate_buffer *val_buf)
 568{
 569	struct list_head val_list;
 570
 571	if (likely(val_buf->bo == NULL))
 572		return;
 573
 574	INIT_LIST_HEAD(&val_list);
 575	list_add_tail(&val_buf->head, &val_list);
 576	ttm_eu_backoff_reservation(ticket, &val_list);
 577	ttm_bo_put(val_buf->bo);
 578	val_buf->bo = NULL;
 579}
 580
 581/**
 582 * vmw_resource_do_evict - Evict a resource, and transfer its data
 583 *                         to a backup buffer.
 584 *
 585 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 586 * @res:            The resource to evict.
 587 * @interruptible:  Whether to wait interruptible.
 588 */
 589static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 590				 struct vmw_resource *res, bool interruptible)
 591{
 592	struct ttm_validate_buffer val_buf;
 593	const struct vmw_res_func *func = res->func;
 594	int ret;
 595
 596	BUG_ON(!func->may_evict);
 597
 598	val_buf.bo = NULL;
 599	val_buf.num_shared = 0;
 600	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 601	if (unlikely(ret != 0))
 602		return ret;
 603
 604	if (unlikely(func->unbind != NULL &&
 605		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 606		ret = func->unbind(res, res->res_dirty, &val_buf);
 607		if (unlikely(ret != 0))
 608			goto out_no_unbind;
 609		vmw_resource_mob_detach(res);
 610	}
 611	ret = func->destroy(res);
 612	res->backup_dirty = true;
 613	res->res_dirty = false;
 614out_no_unbind:
 615	vmw_resource_backoff_reservation(ticket, &val_buf);
 616
 617	return ret;
 618}
 619
 620
 621/**
 622 * vmw_resource_validate - Make a resource up-to-date and visible
 623 *                         to the device.
 624 * @res: The resource to make visible to the device.
 625 * @intr: Perform waits interruptible if possible.
 626 *
  627 * On successful return, any backup DMA buffer pointed to by @res->backup will
 628 * be reserved and validated.
 629 * On hardware resource shortage, this function will repeatedly evict
 630 * resources of the same type until the validation succeeds.
 631 *
 632 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 633 * on failure.
 634 */
 635int vmw_resource_validate(struct vmw_resource *res, bool intr)
 636{
 637	int ret;
 638	struct vmw_resource *evict_res;
 639	struct vmw_private *dev_priv = res->dev_priv;
 640	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 641	struct ttm_validate_buffer val_buf;
 642	unsigned err_count = 0;
 643
 644	if (!res->func->create)
 645		return 0;
 646
 647	val_buf.bo = NULL;
 648	val_buf.num_shared = 0;
 649	if (res->backup)
 650		val_buf.bo = &res->backup->base;
 651	do {
 652		ret = vmw_resource_do_validate(res, &val_buf);
 653		if (likely(ret != -EBUSY))
 654			break;
 655
 656		spin_lock(&dev_priv->resource_lock);
 657		if (list_empty(lru_list) || !res->func->may_evict) {
  658			DRM_ERROR("Out of device resources "
 659				  "for %s.\n", res->func->type_name);
 660			ret = -EBUSY;
 661			spin_unlock(&dev_priv->resource_lock);
 662			break;
 663		}
 664
 665		evict_res = vmw_resource_reference
 666			(list_first_entry(lru_list, struct vmw_resource,
 667					  lru_head));
 668		list_del_init(&evict_res->lru_head);
 669
 670		spin_unlock(&dev_priv->resource_lock);
 671
 672		/* Trylock backup buffers with a NULL ticket. */
 673		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 674		if (unlikely(ret != 0)) {
 675			spin_lock(&dev_priv->resource_lock);
 676			list_add_tail(&evict_res->lru_head, lru_list);
 677			spin_unlock(&dev_priv->resource_lock);
 678			if (ret == -ERESTARTSYS ||
 679			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 680				vmw_resource_unreference(&evict_res);
 681				goto out_no_validate;
 682			}
 683		}
 684
 685		vmw_resource_unreference(&evict_res);
 686	} while (1);
 687
 688	if (unlikely(ret != 0))
 689		goto out_no_validate;
 690	else if (!res->func->needs_backup && res->backup) {
 691		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 692		vmw_bo_unreference(&res->backup);
 693	}
 694
 695	return 0;
 696
 697out_no_validate:
 698	return ret;
 699}
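
A hedged sketch of how a caller reacts to the documented return values (the real callers live in the driver's validation code):

	ret = vmw_resource_validate(res, intr);
	if (ret == -ERESTARTSYS)
		return ret;	/* interrupted: restart the whole operation */
	else if (ret)
		return ret;	/* eviction could not free enough resources */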
  700
 701
 702/**
 703 * vmw_resource_unbind_list
 704 *
  705 * @vbo: Pointer to the current backing MOB.
 706 *
 707 * Evicts the Guest Backed hardware resource if the backup
 708 * buffer is being moved out of MOB memory.
 709 * Note that this function will not race with the resource
 710 * validation code, since resource validation and eviction
  711 * both require the backup buffer to be reserved.
 712 */
  713void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
  714{
 715
 716	struct vmw_resource *res, *next;
 717	struct ttm_validate_buffer val_buf = {
 718		.bo = &vbo->base,
 719		.num_shared = 0
 720	};
 721
 722	dma_resv_assert_held(vbo->base.base.resv);
 723	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 724		if (!res->func->unbind)
  725			continue;
 726
 727		(void) res->func->unbind(res, res->res_dirty, &val_buf);
 728		res->backup_dirty = true;
 729		res->res_dirty = false;
 730		vmw_resource_mob_detach(res);
  731	}
 732
  733	(void) ttm_bo_wait(&vbo->base, false, false);
 734}
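
This is driven from the driver's buffer-object move path; a hedged sketch of such a caller (old_mem_type and new_mem_type are illustrative names, and the BO must be reserved, per the dma_resv assert above):

	if (old_mem_type == VMW_PL_MOB && new_mem_type != VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);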
 735
 736
 737/**
 738 * vmw_query_readback_all - Read back cached query states
 739 *
 740 * @dx_query_mob: Buffer containing the DX query MOB
 741 *
 742 * Read back cached states from the device if they exist.  This function
  743 * assumes binding_mutex is held.
 744 */
 745int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 746{
 747	struct vmw_resource *dx_query_ctx;
 748	struct vmw_private *dev_priv;
 749	struct {
 750		SVGA3dCmdHeader header;
 751		SVGA3dCmdDXReadbackAllQuery body;
 752	} *cmd;
 753
 754
 755	/* No query bound, so do nothing */
 756	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 757		return 0;
 758
 759	dx_query_ctx = dx_query_mob->dx_query_ctx;
 760	dev_priv     = dx_query_ctx->dev_priv;
 761
 762	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
  763	if (unlikely(cmd == NULL))
  764		return -ENOMEM;
 765
 766	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 767	cmd->header.size = sizeof(cmd->body);
 768	cmd->body.cid    = dx_query_ctx->id;
 769
 770	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 771
  772	/* Triggers a rebind the next time the affected context is bound */
 773	dx_query_mob->dx_query_ctx = NULL;
 774
 775	return 0;
 776}
 777
 778
 779
 780/**
 781 * vmw_query_move_notify - Read back cached query states
 782 *
 783 * @bo: The TTM buffer object about to move.
 784 * @mem: The memory region @bo is moving to.
 785 *
 786 * Called before the query MOB is swapped out to read back cached query
 787 * states from the device.
 788 */
 789void vmw_query_move_notify(struct ttm_buffer_object *bo,
 790			   struct ttm_mem_reg *mem)
 791{
 792	struct vmw_buffer_object *dx_query_mob;
 793	struct ttm_bo_device *bdev = bo->bdev;
 794	struct vmw_private *dev_priv;
 795
 796
 797	dev_priv = container_of(bdev, struct vmw_private, bdev);
 798
 799	mutex_lock(&dev_priv->binding_mutex);
 800
 801	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
 802	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
 803		mutex_unlock(&dev_priv->binding_mutex);
 804		return;
 805	}
 806
 807	/* If BO is being moved from MOB to system memory */
 808	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
 809		struct vmw_fence_obj *fence;
 810
 811		(void) vmw_query_readback_all(dx_query_mob);
 812		mutex_unlock(&dev_priv->binding_mutex);
 813
 814		/* Create a fence and attach the BO to it */
 815		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 816		vmw_bo_fence_single(bo, fence);
 817
 818		if (fence != NULL)
 819			vmw_fence_obj_unreference(&fence);
 820
 821		(void) ttm_bo_wait(bo, false, false);
 822	} else
 823		mutex_unlock(&dev_priv->binding_mutex);
 824
 825}
 826
 827/**
 828 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 829 *
 830 * @res:            The resource being queried.
 831 */
 832bool vmw_resource_needs_backup(const struct vmw_resource *res)
 833{
 834	return res->func->needs_backup;
 835}
 836
 837/**
 838 * vmw_resource_evict_type - Evict all resources of a specific type
 839 *
 840 * @dev_priv:       Pointer to a device private struct
 841 * @type:           The resource type to evict
 842 *
 843 * To avoid thrashing starvation or as part of the hibernation sequence,
 844 * try to evict all evictable resources of a specific type.
 845 */
 846static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 847				    enum vmw_res_type type)
 848{
 849	struct list_head *lru_list = &dev_priv->res_lru[type];
 850	struct vmw_resource *evict_res;
 851	unsigned err_count = 0;
 852	int ret;
 853	struct ww_acquire_ctx ticket;
 854
 855	do {
 856		spin_lock(&dev_priv->resource_lock);
 857
 858		if (list_empty(lru_list))
 859			goto out_unlock;
 860
 861		evict_res = vmw_resource_reference(
 862			list_first_entry(lru_list, struct vmw_resource,
 863					 lru_head));
 864		list_del_init(&evict_res->lru_head);
 865		spin_unlock(&dev_priv->resource_lock);
 866
  867		/* Wait-lock backup buffers, using a ticket. */
 868		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 869		if (unlikely(ret != 0)) {
 870			spin_lock(&dev_priv->resource_lock);
 871			list_add_tail(&evict_res->lru_head, lru_list);
 872			spin_unlock(&dev_priv->resource_lock);
 873			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 874				vmw_resource_unreference(&evict_res);
 875				return;
 876			}
 877		}
 878
 879		vmw_resource_unreference(&evict_res);
 880	} while (1);
 881
 882out_unlock:
 883	spin_unlock(&dev_priv->resource_lock);
 884}
 885
 886/**
 887 * vmw_resource_evict_all - Evict all evictable resources
 888 *
 889 * @dev_priv:       Pointer to a device private struct
 890 *
 891 * To avoid thrashing starvation or as part of the hibernation sequence,
 892 * evict all evictable resources. In particular this means that all
 893 * guest-backed resources that are registered with the device are
 894 * evicted and the OTable becomes clean.
 895 */
 896void vmw_resource_evict_all(struct vmw_private *dev_priv)
 897{
 898	enum vmw_res_type type;
 899
 900	mutex_lock(&dev_priv->cmdbuf_mutex);
 901
 902	for (type = 0; type < vmw_res_max; ++type)
 903		vmw_resource_evict_type(dev_priv, type);
 904
 905	mutex_unlock(&dev_priv->cmdbuf_mutex);
 906}
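
For example, a suspend or hibernation path can quiesce the device with a single call (illustrative):

	/* push all evictable resources out to their backup buffers */
	vmw_resource_evict_all(dev_priv);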
 907
 908/**
 909 * vmw_resource_pin - Add a pin reference on a resource
 910 *
 911 * @res: The resource to add a pin reference on
 912 *
 913 * This function adds a pin reference, and if needed validates the resource.
 914 * Having a pin reference means that the resource can never be evicted, and
 915 * its id will never change as long as there is a pin reference.
 916 * This function returns 0 on success and a negative error code on failure.
 917 */
 918int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 919{
 920	struct ttm_operation_ctx ctx = { interruptible, false };
 921	struct vmw_private *dev_priv = res->dev_priv;
 922	int ret;
 923
 924	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 925	mutex_lock(&dev_priv->cmdbuf_mutex);
 926	ret = vmw_resource_reserve(res, interruptible, false);
 927	if (ret)
 928		goto out_no_reserve;
 929
 930	if (res->pin_count == 0) {
 931		struct vmw_buffer_object *vbo = NULL;
 932
 933		if (res->backup) {
 934			vbo = res->backup;
 935
 936			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
 937			if (!vbo->pin_count) {
 938				ret = ttm_bo_validate
 939					(&vbo->base,
 940					 res->func->backup_placement,
 941					 &ctx);
 942				if (ret) {
 943					ttm_bo_unreserve(&vbo->base);
 944					goto out_no_validate;
 945				}
 946			}
 947
 948			/* Do we really need to pin the MOB as well? */
 949			vmw_bo_pin_reserved(vbo, true);
 950		}
 951		ret = vmw_resource_validate(res, interruptible);
 952		if (vbo)
 953			ttm_bo_unreserve(&vbo->base);
 954		if (ret)
 955			goto out_no_validate;
 956	}
 957	res->pin_count++;
 958
 959out_no_validate:
 960	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 961out_no_reserve:
 962	mutex_unlock(&dev_priv->cmdbuf_mutex);
 963	ttm_write_unlock(&dev_priv->reservation_sem);
 964
 965	return ret;
 966}
 967
 968/**
 969 * vmw_resource_unpin - Remove a pin reference from a resource
 970 *
 971 * @res: The resource to remove a pin reference from
 972 *
 973 * Having a pin reference means that the resource can never be evicted, and
 974 * its id will never change as long as there is a pin reference.
 975 */
 976void vmw_resource_unpin(struct vmw_resource *res)
 977{
 978	struct vmw_private *dev_priv = res->dev_priv;
 979	int ret;
 980
 981	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
 982	mutex_lock(&dev_priv->cmdbuf_mutex);
 983
 984	ret = vmw_resource_reserve(res, false, true);
 985	WARN_ON(ret);
 986
 987	WARN_ON(res->pin_count == 0);
 988	if (--res->pin_count == 0 && res->backup) {
 989		struct vmw_buffer_object *vbo = res->backup;
 990
 991		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
 992		vmw_bo_pin_reserved(vbo, false);
 993		ttm_bo_unreserve(&vbo->base);
 994	}
 995
 996	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 997
 998	mutex_unlock(&dev_priv->cmdbuf_mutex);
 999	ttm_read_unlock(&dev_priv->reservation_sem);
1000}
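
Pin and unpin bracket uses that need a stable, non-evictable resource; a sketch:

	ret = vmw_resource_pin(res, true);
	if (ret)
		return ret;
	/* res->id is now stable and the resource cannot be evicted */
	/* ... use the pinned resource ... */
	vmw_resource_unpin(res);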
1001
1002/**
1003 * vmw_res_type - Return the resource type
1004 *
1005 * @res: Pointer to the resource
1006 */
1007enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1008{
1009	return res->func->res_type;
1010}
An earlier revision of the same file, as of Linux v4.17:
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
   34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38struct vmw_user_dma_buffer {
  39	struct ttm_prime_object prime;
  40	struct vmw_dma_buffer dma;
   41};
  42
  43struct vmw_bo_user_rep {
  44	uint32_t handle;
  45	uint64_t map_handle;
   46};
  47
  48static inline struct vmw_dma_buffer *
   49vmw_dma_buffer(struct ttm_buffer_object *bo)
  50{
  51	return container_of(bo, struct vmw_dma_buffer, base);
  52}
  53
  54static inline struct vmw_user_dma_buffer *
  55vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  56{
  57	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  58	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  59}
  60
  61struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  62{
  63	kref_get(&res->kref);
  64	return res;
  65}
  66
  67struct vmw_resource *
  68vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  69{
  70	return kref_get_unless_zero(&res->kref) ? res : NULL;
  71}
  72
  73/**
  74 * vmw_resource_release_id - release a resource id to the id manager.
  75 *
  76 * @res: Pointer to the resource.
  77 *
  78 * Release the resource id to the resource id manager and set it to -1
  79 */
  80void vmw_resource_release_id(struct vmw_resource *res)
  81{
  82	struct vmw_private *dev_priv = res->dev_priv;
  83	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  84
  85	write_lock(&dev_priv->resource_lock);
  86	if (res->id != -1)
  87		idr_remove(idr, res->id);
  88	res->id = -1;
  89	write_unlock(&dev_priv->resource_lock);
  90}
  91
  92static void vmw_resource_release(struct kref *kref)
  93{
  94	struct vmw_resource *res =
  95	    container_of(kref, struct vmw_resource, kref);
  96	struct vmw_private *dev_priv = res->dev_priv;
  97	int id;
  98	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  99
 100	write_lock(&dev_priv->resource_lock);
 101	res->avail = false;
 102	list_del_init(&res->lru_head);
 103	write_unlock(&dev_priv->resource_lock);
 104	if (res->backup) {
 105		struct ttm_buffer_object *bo = &res->backup->base;
 106
 107		ttm_bo_reserve(bo, false, false, NULL);
 108		if (!list_empty(&res->mob_head) &&
 109		    res->func->unbind != NULL) {
 110			struct ttm_validate_buffer val_buf;
 111
 112			val_buf.bo = bo;
 113			val_buf.shared = false;
 114			res->func->unbind(res, false, &val_buf);
 115		}
 116		res->backup_dirty = false;
 117		list_del_init(&res->mob_head);
 118		ttm_bo_unreserve(bo);
 119		vmw_dmabuf_unreference(&res->backup);
 120	}
 121
 122	if (likely(res->hw_destroy != NULL)) {
 123		mutex_lock(&dev_priv->binding_mutex);
 124		vmw_binding_res_list_kill(&res->binding_head);
 125		mutex_unlock(&dev_priv->binding_mutex);
 126		res->hw_destroy(res);
 127	}
 128
 129	id = res->id;
 130	if (res->res_free != NULL)
 131		res->res_free(res);
 132	else
 133		kfree(res);
 134
 135	write_lock(&dev_priv->resource_lock);
 136	if (id != -1)
 137		idr_remove(idr, id);
 138	write_unlock(&dev_priv->resource_lock);
 139}
 140
 141void vmw_resource_unreference(struct vmw_resource **p_res)
 142{
 143	struct vmw_resource *res = *p_res;
 144
 145	*p_res = NULL;
 146	kref_put(&res->kref, vmw_resource_release);
 147}
 148
 149
 150/**
  151 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 152 *
 153 * @res: Pointer to the resource.
 154 *
 155 * Allocate the lowest free resource from the resource manager, and set
 156 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 157 */
 158int vmw_resource_alloc_id(struct vmw_resource *res)
 159{
 160	struct vmw_private *dev_priv = res->dev_priv;
 161	int ret;
 162	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 163
 164	BUG_ON(res->id != -1);
 165
 166	idr_preload(GFP_KERNEL);
 167	write_lock(&dev_priv->resource_lock);
 168
 169	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 170	if (ret >= 0)
 171		res->id = ret;
 172
 173	write_unlock(&dev_priv->resource_lock);
 174	idr_preload_end();
 175	return ret < 0 ? ret : 0;
 176}
 177
 178/**
 179 * vmw_resource_init - initialize a struct vmw_resource
 180 *
 181 * @dev_priv:       Pointer to a device private struct.
 182 * @res:            The struct vmw_resource to initialize.
  183 *
 184 * @delay_id:       Boolean whether to defer device id allocation until
 185 *                  the first validation.
 186 * @res_free:       Resource destructor.
 187 * @func:           Resource function table.
 188 */
 189int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 190		      bool delay_id,
 191		      void (*res_free) (struct vmw_resource *res),
 192		      const struct vmw_res_func *func)
 193{
 194	kref_init(&res->kref);
 195	res->hw_destroy = NULL;
 196	res->res_free = res_free;
 197	res->avail = false;
 198	res->dev_priv = dev_priv;
 199	res->func = func;
 200	INIT_LIST_HEAD(&res->lru_head);
 201	INIT_LIST_HEAD(&res->mob_head);
 202	INIT_LIST_HEAD(&res->binding_head);
 203	res->id = -1;
 204	res->backup = NULL;
 205	res->backup_offset = 0;
 206	res->backup_dirty = false;
  207	res->res_dirty = false;
 208	if (delay_id)
 209		return 0;
 210	else
 211		return vmw_resource_alloc_id(res);
 212}
 213
 214/**
 215 * vmw_resource_activate
 216 *
 217 * @res:        Pointer to the newly created resource
 218 * @hw_destroy: Destroy function. NULL if none.
 219 *
 220 * Activate a resource after the hardware has been made aware of it.
  221 * Set the destroy function to @hw_destroy. Typically this frees the
 222 * resource and destroys the hardware resources associated with it.
 223 * Activate basically means that the function vmw_resource_lookup will
 224 * find it.
 225 */
 226void vmw_resource_activate(struct vmw_resource *res,
 227			   void (*hw_destroy) (struct vmw_resource *))
 228{
 229	struct vmw_private *dev_priv = res->dev_priv;
 230
 231	write_lock(&dev_priv->resource_lock);
 232	res->avail = true;
 233	res->hw_destroy = hw_destroy;
 234	write_unlock(&dev_priv->resource_lock);
 235}
 236
 237/**
 238 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 239 * TTM user-space handle and perform basic type checks
 240 *
 241 * @dev_priv:     Pointer to a device private struct
 242 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 243 * @handle:       The TTM user-space handle
 244 * @converter:    Pointer to an object describing the resource type
 245 * @p_res:        On successful return the location pointed to will contain
 246 *                a pointer to a refcounted struct vmw_resource.
 247 *
 248 * If the handle can't be found or is associated with an incorrect resource
 249 * type, -EINVAL will be returned.
 250 */
 251int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 252				    struct ttm_object_file *tfile,
 253				    uint32_t handle,
 254				    const struct vmw_user_resource_conv
 255				    *converter,
 256				    struct vmw_resource **p_res)
 257{
 258	struct ttm_base_object *base;
 259	struct vmw_resource *res;
 260	int ret = -EINVAL;
 261
 262	base = ttm_base_object_lookup(tfile, handle);
 263	if (unlikely(base == NULL))
 264		return -EINVAL;
 265
 266	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 267		goto out_bad_resource;
 268
 269	res = converter->base_obj_to_res(base);
 270
 271	read_lock(&dev_priv->resource_lock);
 272	if (!res->avail || res->res_free != converter->res_free) {
 273		read_unlock(&dev_priv->resource_lock);
 274		goto out_bad_resource;
 275	}
 276
 277	kref_get(&res->kref);
 278	read_unlock(&dev_priv->resource_lock);
 279
 280	*p_res = res;
 281	ret = 0;
 282
 283out_bad_resource:
 284	ttm_base_object_unref(&base);
 285
 286	return ret;
 287}
 288
 289/**
  290 * Helper function that looks up either a surface or a DMA buffer.
 291 *
  292 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 293 */
 294int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 295			   struct ttm_object_file *tfile,
 296			   uint32_t handle,
 297			   struct vmw_surface **out_surf,
 298			   struct vmw_dma_buffer **out_buf)
 299{
 300	struct vmw_resource *res;
 301	int ret;
 302
 303	BUG_ON(*out_surf || *out_buf);
 304
 305	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 306					      user_surface_converter,
 307					      &res);
 308	if (!ret) {
 309		*out_surf = vmw_res_to_srf(res);
 310		return 0;
 311	}
 312
 313	*out_surf = NULL;
 314	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 315	return ret;
 316}
 317
 318/**
 319 * Buffer management.
 320 */
 321
 322/**
 323 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 324 *
 325 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 326 * @size: The requested buffer size.
 327 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 328 */
 329static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 330				  bool user)
 331{
 332	static size_t struct_size, user_struct_size;
 333	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 334	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 335
 336	if (unlikely(struct_size == 0)) {
 337		size_t backend_size = ttm_round_pot(vmw_tt_size);
 338
 339		struct_size = backend_size +
 340			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 341		user_struct_size = backend_size +
 342			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 343	}
 344
 345	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 346		page_array_size +=
 347			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 348
 349	return ((user) ? user_struct_size : struct_size) +
 350		page_array_size;
 351}
 352
 353void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 354{
 355	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 356
 357	vmw_dma_buffer_unmap(vmw_bo);
 358	kfree(vmw_bo);
 359}
 360
 361static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 362{
 363	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 364
 365	vmw_dma_buffer_unmap(&vmw_user_bo->dma);
 366	ttm_prime_object_kfree(vmw_user_bo, prime);
 367}
 368
 369int vmw_dmabuf_init(struct vmw_private *dev_priv,
 370		    struct vmw_dma_buffer *vmw_bo,
 371		    size_t size, struct ttm_placement *placement,
 372		    bool interruptible,
 373		    void (*bo_free) (struct ttm_buffer_object *bo))
 374{
 375	struct ttm_bo_device *bdev = &dev_priv->bdev;
 376	size_t acc_size;
 377	int ret;
 378	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 379
 380	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 381
 382	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 383	memset(vmw_bo, 0, sizeof(*vmw_bo));
 384
 385	INIT_LIST_HEAD(&vmw_bo->res_list);
 386
 387	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 388			  ttm_bo_type_device, placement,
 389			  0, interruptible, acc_size,
 390			  NULL, NULL, bo_free);
 391	return ret;
 392}
 393
 394static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 395{
 396	struct vmw_user_dma_buffer *vmw_user_bo;
 397	struct ttm_base_object *base = *p_base;
 398	struct ttm_buffer_object *bo;
 399
 400	*p_base = NULL;
 401
 402	if (unlikely(base == NULL))
 403		return;
 404
 405	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 406				   prime.base);
 407	bo = &vmw_user_bo->dma.base;
 408	ttm_bo_unref(&bo);
 409}
 410
 411static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 412					    enum ttm_ref_type ref_type)
 413{
 414	struct vmw_user_dma_buffer *user_bo;
 415	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 416
 417	switch (ref_type) {
 418	case TTM_REF_SYNCCPU_WRITE:
 419		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 420		break;
 421	default:
 422		BUG();
 423	}
 424}
 425
 426/**
 427 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 428 *
 429 * @dev_priv: Pointer to a struct device private.
 430 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 431 * object.
 432 * @size: Size of the dma buffer.
 433 * @shareable: Boolean whether the buffer is shareable with other open files.
 434 * @handle: Pointer to where the handle value should be assigned.
 435 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 436 * should be assigned.
 437 */
 438int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 439			  struct ttm_object_file *tfile,
 440			  uint32_t size,
 441			  bool shareable,
 442			  uint32_t *handle,
 443			  struct vmw_dma_buffer **p_dma_buf,
 444			  struct ttm_base_object **p_base)
 445{
 446	struct vmw_user_dma_buffer *user_bo;
 447	struct ttm_buffer_object *tmp;
 448	int ret;
 449
 450	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 451	if (unlikely(!user_bo)) {
 452		DRM_ERROR("Failed to allocate a buffer.\n");
 453		return -ENOMEM;
 454	}
 455
 456	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 457			      (dev_priv->has_mob) ?
 458			      &vmw_sys_placement :
 459			      &vmw_vram_sys_placement, true,
 460			      &vmw_user_dmabuf_destroy);
 461	if (unlikely(ret != 0))
 462		return ret;
 463
 464	tmp = ttm_bo_reference(&user_bo->dma.base);
 465	ret = ttm_prime_object_init(tfile,
 466				    size,
 467				    &user_bo->prime,
 468				    shareable,
 469				    ttm_buffer_type,
 470				    &vmw_user_dmabuf_release,
 471				    &vmw_user_dmabuf_ref_obj_release);
 472	if (unlikely(ret != 0)) {
 473		ttm_bo_unref(&tmp);
 474		goto out_no_base_object;
 475	}
 476
 477	*p_dma_buf = &user_bo->dma;
 478	if (p_base) {
 479		*p_base = &user_bo->prime.base;
 480		kref_get(&(*p_base)->refcount);
 481	}
 482	*handle = user_bo->prime.base.hash.key;
 483
 484out_no_base_object:
 485	return ret;
 486}
 487
 488/**
 489 * vmw_user_dmabuf_verify_access - verify access permissions on this
 490 * buffer object.
 491 *
 492 * @bo: Pointer to the buffer object being accessed
 493 * @tfile: Identifying the caller.
 494 */
 495int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 496				  struct ttm_object_file *tfile)
 497{
 498	struct vmw_user_dma_buffer *vmw_user_bo;
 499
 500	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 501		return -EPERM;
 502
 503	vmw_user_bo = vmw_user_dma_buffer(bo);
 504
 505	/* Check that the caller has opened the object. */
 506	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 507		return 0;
 508
 509	DRM_ERROR("Could not grant buffer access.\n");
 510	return -EPERM;
 511}
 512
 513/**
 514 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 515 * access, idling previous GPU operations on the buffer and optionally
 516 * blocking it for further command submissions.
 517 *
 518 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 519 * @tfile: Identifying the caller.
 520 * @flags: Flags indicating how the grab should be performed.
 521 *
 522 * A blocking grab will be automatically released when @tfile is closed.
 523 */
 524static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 525					struct ttm_object_file *tfile,
 526					uint32_t flags)
 527{
 528	struct ttm_buffer_object *bo = &user_bo->dma.base;
 529	bool existed;
 530	int ret;
 531
 532	if (flags & drm_vmw_synccpu_allow_cs) {
 533		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 534		long lret;
 535
 536		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
 537							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 538		if (!lret)
 539			return -EBUSY;
 540		else if (lret < 0)
 541			return lret;
 542		return 0;
 543	}
 544
 545	ret = ttm_bo_synccpu_write_grab
 546		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 547	if (unlikely(ret != 0))
 548		return ret;
 549
 550	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 551				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 552	if (ret != 0 || existed)
 553		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 554
 555	return ret;
 556}
 557
 558/**
 559 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 560 * and unblock command submission on the buffer if blocked.
 561 *
 562 * @handle: Handle identifying the buffer object.
 563 * @tfile: Identifying the caller.
 564 * @flags: Flags indicating the type of release.
 565 */
 566static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 567					   struct ttm_object_file *tfile,
 568					   uint32_t flags)
 569{
 570	if (!(flags & drm_vmw_synccpu_allow_cs))
 571		return ttm_ref_object_base_unref(tfile, handle,
 572						 TTM_REF_SYNCCPU_WRITE);
 573
 574	return 0;
 575}
 576
 577/**
  578 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 579 * functionality.
 580 *
 581 * @dev: Identifies the drm device.
 582 * @data: Pointer to the ioctl argument.
 583 * @file_priv: Identifies the caller.
 584 *
 585 * This function checks the ioctl arguments for validity and calls the
 586 * relevant synccpu functions.
 587 */
 588int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 589				  struct drm_file *file_priv)
 590{
 591	struct drm_vmw_synccpu_arg *arg =
 592		(struct drm_vmw_synccpu_arg *) data;
 593	struct vmw_dma_buffer *dma_buf;
 594	struct vmw_user_dma_buffer *user_bo;
 595	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 596	struct ttm_base_object *buffer_base;
 597	int ret;
 598
 599	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 600	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 601			       drm_vmw_synccpu_dontblock |
 602			       drm_vmw_synccpu_allow_cs)) != 0) {
 603		DRM_ERROR("Illegal synccpu flags.\n");
 604		return -EINVAL;
 605	}
 606
 607	switch (arg->op) {
 608	case drm_vmw_synccpu_grab:
 609		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
 610					     &buffer_base);
 611		if (unlikely(ret != 0))
 612			return ret;
 613
 614		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 615				       dma);
 616		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 617		vmw_dmabuf_unreference(&dma_buf);
 618		ttm_base_object_unref(&buffer_base);
 619		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 620			     ret != -EBUSY)) {
 621			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 622				  (unsigned int) arg->handle);
 623			return ret;
 624		}
 625		break;
 626	case drm_vmw_synccpu_release:
 627		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 628						      arg->flags);
 629		if (unlikely(ret != 0)) {
 630			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 631				  (unsigned int) arg->handle);
 632			return ret;
 633		}
 634		break;
 635	default:
 636		DRM_ERROR("Invalid synccpu operation.\n");
 637		return -EINVAL;
 638	}
 639
 640	return 0;
 641}
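
Seen from user space, the flow above pairs a grab with a release; a hedged sketch using libdrm's drmCommandWrite() and the arg fields visible in the handler (the DRM_VMW_SYNCCPU command index and bo_handle are assumptions about the surrounding code):

	struct drm_vmw_synccpu_arg arg = { 0 };

	arg.handle = bo_handle;
	arg.op = drm_vmw_synccpu_grab;
	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));

	/* ... CPU access to the buffer ... */

	arg.op = drm_vmw_synccpu_release;
	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));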
 642
 643int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 644			   struct drm_file *file_priv)
 645{
 646	struct vmw_private *dev_priv = vmw_priv(dev);
 647	union drm_vmw_alloc_dmabuf_arg *arg =
 648	    (union drm_vmw_alloc_dmabuf_arg *)data;
 649	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 650	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 651	struct vmw_dma_buffer *dma_buf;
 652	uint32_t handle;
 653	int ret;
 654
 655	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 656	if (unlikely(ret != 0))
 657		return ret;
 658
 659	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 660				    req->size, false, &handle, &dma_buf,
 661				    NULL);
 662	if (unlikely(ret != 0))
 663		goto out_no_dmabuf;
 664
 665	rep->handle = handle;
 666	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 667	rep->cur_gmr_id = handle;
 668	rep->cur_gmr_offset = 0;
 669
 670	vmw_dmabuf_unreference(&dma_buf);
 671
 672out_no_dmabuf:
 673	ttm_read_unlock(&dev_priv->reservation_sem);
 674
 675	return ret;
 676}
 677
 678int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 679			   struct drm_file *file_priv)
 680{
 681	struct drm_vmw_unref_dmabuf_arg *arg =
 682	    (struct drm_vmw_unref_dmabuf_arg *)data;
 683
 684	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 685					 arg->handle,
 686					 TTM_REF_USAGE);
 687}
 688
 689int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 690			   uint32_t handle, struct vmw_dma_buffer **out,
 691			   struct ttm_base_object **p_base)
 692{
 693	struct vmw_user_dma_buffer *vmw_user_bo;
 694	struct ttm_base_object *base;
 695
 696	base = ttm_base_object_lookup(tfile, handle);
 697	if (unlikely(base == NULL)) {
 698		pr_err("Invalid buffer object handle 0x%08lx\n",
 699		       (unsigned long)handle);
 700		return -ESRCH;
 701	}
 702
 703	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 704		ttm_base_object_unref(&base);
 705		pr_err("Invalid buffer object handle 0x%08lx\n",
 706		       (unsigned long)handle);
 707		return -EINVAL;
 708	}
 709
 710	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 711				   prime.base);
 712	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 713	if (p_base)
 714		*p_base = base;
 715	else
 716		ttm_base_object_unref(&base);
 717	*out = &vmw_user_bo->dma;
 718
 719	return 0;
 720}
 721
 722int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 723			      struct vmw_dma_buffer *dma_buf,
 724			      uint32_t *handle)
 725{
 726	struct vmw_user_dma_buffer *user_bo;
 727
 728	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 729		return -EINVAL;
 730
 731	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 732
 733	*handle = user_bo->prime.base.hash.key;
 734	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 735				  TTM_REF_USAGE, NULL, false);
 736}
 737
 738/**
 739 * vmw_dumb_create - Create a dumb kms buffer
 740 *
 741 * @file_priv: Pointer to a struct drm_file identifying the caller.
 742 * @dev: Pointer to the drm device.
 743 * @args: Pointer to a struct drm_mode_create_dumb structure
 744 *
 745 * This is a driver callback for the core drm create_dumb functionality.
 746 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 747 * that the arguments have a different format.
 748 */
 749int vmw_dumb_create(struct drm_file *file_priv,
 750		    struct drm_device *dev,
 751		    struct drm_mode_create_dumb *args)
 752{
 753	struct vmw_private *dev_priv = vmw_priv(dev);
 754	struct vmw_dma_buffer *dma_buf;
 755	int ret;
 756
 757	args->pitch = args->width * ((args->bpp + 7) / 8);
 758	args->size = args->pitch * args->height;
 759
 760	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 761	if (unlikely(ret != 0))
 762		return ret;
 763
 764	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 765				    args->size, false, &args->handle,
 766				    &dma_buf, NULL);
 767	if (unlikely(ret != 0))
 768		goto out_no_dmabuf;
 769
 770	vmw_dmabuf_unreference(&dma_buf);
 771out_no_dmabuf:
 772	ttm_read_unlock(&dev_priv->reservation_sem);
 773	return ret;
 774}
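
/*
 * Editor's sketch (not part of the driver): the userspace side of the
 * dumb-create path above goes through the generic DRM ioctl. "fd" is
 * assumed to be an open vmwgfx device node; error handling is elided:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_mode_create_dumb creq;
 *
 *	memset(&creq, 0, sizeof(creq));
 *	creq.width  = 1024;
 *	creq.height = 768;
 *	creq.bpp    = 32;
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 * On return, creq.handle names the buffer, and creq.pitch/creq.size hold
 * the values computed in vmw_dumb_create() above.
 */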
 775
 776/**
 777 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 778 *
 779 * @file_priv: Pointer to a struct drm_file identifying the caller.
 780 * @dev: Pointer to the drm device.
 781 * @handle: Handle identifying the dumb buffer.
 782 * @offset: The address space offset returned.
 783 *
 784 * This is a driver callback for the core drm dumb_map_offset functionality.
 785 */
 786int vmw_dumb_map_offset(struct drm_file *file_priv,
 787			struct drm_device *dev, uint32_t handle,
 788			uint64_t *offset)
 789{
 790	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 791	struct vmw_dma_buffer *out_buf;
 792	int ret;
 793
 794	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 795	if (ret != 0)
 796		return -EINVAL;
 797
 798	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
 799	vmw_dmabuf_unreference(&out_buf);
 800	return 0;
 801}
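
/*
 * Editor's sketch continuing the userspace example above: the offset
 * produced by vmw_dumb_map_offset() reaches userspace via
 * DRM_IOCTL_MODE_MAP_DUMB and is then passed to mmap():
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_mode_map_dumb mreq;
 *	void *map;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mreq.offset);
 */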
 802
 803/**
 804 * vmw_dumb_destroy - Destroy a dumb buffer
 805 *
 806 * @file_priv: Pointer to a struct drm_file identifying the caller.
 807 * @dev: Pointer to the drm device.
 808 * @handle: Handle identifying the dumb buffer.
 809 *
 810 * This is a driver callback for the core drm dumb_destroy functionality.
 811 */
 812int vmw_dumb_destroy(struct drm_file *file_priv,
 813		     struct drm_device *dev,
 814		     uint32_t handle)
 815{
 816	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 817					 handle, TTM_REF_USAGE);
 818}
 819
 820/**
 821 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 822 *
 823 * @res:            The resource for which to allocate a backup buffer.
 824 * @interruptible:  Whether any sleeps during allocation should be
 825 *                  performed while interruptible.
 826 */
 827static int vmw_resource_buf_alloc(struct vmw_resource *res,
 828				  bool interruptible)
 829{
 830	unsigned long size =
 831		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
 832	struct vmw_dma_buffer *backup;
 833	int ret;
 834
 835	if (likely(res->backup)) {
 836		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
 837		return 0;
 838	}
 839
 840	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
 841	if (unlikely(!backup))
 842		return -ENOMEM;
 843
 844	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
 845			      res->func->backup_placement,
 846			      interruptible,
 847			      &vmw_dmabuf_bo_free);
 848	if (unlikely(ret != 0))
 849		goto out_no_dmabuf;
 850
 851	res->backup = backup;
 852
 853out_no_dmabuf:
 854	return ret;
 855}
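
/*
 * Editor's note on the round-up above: PAGE_MASK is ~(PAGE_SIZE - 1), so
 * with a 4 KiB page size a backup_size of 4097 yields
 * (4097 + 4095) & ~4095 == 8192, i.e. the backup buffer is padded to
 * whole pages.
 */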
 856
 857/**
 858 * vmw_resource_do_validate - Make a resource up-to-date and visible
 859 *                            to the device.
 860 *
 861 * @res:            The resource to make visible to the device.
 862 * @val_buf:        Information about a buffer possibly
 863 *                  containing backup data if a bind operation is needed.
 864 *
 865 * On hardware resource shortage, this function returns -EBUSY and
 866 * should be retried once resources have been freed up.
 867 */
 868static int vmw_resource_do_validate(struct vmw_resource *res,
 869				    struct ttm_validate_buffer *val_buf)
 870{
 871	int ret = 0;
 872	const struct vmw_res_func *func = res->func;
 873
 874	if (unlikely(res->id == -1)) {
 875		ret = func->create(res);
 876		if (unlikely(ret != 0))
 877			return ret;
 878	}
 879
 880	if (func->bind &&
 881	    ((func->needs_backup && list_empty(&res->mob_head) &&
 882	      val_buf->bo != NULL) ||
 883	     (!func->needs_backup && val_buf->bo != NULL))) {
 884		ret = func->bind(res, val_buf);
 885		if (unlikely(ret != 0))
 886			goto out_bind_failed;
 887		if (func->needs_backup)
 888			list_add_tail(&res->mob_head, &res->backup->res_list);
 889	}
 890
 891	/*
 892	 * TODO: Only set this on write operations, and move it to
 893	 * vmw_resource_unreserve if that can be called after the
 894	 * backup buffers have been unreserved. Otherwise the
 895	 * locking needs sorting out.
 896	 */
 897	res->res_dirty = true;
 898
 899	return 0;
 900
 901out_bind_failed:
 902	func->destroy(res);
 903
 904	return ret;
 905}
 906
 907/**
 908 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 909 * command submission.
 910 *
 911 * @res:               Pointer to the struct vmw_resource to unreserve.
 912 * @switch_backup:     Backup buffer has been switched.
 913 * @new_backup:        Pointer to new backup buffer if command submission
 914 *                     switched. May be NULL.
 915 * @new_backup_offset: New backup offset if @switch_backup is true.
 916 *
 917 * Currently unreserving a resource means putting it back on the device's
 918 * resource lru list, so that it can be evicted if necessary.
 919 */
 920void vmw_resource_unreserve(struct vmw_resource *res,
 921			    bool switch_backup,
 922			    struct vmw_dma_buffer *new_backup,
 923			    unsigned long new_backup_offset)
 924{
 925	struct vmw_private *dev_priv = res->dev_priv;
 926
 927	if (!list_empty(&res->lru_head))
 928		return;
 929
 930	if (switch_backup && new_backup != res->backup) {
 931		if (res->backup) {
 932			lockdep_assert_held(&res->backup->base.resv->lock.base);
 933			list_del_init(&res->mob_head);
 934			vmw_dmabuf_unreference(&res->backup);
 935		}
 936
 937		if (new_backup) {
 938			res->backup = vmw_dmabuf_reference(new_backup);
 939			lockdep_assert_held(&new_backup->base.resv->lock.base);
 940			list_add_tail(&res->mob_head, &new_backup->res_list);
 941		} else {
 942			res->backup = NULL;
 943		}
 944	}
 945	if (switch_backup)
 946		res->backup_offset = new_backup_offset;
 947
 948	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 949		return;
 950
 951	write_lock(&dev_priv->resource_lock);
 952	list_add_tail(&res->lru_head,
 953		      &res->dev_priv->res_lru[res->func->res_type]);
 954	write_unlock(&dev_priv->resource_lock);
 955}
 956
 957/**
 958 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 959 *                             for a resource and in that case, allocate
 960 *                             one, reserve and validate it.
 961 *
 962 * @res:            The resource for which to allocate a backup buffer.
 963 * @interruptible:  Whether any sleeps during allocation should be
 964 *                  performed while interruptible.
 965 * @val_buf:        On successful return contains data about the
 966 *                  reserved and validated backup buffer.
 967 */
 968static int
 969vmw_resource_check_buffer(struct vmw_resource *res,
 970			  bool interruptible,
 971			  struct ttm_validate_buffer *val_buf)
 972{
 973	struct ttm_operation_ctx ctx = { true, false };
 974	struct list_head val_list;
 975	bool backup_dirty = false;
 976	int ret;
 977
 978	if (unlikely(res->backup == NULL)) {
 979		ret = vmw_resource_buf_alloc(res, interruptible);
 980		if (unlikely(ret != 0))
 981			return ret;
 982	}
 983
 984	INIT_LIST_HEAD(&val_list);
 985	val_buf->bo = ttm_bo_reference(&res->backup->base);
 986	val_buf->shared = false;
 987	list_add_tail(&val_buf->head, &val_list);
 988	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
 989	if (unlikely(ret != 0))
 990		goto out_no_reserve;
 991
 992	if (res->func->needs_backup && list_empty(&res->mob_head))
 993		return 0;
 994
 995	backup_dirty = res->backup_dirty;
 996	ret = ttm_bo_validate(&res->backup->base,
 997			      res->func->backup_placement,
 998			      &ctx);
 999
1000	if (unlikely(ret != 0))
1001		goto out_no_validate;
1002
1003	return 0;
1004
1005out_no_validate:
1006	ttm_eu_backoff_reservation(NULL, &val_list);
1007out_no_reserve:
1008	ttm_bo_unref(&val_buf->bo);
1009	if (backup_dirty)
1010		vmw_dmabuf_unreference(&res->backup);
1011
1012	return ret;
1013}
1014
1015/**
1016 * vmw_resource_reserve - Reserve a resource for command submission
1017 *
 1018 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during backup buffer allocation
 *                  should be performed while interruptible.
 * @no_backup:      Whether to skip allocation of a missing backup buffer
 *                  for guest-backed resources.
1019 *
 1020 * This function takes the resource off the LRU list and makes sure
1021 * a backup buffer is present for guest-backed resources. However,
1022 * the buffer may not be bound to the resource at this point.
1023 *
1024 */
1025int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1026			 bool no_backup)
1027{
1028	struct vmw_private *dev_priv = res->dev_priv;
1029	int ret;
1030
1031	write_lock(&dev_priv->resource_lock);
1032	list_del_init(&res->lru_head);
1033	write_unlock(&dev_priv->resource_lock);
1034
1035	if (res->func->needs_backup && res->backup == NULL &&
1036	    !no_backup) {
1037		ret = vmw_resource_buf_alloc(res, interruptible);
1038		if (unlikely(ret != 0)) {
1039			DRM_ERROR("Failed to allocate a backup buffer "
 1040				  "of size %lu bytes.\n",
1041				  (unsigned long) res->backup_size);
1042			return ret;
1043		}
1044	}
1045
1046	return 0;
1047}
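
/*
 * Editor's sketch of the reserve/validate/unreserve lifecycle implemented
 * by this file; vmw_resource_pin() below is a concrete in-tree caller, and
 * it takes dev_priv->cmdbuf_mutex around the whole sequence:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_validate(res);	// resource now visible
 *	vmw_resource_unreserve(res, false, NULL, 0);	// eligible for LRU again
 */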
1048
1049/**
1050 * vmw_resource_backoff_reservation - Unreserve and unreference a
1051 *                                    backup buffer
 1052 *
1053 * @val_buf:        Backup buffer information.
1054 */
1055static void
1056vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1057{
1058	struct list_head val_list;
1059
1060	if (likely(val_buf->bo == NULL))
1061		return;
1062
1063	INIT_LIST_HEAD(&val_list);
1064	list_add_tail(&val_buf->head, &val_list);
1065	ttm_eu_backoff_reservation(NULL, &val_list);
1066	ttm_bo_unref(&val_buf->bo);
1067}
1068
1069/**
1070 * vmw_resource_do_evict - Evict a resource, and transfer its data
1071 *                         to a backup buffer.
1072 *
1073 * @res:            The resource to evict.
1074 * @interruptible:  Whether to wait interruptible.
1075 */
1076static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1077{
1078	struct ttm_validate_buffer val_buf;
1079	const struct vmw_res_func *func = res->func;
1080	int ret;
1081
1082	BUG_ON(!func->may_evict);
1083
1084	val_buf.bo = NULL;
1085	val_buf.shared = false;
1086	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1087	if (unlikely(ret != 0))
1088		return ret;
1089
1090	if (unlikely(func->unbind != NULL &&
1091		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1092		ret = func->unbind(res, res->res_dirty, &val_buf);
1093		if (unlikely(ret != 0))
1094			goto out_no_unbind;
1095		list_del_init(&res->mob_head);
1096	}
1097	ret = func->destroy(res);
1098	res->backup_dirty = true;
1099	res->res_dirty = false;
1100out_no_unbind:
1101	vmw_resource_backoff_reservation(&val_buf);
1102
1103	return ret;
1104}
1105
1106
1107/**
1108 * vmw_resource_validate - Make a resource up-to-date and visible
1109 *                         to the device.
1110 *
1111 * @res:            The resource to make visible to the device.
1112 *
 1113 * On successful return, any backup DMA buffer pointed to by @res->backup will
1114 * be reserved and validated.
1115 * On hardware resource shortage, this function will repeatedly evict
1116 * resources of the same type until the validation succeeds.
1117 */
1118int vmw_resource_validate(struct vmw_resource *res)
1119{
1120	int ret;
1121	struct vmw_resource *evict_res;
1122	struct vmw_private *dev_priv = res->dev_priv;
1123	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1124	struct ttm_validate_buffer val_buf;
1125	unsigned err_count = 0;
1126
1127	if (!res->func->create)
1128		return 0;
1129
1130	val_buf.bo = NULL;
1131	val_buf.shared = false;
1132	if (res->backup)
1133		val_buf.bo = &res->backup->base;
1134	do {
1135		ret = vmw_resource_do_validate(res, &val_buf);
1136		if (likely(ret != -EBUSY))
1137			break;
1138
1139		write_lock(&dev_priv->resource_lock);
1140		if (list_empty(lru_list) || !res->func->may_evict) {
 1141			DRM_ERROR("Out of device resources "
1142				  "for %s.\n", res->func->type_name);
1143			ret = -EBUSY;
1144			write_unlock(&dev_priv->resource_lock);
1145			break;
1146		}
1147
1148		evict_res = vmw_resource_reference
1149			(list_first_entry(lru_list, struct vmw_resource,
1150					  lru_head));
1151		list_del_init(&evict_res->lru_head);
1152
1153		write_unlock(&dev_priv->resource_lock);
1154
1155		ret = vmw_resource_do_evict(evict_res, true);
1156		if (unlikely(ret != 0)) {
1157			write_lock(&dev_priv->resource_lock);
1158			list_add_tail(&evict_res->lru_head, lru_list);
1159			write_unlock(&dev_priv->resource_lock);
1160			if (ret == -ERESTARTSYS ||
1161			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1162				vmw_resource_unreference(&evict_res);
1163				goto out_no_validate;
1164			}
1165		}
1166
1167		vmw_resource_unreference(&evict_res);
1168	} while (1);
1169
1170	if (unlikely(ret != 0))
1171		goto out_no_validate;
1172	else if (!res->func->needs_backup && res->backup) {
1173		list_del_init(&res->mob_head);
1174		vmw_dmabuf_unreference(&res->backup);
1175	}
1176
1177	return 0;
1178
1179out_no_validate:
1180	return ret;
1181}
1182
1183/**
1184 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1185 *                       object without unreserving it.
1186 *
1187 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1188 * @fence:          Pointer to the fence. If NULL, this function will
 1189 *                  insert a fence into the command stream.
1190 *
1191 * Contrary to the ttm_eu version of this function, it takes only
1192 * a single buffer object instead of a list, and it also doesn't
1193 * unreserve the buffer object, which needs to be done separately.
1194 */
1195void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1196			 struct vmw_fence_obj *fence)
1197{
1198	struct ttm_bo_device *bdev = bo->bdev;
1199
1200	struct vmw_private *dev_priv =
1201		container_of(bdev, struct vmw_private, bdev);
1202
1203	if (fence == NULL) {
1204		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1205		reservation_object_add_excl_fence(bo->resv, &fence->base);
1206		dma_fence_put(&fence->base);
1207	} else
1208		reservation_object_add_excl_fence(bo->resv, &fence->base);
1209}
1210
1211/**
1212 * vmw_resource_move_notify - TTM move_notify_callback
1213 *
1214 * @bo: The TTM buffer object about to move.
1215 * @mem: The struct ttm_mem_reg indicating to what memory
1216 *       region the move is taking place.
1217 *
1218 * Evicts the Guest Backed hardware resource if the backup
1219 * buffer is being moved out of MOB memory.
1220 * Note that this function should not race with the resource
1221 * validation code as long as it accesses only members of struct
1222 * resource that remain static while bo::res is !NULL and
1223 * while we have @bo reserved. struct resource::backup is *not* a
1224 * static member. The resource validation code will take care
1225 * to set @bo::res to NULL, while having @bo reserved when the
 1226 * buffer is no longer bound to the resource, so @bo::res can be
1227 * used to determine whether there is a need to unbind and whether
1228 * it is safe to unbind.
1229 */
1230void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1231			      struct ttm_mem_reg *mem)
1232{
1233	struct vmw_dma_buffer *dma_buf;
1234
1235	if (mem == NULL)
1236		return;
1237
1238	if (bo->destroy != vmw_dmabuf_bo_free &&
1239	    bo->destroy != vmw_user_dmabuf_destroy)
1240		return;
1241
1242	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1243
1244	/*
1245	 * Kill any cached kernel maps before move. An optimization could
1246	 * be to do this iff source or destination memory type is VRAM.
1247	 */
1248	vmw_dma_buffer_unmap(dma_buf);
1249
1250	if (mem->mem_type != VMW_PL_MOB) {
1251		struct vmw_resource *res, *n;
1252		struct ttm_validate_buffer val_buf;
1253
1254		val_buf.bo = bo;
1255		val_buf.shared = false;
1256
1257		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1258
1259			if (unlikely(res->func->unbind == NULL))
1260				continue;
1261
1262			(void) res->func->unbind(res, true, &val_buf);
1263			res->backup_dirty = true;
1264			res->res_dirty = false;
1265			list_del_init(&res->mob_head);
1266		}
1267
1268		(void) ttm_bo_wait(bo, false, false);
1269	}
1270}
1271
1272
1273/**
1274 * vmw_resource_swap_notify - swapout notify callback.
1275 *
1276 * @bo: The buffer object to be swapped out.
1277 */
1278void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
1279{
1280	if (bo->destroy != vmw_dmabuf_bo_free &&
1281	    bo->destroy != vmw_user_dmabuf_destroy)
1282		return;
1283
1284	/* Kill any cached kernel maps before swapout */
1285	vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
1286}
1287
1288
1289/**
1290 * vmw_query_readback_all - Read back cached query states
1291 *
1292 * @dx_query_mob: Buffer containing the DX query MOB
1293 *
1294 * Read back cached states from the device if they exist.  This function
 1295 * assumes that binding_mutex is held.
1296 */
1297int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1298{
1299	struct vmw_resource *dx_query_ctx;
1300	struct vmw_private *dev_priv;
1301	struct {
1302		SVGA3dCmdHeader header;
1303		SVGA3dCmdDXReadbackAllQuery body;
1304	} *cmd;
1305
1306
1307	/* No query bound, so do nothing */
1308	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1309		return 0;
1310
1311	dx_query_ctx = dx_query_mob->dx_query_ctx;
1312	dev_priv     = dx_query_ctx->dev_priv;
1313
1314	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1315	if (unlikely(cmd == NULL)) {
1316		DRM_ERROR("Failed reserving FIFO space for "
1317			  "query MOB read back.\n");
1318		return -ENOMEM;
1319	}
1320
1321	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1322	cmd->header.size = sizeof(cmd->body);
1323	cmd->body.cid    = dx_query_ctx->id;
1324
1325	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1326
 1327	/* Triggers a rebind the next time the affected context is bound */
1328	dx_query_mob->dx_query_ctx = NULL;
1329
1330	return 0;
1331}
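
/*
 * Editor's note: the function above follows the driver's general FIFO
 * submission pattern: reserve space for a header plus command body, fill
 * both in, then commit. A hedged sketch for a hypothetical command "FOO"
 * (SVGA3dCmdFoo and SVGA_3D_CMD_FOO are placeholder names):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdFoo body;
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	cmd->header.id   = SVGA_3D_CMD_FOO;
 *	cmd->header.size = sizeof(cmd->body);
 *	// ... fill cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */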
1332
1333
1334
1335/**
1336 * vmw_query_move_notify - Read back cached query states
1337 *
1338 * @bo: The TTM buffer object about to move.
1339 * @mem: The memory region @bo is moving to.
1340 *
1341 * Called before the query MOB is swapped out to read back cached query
1342 * states from the device.
1343 */
1344void vmw_query_move_notify(struct ttm_buffer_object *bo,
1345			   struct ttm_mem_reg *mem)
1346{
1347	struct vmw_dma_buffer *dx_query_mob;
1348	struct ttm_bo_device *bdev = bo->bdev;
1349	struct vmw_private *dev_priv;
1350
1351
1352	dev_priv = container_of(bdev, struct vmw_private, bdev);
1353
1354	mutex_lock(&dev_priv->binding_mutex);
1355
1356	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1357	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1358		mutex_unlock(&dev_priv->binding_mutex);
1359		return;
1360	}
1361
1362	/* If BO is being moved from MOB to system memory */
1363	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1364		struct vmw_fence_obj *fence;
1365
1366		(void) vmw_query_readback_all(dx_query_mob);
1367		mutex_unlock(&dev_priv->binding_mutex);
1368
1369		/* Create a fence and attach the BO to it */
1370		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1371		vmw_fence_single_bo(bo, fence);
1372
1373		if (fence != NULL)
1374			vmw_fence_obj_unreference(&fence);
1375
1376		(void) ttm_bo_wait(bo, false, false);
1377	} else
1378		mutex_unlock(&dev_priv->binding_mutex);
1379
1380}
1381
1382/**
1383 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1384 *
1385 * @res:            The resource being queried.
1386 */
1387bool vmw_resource_needs_backup(const struct vmw_resource *res)
1388{
1389	return res->func->needs_backup;
1390}
1391
1392/**
1393 * vmw_resource_evict_type - Evict all resources of a specific type
1394 *
1395 * @dev_priv:       Pointer to a device private struct
1396 * @type:           The resource type to evict
1397 *
 1398 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1399 * try to evict all evictable resources of a specific type.
1400 */
1401static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1402				    enum vmw_res_type type)
1403{
1404	struct list_head *lru_list = &dev_priv->res_lru[type];
1405	struct vmw_resource *evict_res;
1406	unsigned err_count = 0;
1407	int ret;
1408
1409	do {
1410		write_lock(&dev_priv->resource_lock);
1411
1412		if (list_empty(lru_list))
1413			goto out_unlock;
1414
1415		evict_res = vmw_resource_reference(
1416			list_first_entry(lru_list, struct vmw_resource,
1417					 lru_head));
1418		list_del_init(&evict_res->lru_head);
1419		write_unlock(&dev_priv->resource_lock);
1420
1421		ret = vmw_resource_do_evict(evict_res, false);
1422		if (unlikely(ret != 0)) {
1423			write_lock(&dev_priv->resource_lock);
1424			list_add_tail(&evict_res->lru_head, lru_list);
1425			write_unlock(&dev_priv->resource_lock);
1426			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1427				vmw_resource_unreference(&evict_res);
1428				return;
1429			}
1430		}
1431
1432		vmw_resource_unreference(&evict_res);
1433	} while (1);
1434
1435out_unlock:
1436	write_unlock(&dev_priv->resource_lock);
1437}
1438
1439/**
1440 * vmw_resource_evict_all - Evict all evictable resources
1441 *
1442 * @dev_priv:       Pointer to a device private struct
1443 *
 1444 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1445 * evict all evictable resources. In particular this means that all
1446 * guest-backed resources that are registered with the device are
1447 * evicted and the OTable becomes clean.
1448 */
1449void vmw_resource_evict_all(struct vmw_private *dev_priv)
1450{
1451	enum vmw_res_type type;
1452
1453	mutex_lock(&dev_priv->cmdbuf_mutex);
1454
1455	for (type = 0; type < vmw_res_max; ++type)
1456		vmw_resource_evict_type(dev_priv, type);
1457
1458	mutex_unlock(&dev_priv->cmdbuf_mutex);
1459}
1460
1461/**
1462 * vmw_resource_pin - Add a pin reference on a resource
1463 *
 1464 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps during pinning should be performed
 *                 while interruptible.
1465 *
1466 * This function adds a pin reference, and if needed validates the resource.
1467 * Having a pin reference means that the resource can never be evicted, and
1468 * its id will never change as long as there is a pin reference.
1469 * This function returns 0 on success and a negative error code on failure.
1470 */
1471int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1472{
1473	struct ttm_operation_ctx ctx = { interruptible, false };
1474	struct vmw_private *dev_priv = res->dev_priv;
1475	int ret;
1476
1477	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1478	mutex_lock(&dev_priv->cmdbuf_mutex);
1479	ret = vmw_resource_reserve(res, interruptible, false);
1480	if (ret)
1481		goto out_no_reserve;
1482
1483	if (res->pin_count == 0) {
1484		struct vmw_dma_buffer *vbo = NULL;
1485
1486		if (res->backup) {
1487			vbo = res->backup;
1488
1489			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1490			if (!vbo->pin_count) {
1491				ret = ttm_bo_validate
1492					(&vbo->base,
1493					 res->func->backup_placement,
1494					 &ctx);
1495				if (ret) {
1496					ttm_bo_unreserve(&vbo->base);
1497					goto out_no_validate;
1498				}
1499			}
1500
1501			/* Do we really need to pin the MOB as well? */
1502			vmw_bo_pin_reserved(vbo, true);
1503		}
1504		ret = vmw_resource_validate(res);
1505		if (vbo)
1506			ttm_bo_unreserve(&vbo->base);
1507		if (ret)
1508			goto out_no_validate;
1509	}
1510	res->pin_count++;
1511
1512out_no_validate:
1513	vmw_resource_unreserve(res, false, NULL, 0UL);
1514out_no_reserve:
1515	mutex_unlock(&dev_priv->cmdbuf_mutex);
1516	ttm_write_unlock(&dev_priv->reservation_sem);
1517
1518	return ret;
1519}
1520
1521/**
1522 * vmw_resource_unpin - Remove a pin reference from a resource
1523 *
1524 * @res: The resource to remove a pin reference from
1525 *
1526 * Having a pin reference means that the resource can never be evicted, and
1527 * its id will never change as long as there is a pin reference.
1528 */
1529void vmw_resource_unpin(struct vmw_resource *res)
1530{
1531	struct vmw_private *dev_priv = res->dev_priv;
1532	int ret;
1533
1534	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
1535	mutex_lock(&dev_priv->cmdbuf_mutex);
1536
1537	ret = vmw_resource_reserve(res, false, true);
1538	WARN_ON(ret);
1539
1540	WARN_ON(res->pin_count == 0);
1541	if (--res->pin_count == 0 && res->backup) {
1542		struct vmw_dma_buffer *vbo = res->backup;
1543
1544		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1545		vmw_bo_pin_reserved(vbo, false);
1546		ttm_bo_unreserve(&vbo->base);
1547	}
1548
1549	vmw_resource_unreserve(res, false, NULL, 0UL);
1550
1551	mutex_unlock(&dev_priv->cmdbuf_mutex);
1552	ttm_read_unlock(&dev_priv->reservation_sem);
1553}
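
/*
 * Editor's sketch pairing the two functions above: a pin keeps the
 * resource resident and its id stable until the matching unpin:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	// ... res cannot be evicted and res->id stays valid here ...
 *	vmw_resource_unpin(res);
 */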
1554
1555/**
1556 * vmw_res_type - Return the resource type
1557 *
1558 * @res: Pointer to the resource
1559 */
1560enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1561{
1562	return res->func->res_type;
1563}