v4.6
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include <drm/vmwgfx_drm.h>
  30#include <drm/ttm/ttm_object.h>
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/drmP.h>
  33#include "vmwgfx_resource_priv.h"
  34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_EVICT_ERR_COUNT 10
  37
  38struct vmw_user_dma_buffer {
  39	struct ttm_prime_object prime;
  40	struct vmw_dma_buffer dma;
  41};
  42
  43struct vmw_bo_user_rep {
  44	uint32_t handle;
  45	uint64_t map_handle;
  46};
  47
  48struct vmw_stream {
  49	struct vmw_resource res;
  50	uint32_t stream_id;
  51};
  52
  53struct vmw_user_stream {
  54	struct ttm_base_object base;
  55	struct vmw_stream stream;
  56};
  57
  58
  59static uint64_t vmw_user_stream_size;
  60
  61static const struct vmw_res_func vmw_stream_func = {
  62	.res_type = vmw_res_stream,
  63	.needs_backup = false,
  64	.may_evict = false,
  65	.type_name = "video streams",
  66	.backup_placement = NULL,
  67	.create = NULL,
  68	.destroy = NULL,
  69	.bind = NULL,
  70	.unbind = NULL
  71};
  72
  73static inline struct vmw_dma_buffer *
  74vmw_dma_buffer(struct ttm_buffer_object *bo)
  75{
  76	return container_of(bo, struct vmw_dma_buffer, base);
  77}
  78
  79static inline struct vmw_user_dma_buffer *
  80vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  81{
  82	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  83	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  84}
  85
  86struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  87{
  88	kref_get(&res->kref);
  89	return res;
  90}
  91
  92struct vmw_resource *
  93vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  94{
  95	return kref_get_unless_zero(&res->kref) ? res : NULL;
  96}
  97
  98/**
  99 * vmw_resource_release_id - release a resource id to the id manager.
 100 *
 101 * @res: Pointer to the resource.
 102 *
 103 * Release the resource id to the resource id manager and set it to -1
 104 */
 105void vmw_resource_release_id(struct vmw_resource *res)
 106{
 107	struct vmw_private *dev_priv = res->dev_priv;
 108	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 109
 110	write_lock(&dev_priv->resource_lock);
 111	if (res->id != -1)
 112		idr_remove(idr, res->id);
 113	res->id = -1;
 114	write_unlock(&dev_priv->resource_lock);
 115}
 116
 117static void vmw_resource_release(struct kref *kref)
 118{
 119	struct vmw_resource *res =
 120	    container_of(kref, struct vmw_resource, kref);
 121	struct vmw_private *dev_priv = res->dev_priv;
 122	int id;
 123	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 124
 125	write_lock(&dev_priv->resource_lock);
 126	res->avail = false;
 127	list_del_init(&res->lru_head);
 128	write_unlock(&dev_priv->resource_lock);
 129	if (res->backup) {
 130		struct ttm_buffer_object *bo = &res->backup->base;
 131
 132		ttm_bo_reserve(bo, false, false, false, NULL);
 133		if (!list_empty(&res->mob_head) &&
 134		    res->func->unbind != NULL) {
 135			struct ttm_validate_buffer val_buf;
 136
 137			val_buf.bo = bo;
 138			val_buf.shared = false;
 139			res->func->unbind(res, false, &val_buf);
 140		}
 141		res->backup_dirty = false;
 142		list_del_init(&res->mob_head);
 143		ttm_bo_unreserve(bo);
 144		vmw_dmabuf_unreference(&res->backup);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	write_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	write_unlock(&dev_priv->resource_lock);
 164}
 165
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
 168	struct vmw_resource *res = *p_res;
 169
 170	*p_res = NULL;
 171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
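
The two helpers above are plain kref wrappers. A minimal usage sketch (the function name example_hold_resource() is invented for illustration and is not part of the driver):

static void example_hold_resource(struct vmw_resource *res)
{
	/* Take an extra reference while the resource is being worked on. */
	struct vmw_resource *held = vmw_resource_reference(res);

	/* ... use @held ... */

	/* Drop the reference; the helper also clears the local pointer. */
	vmw_resource_unreference(&held);
}
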
 174
 175/**
  176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 177 *
 178 * @res: Pointer to the resource.
 179 *
 180 * Allocate the lowest free resource from the resource manager, and set
 181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	write_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	write_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @obj_type:       Resource object type.
 209 * @delay_id:       Boolean whether to defer device id allocation until
 210 *                  the first validation.
 211 * @res_free:       Resource destructor.
 212 * @func:           Resource function table.
 213 */
 214int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 215		      bool delay_id,
 216		      void (*res_free) (struct vmw_resource *res),
 217		      const struct vmw_res_func *func)
 218{
 219	kref_init(&res->kref);
 220	res->hw_destroy = NULL;
 221	res->res_free = res_free;
 222	res->avail = false;
 223	res->dev_priv = dev_priv;
 224	res->func = func;
 225	INIT_LIST_HEAD(&res->lru_head);
 226	INIT_LIST_HEAD(&res->mob_head);
 227	INIT_LIST_HEAD(&res->binding_head);
 228	res->id = -1;
 229	res->backup = NULL;
 230	res->backup_offset = 0;
 231	res->backup_dirty = false;
 232	res->res_dirty = false;
 233	if (delay_id)
 234		return 0;
 235	else
 236		return vmw_resource_alloc_id(res);
 237}
 238
 239/**
 240 * vmw_resource_activate
 241 *
 242 * @res:        Pointer to the newly created resource
 243 * @hw_destroy: Destroy function. NULL if none.
 244 *
 245 * Activate a resource after the hardware has been made aware of it.
  246 * Set the destroy function to @hw_destroy. Typically this frees the
 247 * resource and destroys the hardware resources associated with it.
 248 * Activate basically means that the function vmw_resource_lookup will
 249 * find it.
 250 */
 251void vmw_resource_activate(struct vmw_resource *res,
 252			   void (*hw_destroy) (struct vmw_resource *))
 253{
 254	struct vmw_private *dev_priv = res->dev_priv;
 255
 256	write_lock(&dev_priv->resource_lock);
 257	res->avail = true;
 258	res->hw_destroy = hw_destroy;
 259	write_unlock(&dev_priv->resource_lock);
 260}
 261
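
Taken together, vmw_resource_init() and vmw_resource_activate() are used the way vmw_stream_init() below uses them: initialize the software state, make the device aware of the resource, then activate it so vmw_resource_lookup() can find it. A condensed sketch, where my_res_func, my_res_free and my_hw_destroy are hypothetical placeholders for a resource type's function table and destructors:

static int example_create_resource(struct vmw_private *dev_priv,
				   struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_init(dev_priv, res, false /* delay_id */,
				my_res_free, &my_res_func);
	if (unlikely(ret != 0))
		return ret;

	/* ... issue the device commands that create the resource ... */

	vmw_resource_activate(res, my_hw_destroy);
	return 0;
}
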
 262static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 263						struct idr *idr, int id)
 264{
 265	struct vmw_resource *res;
 266
 267	read_lock(&dev_priv->resource_lock);
 268	res = idr_find(idr, id);
 269	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 270		res = NULL;
 271
 272	read_unlock(&dev_priv->resource_lock);
 273
 274	if (unlikely(res == NULL))
 275		return NULL;
 276
 277	return res;
 278}
 279
 280/**
 281 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 282 * TTM user-space handle and perform basic type checks
 283 *
 284 * @dev_priv:     Pointer to a device private struct
 285 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 286 * @handle:       The TTM user-space handle
 287 * @converter:    Pointer to an object describing the resource type
 288 * @p_res:        On successful return the location pointed to will contain
 289 *                a pointer to a refcounted struct vmw_resource.
 290 *
 291 * If the handle can't be found or is associated with an incorrect resource
 292 * type, -EINVAL will be returned.
 293 */
 294int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 295				    struct ttm_object_file *tfile,
 296				    uint32_t handle,
 297				    const struct vmw_user_resource_conv
 298				    *converter,
 299				    struct vmw_resource **p_res)
 300{
 301	struct ttm_base_object *base;
 302	struct vmw_resource *res;
 303	int ret = -EINVAL;
 304
 305	base = ttm_base_object_lookup(tfile, handle);
 306	if (unlikely(base == NULL))
 307		return -EINVAL;
 308
 309	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 310		goto out_bad_resource;
 311
 312	res = converter->base_obj_to_res(base);
 313
 314	read_lock(&dev_priv->resource_lock);
 315	if (!res->avail || res->res_free != converter->res_free) {
 316		read_unlock(&dev_priv->resource_lock);
 317		goto out_bad_resource;
 318	}
 319
 320	kref_get(&res->kref);
 321	read_unlock(&dev_priv->resource_lock);
 322
 323	*p_res = res;
 324	ret = 0;
 325
 326out_bad_resource:
 327	ttm_base_object_unref(&base);
 328
 329	return ret;
 330}
 331
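
A sketch of a typical caller of the helper above, modelled on its use later in this file; user_surface_converter is the converter object used by the surface ioctls, and example_lookup_surface() is an invented name:

static int example_lookup_surface(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter, &res);
	if (unlikely(ret != 0))
		return ret;	/* unknown handle or wrong object type */

	/* ... use the refcounted resource ... */

	vmw_resource_unreference(&res);
	return 0;
}
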
 332/**
  333 * Helper function that looks up either a surface or a dmabuf.
  334 *
  335 * The pointers pointed to by @out_surf and @out_buf must be NULL.
 336 */
 337int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 338			   struct ttm_object_file *tfile,
 339			   uint32_t handle,
 340			   struct vmw_surface **out_surf,
 341			   struct vmw_dma_buffer **out_buf)
 342{
 343	struct vmw_resource *res;
 344	int ret;
 345
 346	BUG_ON(*out_surf || *out_buf);
 347
 348	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 349					      user_surface_converter,
 350					      &res);
 351	if (!ret) {
 352		*out_surf = vmw_res_to_srf(res);
 353		return 0;
 354	}
 355
 356	*out_surf = NULL;
 357	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 358	return ret;
 359}
 360
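
As the comment says, both output pointers must be NULL on entry and exactly one of them is set on success. A hedged caller sketch (example_lookup_either() is an invented name; vmw_surface_unreference() is the surface counterpart declared in vmwgfx_drv.h):

static int example_lookup_either(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t handle)
{
	struct vmw_surface *surf = NULL;	/* must be NULL on entry */
	struct vmw_dma_buffer *buf = NULL;	/* must be NULL on entry */
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (ret)
		return ret;

	if (surf) {
		/* ... the handle named a surface ... */
		vmw_surface_unreference(&surf);
	} else {
		/* ... the handle named a dma buffer ... */
		vmw_dmabuf_unreference(&buf);
	}
	return 0;
}
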
 361/**
 362 * Buffer management.
 363 */
 364
 365/**
 366 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 367 *
 368 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 369 * @size: The requested buffer size.
 370 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 371 */
 372static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 373				  bool user)
 374{
 375	static size_t struct_size, user_struct_size;
 376	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 377	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 378
 379	if (unlikely(struct_size == 0)) {
 380		size_t backend_size = ttm_round_pot(vmw_tt_size);
 381
 382		struct_size = backend_size +
 383			ttm_round_pot(sizeof(struct vmw_dma_buffer));
 384		user_struct_size = backend_size +
 385			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
 386	}
 387
 388	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 389		page_array_size +=
 390			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 391
 392	return ((user) ? user_struct_size : struct_size) +
 393		page_array_size;
 394}
 395
 396void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 397{
 398	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 399
 400	kfree(vmw_bo);
 401}
 402
 403static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 404{
 405	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 406
 407	ttm_prime_object_kfree(vmw_user_bo, prime);
 408}
 409
 410int vmw_dmabuf_init(struct vmw_private *dev_priv,
 411		    struct vmw_dma_buffer *vmw_bo,
 412		    size_t size, struct ttm_placement *placement,
 413		    bool interruptible,
 414		    void (*bo_free) (struct ttm_buffer_object *bo))
 415{
 416	struct ttm_bo_device *bdev = &dev_priv->bdev;
 417	size_t acc_size;
 418	int ret;
 419	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 420
 421	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 422
 423	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 424	memset(vmw_bo, 0, sizeof(*vmw_bo));
 425
 426	INIT_LIST_HEAD(&vmw_bo->res_list);
 427
 428	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 429			  ttm_bo_type_device, placement,
 430			  0, interruptible,
 431			  NULL, acc_size, NULL, NULL, bo_free);
 432	return ret;
 433}
 434
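
For a kernel-internal buffer the usual pattern, mirrored by vmw_resource_buf_alloc() further down, is kzalloc() followed by vmw_dmabuf_init() with vmw_dmabuf_bo_free as the destructor; note that on failure ttm_bo_init() has already invoked the destructor, so the caller must not free the object again. A sketch (example_alloc_internal_bo() is an invented name):

static int example_alloc_internal_bo(struct vmw_private *dev_priv,
				     size_t size,
				     struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_placement,
			      true, &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;	/* the destructor has already freed @buf */

	*out = buf;
	return 0;
}
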
 435static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 436{
 437	struct vmw_user_dma_buffer *vmw_user_bo;
 438	struct ttm_base_object *base = *p_base;
 439	struct ttm_buffer_object *bo;
 440
 441	*p_base = NULL;
 442
 443	if (unlikely(base == NULL))
 444		return;
 445
 446	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 447				   prime.base);
 448	bo = &vmw_user_bo->dma.base;
 449	ttm_bo_unref(&bo);
 450}
 451
 452static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 453					    enum ttm_ref_type ref_type)
 454{
 455	struct vmw_user_dma_buffer *user_bo;
 456	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
 457
 458	switch (ref_type) {
 459	case TTM_REF_SYNCCPU_WRITE:
 460		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 461		break;
 462	default:
 463		BUG();
 464	}
 465}
 466
 467/**
 468 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 469 *
 470 * @dev_priv: Pointer to a struct device private.
 471 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 472 * object.
 473 * @size: Size of the dma buffer.
 474 * @shareable: Boolean whether the buffer is shareable with other open files.
 475 * @handle: Pointer to where the handle value should be assigned.
 476 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 477 * should be assigned.
 478 */
 479int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 480			  struct ttm_object_file *tfile,
 481			  uint32_t size,
 482			  bool shareable,
 483			  uint32_t *handle,
 484			  struct vmw_dma_buffer **p_dma_buf,
 485			  struct ttm_base_object **p_base)
 486{
 487	struct vmw_user_dma_buffer *user_bo;
 488	struct ttm_buffer_object *tmp;
 489	int ret;
 490
 491	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 492	if (unlikely(user_bo == NULL)) {
 493		DRM_ERROR("Failed to allocate a buffer.\n");
 494		return -ENOMEM;
 495	}
 496
 497	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
 498			      (dev_priv->has_mob) ?
 499			      &vmw_sys_placement :
 500			      &vmw_vram_sys_placement, true,
 501			      &vmw_user_dmabuf_destroy);
 502	if (unlikely(ret != 0))
 503		return ret;
 504
 505	tmp = ttm_bo_reference(&user_bo->dma.base);
 506	ret = ttm_prime_object_init(tfile,
 507				    size,
 508				    &user_bo->prime,
 509				    shareable,
 510				    ttm_buffer_type,
 511				    &vmw_user_dmabuf_release,
 512				    &vmw_user_dmabuf_ref_obj_release);
 513	if (unlikely(ret != 0)) {
 514		ttm_bo_unref(&tmp);
 515		goto out_no_base_object;
 516	}
 517
 518	*p_dma_buf = &user_bo->dma;
 519	if (p_base) {
 520		*p_base = &user_bo->prime.base;
 521		kref_get(&(*p_base)->refcount);
 522	}
 523	*handle = user_bo->prime.base.hash.key;
 524
 525out_no_base_object:
 526	return ret;
 527}
 528
 529/**
 530 * vmw_user_dmabuf_verify_access - verify access permissions on this
 531 * buffer object.
 532 *
 533 * @bo: Pointer to the buffer object being accessed
 534 * @tfile: Identifying the caller.
 535 */
 536int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 537				  struct ttm_object_file *tfile)
 538{
 539	struct vmw_user_dma_buffer *vmw_user_bo;
 540
 541	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 542		return -EPERM;
 543
 544	vmw_user_bo = vmw_user_dma_buffer(bo);
 545
 546	/* Check that the caller has opened the object. */
 547	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 548		return 0;
 549
 550	DRM_ERROR("Could not grant buffer access.\n");
 551	return -EPERM;
 552}
 553
 554/**
 555 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 556 * access, idling previous GPU operations on the buffer and optionally
 557 * blocking it for further command submissions.
 558 *
 559 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 560 * @tfile: Identifying the caller.
 561 * @flags: Flags indicating how the grab should be performed.
 562 *
 563 * A blocking grab will be automatically released when @tfile is closed.
 564 */
 565static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 566					struct ttm_object_file *tfile,
 567					uint32_t flags)
 568{
 569	struct ttm_buffer_object *bo = &user_bo->dma.base;
 570	bool existed;
 571	int ret;
 572
 573	if (flags & drm_vmw_synccpu_allow_cs) {
 574		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 575		long lret;
 576
 577		if (nonblock)
 578			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
 579
 580		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
 581		if (!lret)
 582			return -EBUSY;
 583		else if (lret < 0)
 584			return lret;
 585		return 0;
 586	}
 587
 588	ret = ttm_bo_synccpu_write_grab
 589		(bo, !!(flags & drm_vmw_synccpu_dontblock));
 590	if (unlikely(ret != 0))
 591		return ret;
 592
 593	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 594				 TTM_REF_SYNCCPU_WRITE, &existed);
 595	if (ret != 0 || existed)
 596		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 597
 598	return ret;
 599}
 600
 601/**
 602 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 603 * and unblock command submission on the buffer if blocked.
 604 *
 605 * @handle: Handle identifying the buffer object.
 606 * @tfile: Identifying the caller.
 607 * @flags: Flags indicating the type of release.
 608 */
 609static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 610					   struct ttm_object_file *tfile,
 611					   uint32_t flags)
 612{
 613	if (!(flags & drm_vmw_synccpu_allow_cs))
 614		return ttm_ref_object_base_unref(tfile, handle,
 615						 TTM_REF_SYNCCPU_WRITE);
 616
 617	return 0;
 618}
 619
 620/**
 621 * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
 622 * functionality.
 623 *
 624 * @dev: Identifies the drm device.
 625 * @data: Pointer to the ioctl argument.
 626 * @file_priv: Identifies the caller.
 627 *
 628 * This function checks the ioctl arguments for validity and calls the
 629 * relevant synccpu functions.
 630 */
 631int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 632				  struct drm_file *file_priv)
 633{
 634	struct drm_vmw_synccpu_arg *arg =
 635		(struct drm_vmw_synccpu_arg *) data;
 636	struct vmw_dma_buffer *dma_buf;
 637	struct vmw_user_dma_buffer *user_bo;
 638	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 639	struct ttm_base_object *buffer_base;
 640	int ret;
 641
 642	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 643	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 644			       drm_vmw_synccpu_dontblock |
 645			       drm_vmw_synccpu_allow_cs)) != 0) {
 646		DRM_ERROR("Illegal synccpu flags.\n");
 647		return -EINVAL;
 648	}
 649
 650	switch (arg->op) {
 651	case drm_vmw_synccpu_grab:
 652		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
 653					     &buffer_base);
 654		if (unlikely(ret != 0))
 655			return ret;
 656
 657		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
 658				       dma);
 659		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 660		vmw_dmabuf_unreference(&dma_buf);
 661		ttm_base_object_unref(&buffer_base);
 662		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 663			     ret != -EBUSY)) {
 664			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 665				  (unsigned int) arg->handle);
 666			return ret;
 667		}
 668		break;
 669	case drm_vmw_synccpu_release:
 670		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
 671						      arg->flags);
 672		if (unlikely(ret != 0)) {
 673			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 674				  (unsigned int) arg->handle);
 675			return ret;
 676		}
 677		break;
 678	default:
 679		DRM_ERROR("Invalid synccpu operation.\n");
 680		return -EINVAL;
 681	}
 682
 683	return 0;
 684}
 685
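
From user space this ioctl is reached through the driver's DRM_VMW_SYNCCPU command. A rough sketch of the grab/release pairing, assuming the DRM_VMW_SYNCCPU define from vmwgfx_drm.h, libdrm's drmCommandWrite(), an open vmwgfx file descriptor fd and a buffer handle bo_handle; error handling is omitted:

	struct drm_vmw_synccpu_arg arg = {
		.handle = bo_handle,
		.op     = drm_vmw_synccpu_grab,
		.flags  = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
	};

	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	/* ... CPU access to the buffer contents ... */
	arg.op = drm_vmw_synccpu_release;
	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
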
 686int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 687			   struct drm_file *file_priv)
 688{
 689	struct vmw_private *dev_priv = vmw_priv(dev);
 690	union drm_vmw_alloc_dmabuf_arg *arg =
 691	    (union drm_vmw_alloc_dmabuf_arg *)data;
 692	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 693	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 694	struct vmw_dma_buffer *dma_buf;
 695	uint32_t handle;
 696	int ret;
 697
 698	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 699	if (unlikely(ret != 0))
 700		return ret;
 701
 702	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 703				    req->size, false, &handle, &dma_buf,
 704				    NULL);
 705	if (unlikely(ret != 0))
 706		goto out_no_dmabuf;
 707
 708	rep->handle = handle;
 709	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 710	rep->cur_gmr_id = handle;
 711	rep->cur_gmr_offset = 0;
 712
 713	vmw_dmabuf_unreference(&dma_buf);
 714
 715out_no_dmabuf:
 716	ttm_read_unlock(&dev_priv->reservation_sem);
 717
 718	return ret;
 719}
 720
 721int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 722			   struct drm_file *file_priv)
 723{
 724	struct drm_vmw_unref_dmabuf_arg *arg =
 725	    (struct drm_vmw_unref_dmabuf_arg *)data;
 726
 727	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 728					 arg->handle,
 729					 TTM_REF_USAGE);
 730}
 731
 732int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 733			   uint32_t handle, struct vmw_dma_buffer **out,
 734			   struct ttm_base_object **p_base)
 735{
 736	struct vmw_user_dma_buffer *vmw_user_bo;
 737	struct ttm_base_object *base;
 738
 739	base = ttm_base_object_lookup(tfile, handle);
 740	if (unlikely(base == NULL)) {
 741		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 742		       (unsigned long)handle);
 743		return -ESRCH;
 744	}
 745
 746	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 747		ttm_base_object_unref(&base);
 748		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 749		       (unsigned long)handle);
 750		return -EINVAL;
 751	}
 752
 753	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 754				   prime.base);
 755	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 756	if (p_base)
 757		*p_base = base;
 758	else
 759		ttm_base_object_unref(&base);
 760	*out = &vmw_user_bo->dma;
 761
 762	return 0;
 763}
 764
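
The synccpu ioctl above already shows the intended call pattern for this lookup: take the references, use the buffer, then drop both the buffer reference and, when one was requested, the base-object reference. Condensed into a sketch (example_use_user_bo() is an invented name):

static int example_use_user_bo(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_dma_buffer *buf;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &buf, &base);
	if (unlikely(ret != 0))
		return ret;

	/* ... operate on @buf while the references are held ... */

	vmw_dmabuf_unreference(&buf);
	ttm_base_object_unref(&base);
	return 0;
}
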
 765int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 766			      struct vmw_dma_buffer *dma_buf,
 767			      uint32_t *handle)
 768{
 769	struct vmw_user_dma_buffer *user_bo;
 770
 771	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 772		return -EINVAL;
 773
 774	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
 775
 776	*handle = user_bo->prime.base.hash.key;
 777	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 778				  TTM_REF_USAGE, NULL);
 779}
 780
 781/*
 782 * Stream management
 783 */
 784
 785static void vmw_stream_destroy(struct vmw_resource *res)
 786{
 787	struct vmw_private *dev_priv = res->dev_priv;
 788	struct vmw_stream *stream;
 789	int ret;
 790
 791	DRM_INFO("%s: unref\n", __func__);
 792	stream = container_of(res, struct vmw_stream, res);
 793
 794	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
 795	WARN_ON(ret != 0);
 796}
 797
 798static int vmw_stream_init(struct vmw_private *dev_priv,
 799			   struct vmw_stream *stream,
 800			   void (*res_free) (struct vmw_resource *res))
 801{
 802	struct vmw_resource *res = &stream->res;
 803	int ret;
 804
 805	ret = vmw_resource_init(dev_priv, res, false, res_free,
 806				&vmw_stream_func);
 807
 808	if (unlikely(ret != 0)) {
 809		if (res_free == NULL)
 810			kfree(stream);
 811		else
 812			res_free(&stream->res);
 813		return ret;
 814	}
 815
 816	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
 817	if (ret) {
 818		vmw_resource_unreference(&res);
 819		return ret;
 820	}
 821
 822	DRM_INFO("%s: claimed\n", __func__);
 823
 824	vmw_resource_activate(&stream->res, vmw_stream_destroy);
 825	return 0;
 826}
 827
 828static void vmw_user_stream_free(struct vmw_resource *res)
 829{
 830	struct vmw_user_stream *stream =
 831	    container_of(res, struct vmw_user_stream, stream.res);
 832	struct vmw_private *dev_priv = res->dev_priv;
 833
 834	ttm_base_object_kfree(stream, base);
 835	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 836			    vmw_user_stream_size);
 837}
 838
 839/**
 840 * This function is called when user space has no more references on the
 841 * base object. It releases the base-object's reference on the resource object.
 842 */
 843
 844static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
 845{
 846	struct ttm_base_object *base = *p_base;
 847	struct vmw_user_stream *stream =
 848	    container_of(base, struct vmw_user_stream, base);
 849	struct vmw_resource *res = &stream->stream.res;
 850
 851	*p_base = NULL;
 852	vmw_resource_unreference(&res);
 853}
 854
 855int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 856			   struct drm_file *file_priv)
 857{
 858	struct vmw_private *dev_priv = vmw_priv(dev);
 859	struct vmw_resource *res;
 860	struct vmw_user_stream *stream;
 861	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 862	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 863	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 864	int ret = 0;
 865
 866
 867	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 868	if (unlikely(res == NULL))
 869		return -EINVAL;
 870
 871	if (res->res_free != &vmw_user_stream_free) {
 872		ret = -EINVAL;
 873		goto out;
 874	}
 875
 876	stream = container_of(res, struct vmw_user_stream, stream.res);
 877	if (stream->base.tfile != tfile) {
 878		ret = -EINVAL;
 879		goto out;
 880	}
 881
 882	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
 883out:
 884	vmw_resource_unreference(&res);
 885	return ret;
 886}
 887
 888int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 889			   struct drm_file *file_priv)
 890{
 891	struct vmw_private *dev_priv = vmw_priv(dev);
 892	struct vmw_user_stream *stream;
 893	struct vmw_resource *res;
 894	struct vmw_resource *tmp;
 895	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 896	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 897	int ret;
 898
 899	/*
  900	 * Approximate idr memory usage with 128 bytes. It will be limited
  901	 * by the maximum number of streams anyway.
 902	 */
 903
 904	if (unlikely(vmw_user_stream_size == 0))
 905		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 906
 907	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 908	if (unlikely(ret != 0))
 909		return ret;
 910
 911	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 912				   vmw_user_stream_size,
 913				   false, true);
 914	ttm_read_unlock(&dev_priv->reservation_sem);
 915	if (unlikely(ret != 0)) {
 916		if (ret != -ERESTARTSYS)
 917			DRM_ERROR("Out of graphics memory for stream"
 918				  " creation.\n");
 919
 920		goto out_ret;
 921	}
 922
 923	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 924	if (unlikely(stream == NULL)) {
 925		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 926				    vmw_user_stream_size);
 927		ret = -ENOMEM;
 928		goto out_ret;
 929	}
 930
 931	res = &stream->stream.res;
 932	stream->base.shareable = false;
 933	stream->base.tfile = NULL;
 934
 935	/*
 936	 * From here on, the destructor takes over resource freeing.
 937	 */
 938
 939	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 940	if (unlikely(ret != 0))
 941		goto out_ret;
 942
 943	tmp = vmw_resource_reference(res);
 944	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
 945				   &vmw_user_stream_base_release, NULL);
 946
 947	if (unlikely(ret != 0)) {
 948		vmw_resource_unreference(&tmp);
 949		goto out_err;
 950	}
 951
 952	arg->stream_id = res->id;
 953out_err:
 954	vmw_resource_unreference(&res);
 955out_ret:
 956	return ret;
 957}
 958
 959int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 960			   struct ttm_object_file *tfile,
 961			   uint32_t *inout_id, struct vmw_resource **out)
 962{
 963	struct vmw_user_stream *stream;
 964	struct vmw_resource *res;
 965	int ret;
 966
 967	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
 968				  *inout_id);
 969	if (unlikely(res == NULL))
 970		return -EINVAL;
 971
 972	if (res->res_free != &vmw_user_stream_free) {
 973		ret = -EINVAL;
 974		goto err_ref;
 975	}
 976
 977	stream = container_of(res, struct vmw_user_stream, stream.res);
 978	if (stream->base.tfile != tfile) {
 979		ret = -EPERM;
 980		goto err_ref;
 981	}
 982
 983	*inout_id = stream->stream.stream_id;
 984	*out = res;
 985	return 0;
 986err_ref:
 987	vmw_resource_unreference(&res);
 988	return ret;
 989}
 990
 991
 992/**
 993 * vmw_dumb_create - Create a dumb kms buffer
 994 *
 995 * @file_priv: Pointer to a struct drm_file identifying the caller.
 996 * @dev: Pointer to the drm device.
 997 * @args: Pointer to a struct drm_mode_create_dumb structure
 998 *
 999 * This is a driver callback for the core drm create_dumb functionality.
1000 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
1001 * that the arguments have a different format.
1002 */
1003int vmw_dumb_create(struct drm_file *file_priv,
1004		    struct drm_device *dev,
1005		    struct drm_mode_create_dumb *args)
1006{
1007	struct vmw_private *dev_priv = vmw_priv(dev);
1008	struct vmw_dma_buffer *dma_buf;
1009	int ret;
1010
1011	args->pitch = args->width * ((args->bpp + 7) / 8);
1012	args->size = args->pitch * args->height;
1013
1014	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1015	if (unlikely(ret != 0))
1016		return ret;
1017
1018	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1019				    args->size, false, &args->handle,
1020				    &dma_buf, NULL);
1021	if (unlikely(ret != 0))
1022		goto out_no_dmabuf;
1023
1024	vmw_dmabuf_unreference(&dma_buf);
1025out_no_dmabuf:
1026	ttm_read_unlock(&dev_priv->reservation_sem);
1027	return ret;
1028}
1029
1030/**
1031 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1032 *
1033 * @file_priv: Pointer to a struct drm_file identifying the caller.
1034 * @dev: Pointer to the drm device.
1035 * @handle: Handle identifying the dumb buffer.
1036 * @offset: The address space offset returned.
1037 *
1038 * This is a driver callback for the core drm dumb_map_offset functionality.
1039 */
1040int vmw_dumb_map_offset(struct drm_file *file_priv,
1041			struct drm_device *dev, uint32_t handle,
1042			uint64_t *offset)
1043{
1044	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1045	struct vmw_dma_buffer *out_buf;
1046	int ret;
1047
1048	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1049	if (ret != 0)
1050		return -EINVAL;
1051
1052	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1053	vmw_dmabuf_unreference(&out_buf);
1054	return 0;
1055}
1056
1057/**
 1058 * vmw_dumb_destroy - Destroy a dumb buffer
1059 *
1060 * @file_priv: Pointer to a struct drm_file identifying the caller.
1061 * @dev: Pointer to the drm device.
1062 * @handle: Handle identifying the dumb buffer.
1063 *
1064 * This is a driver callback for the core drm dumb_destroy functionality.
1065 */
1066int vmw_dumb_destroy(struct drm_file *file_priv,
1067		     struct drm_device *dev,
1068		     uint32_t handle)
1069{
1070	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1071					 handle, TTM_REF_USAGE);
1072}
1073
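
The three callbacks above back the generic KMS dumb-buffer ioctls. A user-space sketch of the full round trip using the standard DRM ioctls; fd is assumed to be an open vmwgfx DRM node and error handling is omitted:

	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	void *ptr;

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	/* -> vmw_dumb_create */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* -> vmw_dumb_map_offset */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	/* ... draw into ptr ... */
	munmap(ptr, create.size);
	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);	/* -> vmw_dumb_destroy */
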
1074/**
1075 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1076 *
1077 * @res:            The resource for which to allocate a backup buffer.
1078 * @interruptible:  Whether any sleeps during allocation should be
1079 *                  performed while interruptible.
1080 */
1081static int vmw_resource_buf_alloc(struct vmw_resource *res,
1082				  bool interruptible)
1083{
1084	unsigned long size =
1085		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
1086	struct vmw_dma_buffer *backup;
1087	int ret;
1088
1089	if (likely(res->backup)) {
1090		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1091		return 0;
1092	}
1093
1094	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1095	if (unlikely(backup == NULL))
1096		return -ENOMEM;
1097
1098	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1099			      res->func->backup_placement,
1100			      interruptible,
1101			      &vmw_dmabuf_bo_free);
1102	if (unlikely(ret != 0))
1103		goto out_no_dmabuf;
1104
1105	res->backup = backup;
1106
1107out_no_dmabuf:
1108	return ret;
1109}
1110
1111/**
1112 * vmw_resource_do_validate - Make a resource up-to-date and visible
1113 *                            to the device.
1114 *
1115 * @res:            The resource to make visible to the device.
1116 * @val_buf:        Information about a buffer possibly
1117 *                  containing backup data if a bind operation is needed.
1118 *
1119 * On hardware resource shortage, this function returns -EBUSY and
1120 * should be retried once resources have been freed up.
1121 */
1122static int vmw_resource_do_validate(struct vmw_resource *res,
1123				    struct ttm_validate_buffer *val_buf)
1124{
1125	int ret = 0;
1126	const struct vmw_res_func *func = res->func;
1127
1128	if (unlikely(res->id == -1)) {
1129		ret = func->create(res);
1130		if (unlikely(ret != 0))
1131			return ret;
1132	}
1133
1134	if (func->bind &&
1135	    ((func->needs_backup && list_empty(&res->mob_head) &&
1136	      val_buf->bo != NULL) ||
1137	     (!func->needs_backup && val_buf->bo != NULL))) {
1138		ret = func->bind(res, val_buf);
1139		if (unlikely(ret != 0))
1140			goto out_bind_failed;
1141		if (func->needs_backup)
1142			list_add_tail(&res->mob_head, &res->backup->res_list);
1143	}
1144
1145	/*
1146	 * Only do this on write operations, and move to
1147	 * vmw_resource_unreserve if it can be called after
1148	 * backup buffers have been unreserved. Otherwise
1149	 * sort out locking.
1150	 */
1151	res->res_dirty = true;
1152
1153	return 0;
1154
1155out_bind_failed:
1156	func->destroy(res);
1157
1158	return ret;
1159}
1160
1161/**
1162 * vmw_resource_unreserve - Unreserve a resource previously reserved for
1163 * command submission.
1164 *
1165 * @res:               Pointer to the struct vmw_resource to unreserve.
1166 * @switch_backup:     Backup buffer has been switched.
1167 * @new_backup:        Pointer to new backup buffer if command submission
1168 *                     switched. May be NULL.
1169 * @new_backup_offset: New backup offset if @switch_backup is true.
1170 *
1171 * Currently unreserving a resource means putting it back on the device's
1172 * resource lru list, so that it can be evicted if necessary.
1173 */
1174void vmw_resource_unreserve(struct vmw_resource *res,
1175			    bool switch_backup,
1176			    struct vmw_dma_buffer *new_backup,
1177			    unsigned long new_backup_offset)
1178{
1179	struct vmw_private *dev_priv = res->dev_priv;
1180
1181	if (!list_empty(&res->lru_head))
1182		return;
1183
1184	if (switch_backup && new_backup != res->backup) {
1185		if (res->backup) {
1186			lockdep_assert_held(&res->backup->base.resv->lock.base);
1187			list_del_init(&res->mob_head);
1188			vmw_dmabuf_unreference(&res->backup);
1189		}
1190
1191		if (new_backup) {
1192			res->backup = vmw_dmabuf_reference(new_backup);
1193			lockdep_assert_held(&new_backup->base.resv->lock.base);
1194			list_add_tail(&res->mob_head, &new_backup->res_list);
1195		} else {
1196			res->backup = NULL;
1197		}
1198	}
1199	if (switch_backup)
1200		res->backup_offset = new_backup_offset;
1201
1202	if (!res->func->may_evict || res->id == -1 || res->pin_count)
1203		return;
1204
1205	write_lock(&dev_priv->resource_lock);
1206	list_add_tail(&res->lru_head,
1207		      &res->dev_priv->res_lru[res->func->res_type]);
1208	write_unlock(&dev_priv->resource_lock);
1209}
1210
1211/**
1212 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1213 *                             for a resource and in that case, allocate
1214 *                             one, reserve and validate it.
1215 *
1216 * @res:            The resource for which to allocate a backup buffer.
1217 * @interruptible:  Whether any sleeps during allocation should be
1218 *                  performed while interruptible.
1219 * @val_buf:        On successful return contains data about the
1220 *                  reserved and validated backup buffer.
1221 */
1222static int
1223vmw_resource_check_buffer(struct vmw_resource *res,
1224			  bool interruptible,
1225			  struct ttm_validate_buffer *val_buf)
1226{
1227	struct list_head val_list;
1228	bool backup_dirty = false;
1229	int ret;
1230
1231	if (unlikely(res->backup == NULL)) {
1232		ret = vmw_resource_buf_alloc(res, interruptible);
1233		if (unlikely(ret != 0))
1234			return ret;
1235	}
1236
1237	INIT_LIST_HEAD(&val_list);
1238	val_buf->bo = ttm_bo_reference(&res->backup->base);
1239	val_buf->shared = false;
1240	list_add_tail(&val_buf->head, &val_list);
1241	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
1242	if (unlikely(ret != 0))
1243		goto out_no_reserve;
1244
1245	if (res->func->needs_backup && list_empty(&res->mob_head))
1246		return 0;
1247
1248	backup_dirty = res->backup_dirty;
1249	ret = ttm_bo_validate(&res->backup->base,
1250			      res->func->backup_placement,
1251			      true, false);
1252
1253	if (unlikely(ret != 0))
1254		goto out_no_validate;
1255
1256	return 0;
1257
1258out_no_validate:
1259	ttm_eu_backoff_reservation(NULL, &val_list);
1260out_no_reserve:
1261	ttm_bo_unref(&val_buf->bo);
1262	if (backup_dirty)
1263		vmw_dmabuf_unreference(&res->backup);
1264
1265	return ret;
1266}
1267
1268/**
1269 * vmw_resource_reserve - Reserve a resource for command submission
1270 *
1271 * @res:            The resource to reserve.
1272 *
 1273 * This function takes the resource off the LRU list and makes sure
1274 * a backup buffer is present for guest-backed resources. However,
1275 * the buffer may not be bound to the resource at this point.
1276 *
1277 */
1278int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1279			 bool no_backup)
1280{
1281	struct vmw_private *dev_priv = res->dev_priv;
1282	int ret;
1283
1284	write_lock(&dev_priv->resource_lock);
1285	list_del_init(&res->lru_head);
1286	write_unlock(&dev_priv->resource_lock);
1287
1288	if (res->func->needs_backup && res->backup == NULL &&
1289	    !no_backup) {
1290		ret = vmw_resource_buf_alloc(res, interruptible);
1291		if (unlikely(ret != 0)) {
1292			DRM_ERROR("Failed to allocate a backup buffer "
 1293				  "of size %lu bytes.\n",
1294				  (unsigned long) res->backup_size);
1295			return ret;
1296		}
1297	}
1298
1299	return 0;
1300}
1301
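
vmw_resource_pin() near the end of this file shows the sequence this function is part of: reserve the resource, reserve and validate its backup buffer, validate the resource, then unreserve everything. A trimmed sketch of that sequence (example_validate_resource() is an invented name; the reservation_sem/cmdbuf_mutex locking done by the real callers is omitted):

static int example_validate_resource(struct vmw_resource *res,
				     bool interruptible)
{
	struct vmw_dma_buffer *vbo;
	int ret;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		return ret;

	vbo = res->backup;	/* may have been allocated by the reserve */
	if (vbo) {
		ttm_bo_reserve(&vbo->base, interruptible, false, false, NULL);
		ret = ttm_bo_validate(&vbo->base, res->func->backup_placement,
				      interruptible, false);
		if (ret)
			goto out_unreserve;
	}

	ret = vmw_resource_validate(res);

out_unreserve:
	if (vbo)
		ttm_bo_unreserve(&vbo->base);
	vmw_resource_unreserve(res, false, NULL, 0UL);
	return ret;
}
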
1302/**
1303 * vmw_resource_backoff_reservation - Unreserve and unreference a
1304 *                                    backup buffer
 1305 *
1306 * @val_buf:        Backup buffer information.
1307 */
1308static void
1309vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1310{
1311	struct list_head val_list;
1312
1313	if (likely(val_buf->bo == NULL))
1314		return;
1315
1316	INIT_LIST_HEAD(&val_list);
1317	list_add_tail(&val_buf->head, &val_list);
1318	ttm_eu_backoff_reservation(NULL, &val_list);
1319	ttm_bo_unref(&val_buf->bo);
1320}
1321
1322/**
1323 * vmw_resource_do_evict - Evict a resource, and transfer its data
1324 *                         to a backup buffer.
1325 *
1326 * @res:            The resource to evict.
1327 * @interruptible:  Whether to wait interruptible.
1328 */
1329static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1330{
1331	struct ttm_validate_buffer val_buf;
1332	const struct vmw_res_func *func = res->func;
1333	int ret;
1334
1335	BUG_ON(!func->may_evict);
1336
1337	val_buf.bo = NULL;
1338	val_buf.shared = false;
1339	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1340	if (unlikely(ret != 0))
1341		return ret;
1342
1343	if (unlikely(func->unbind != NULL &&
1344		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1345		ret = func->unbind(res, res->res_dirty, &val_buf);
1346		if (unlikely(ret != 0))
1347			goto out_no_unbind;
1348		list_del_init(&res->mob_head);
1349	}
1350	ret = func->destroy(res);
1351	res->backup_dirty = true;
1352	res->res_dirty = false;
1353out_no_unbind:
1354	vmw_resource_backoff_reservation(&val_buf);
1355
1356	return ret;
1357}
1358
1359
1360/**
1361 * vmw_resource_validate - Make a resource up-to-date and visible
1362 *                         to the device.
1363 *
1364 * @res:            The resource to make visible to the device.
1365 *
 1366 * On successful return, any backup DMA buffer pointed to by @res->backup will
1367 * be reserved and validated.
1368 * On hardware resource shortage, this function will repeatedly evict
1369 * resources of the same type until the validation succeeds.
1370 */
1371int vmw_resource_validate(struct vmw_resource *res)
1372{
1373	int ret;
1374	struct vmw_resource *evict_res;
1375	struct vmw_private *dev_priv = res->dev_priv;
1376	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1377	struct ttm_validate_buffer val_buf;
1378	unsigned err_count = 0;
1379
1380	if (!res->func->create)
1381		return 0;
1382
1383	val_buf.bo = NULL;
1384	val_buf.shared = false;
1385	if (res->backup)
1386		val_buf.bo = &res->backup->base;
1387	do {
1388		ret = vmw_resource_do_validate(res, &val_buf);
1389		if (likely(ret != -EBUSY))
1390			break;
1391
1392		write_lock(&dev_priv->resource_lock);
1393		if (list_empty(lru_list) || !res->func->may_evict) {
 1394			DRM_ERROR("Out of device resources "
1395				  "for %s.\n", res->func->type_name);
1396			ret = -EBUSY;
1397			write_unlock(&dev_priv->resource_lock);
1398			break;
1399		}
1400
1401		evict_res = vmw_resource_reference
1402			(list_first_entry(lru_list, struct vmw_resource,
1403					  lru_head));
1404		list_del_init(&evict_res->lru_head);
1405
1406		write_unlock(&dev_priv->resource_lock);
1407
1408		ret = vmw_resource_do_evict(evict_res, true);
1409		if (unlikely(ret != 0)) {
1410			write_lock(&dev_priv->resource_lock);
1411			list_add_tail(&evict_res->lru_head, lru_list);
1412			write_unlock(&dev_priv->resource_lock);
1413			if (ret == -ERESTARTSYS ||
1414			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1415				vmw_resource_unreference(&evict_res);
1416				goto out_no_validate;
1417			}
1418		}
1419
1420		vmw_resource_unreference(&evict_res);
1421	} while (1);
1422
1423	if (unlikely(ret != 0))
1424		goto out_no_validate;
1425	else if (!res->func->needs_backup && res->backup) {
1426		list_del_init(&res->mob_head);
1427		vmw_dmabuf_unreference(&res->backup);
1428	}
1429
1430	return 0;
1431
1432out_no_validate:
1433	return ret;
1434}
1435
1436/**
1437 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1438 *                       object without unreserving it.
1439 *
1440 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1441 * @fence:          Pointer to the fence. If NULL, this function will
 1442 *                  insert a fence into the command stream.
1443 *
1444 * Contrary to the ttm_eu version of this function, it takes only
1445 * a single buffer object instead of a list, and it also doesn't
1446 * unreserve the buffer object, which needs to be done separately.
1447 */
1448void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1449			 struct vmw_fence_obj *fence)
1450{
1451	struct ttm_bo_device *bdev = bo->bdev;
1452
1453	struct vmw_private *dev_priv =
1454		container_of(bdev, struct vmw_private, bdev);
1455
1456	if (fence == NULL) {
1457		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1458		reservation_object_add_excl_fence(bo->resv, &fence->base);
1459		fence_put(&fence->base);
1460	} else
1461		reservation_object_add_excl_fence(bo->resv, &fence->base);
1462}
1463
1464/**
1465 * vmw_resource_move_notify - TTM move_notify_callback
1466 *
1467 * @bo: The TTM buffer object about to move.
1468 * @mem: The struct ttm_mem_reg indicating to what memory
1469 *       region the move is taking place.
1470 *
1471 * Evicts the Guest Backed hardware resource if the backup
1472 * buffer is being moved out of MOB memory.
1473 * Note that this function should not race with the resource
1474 * validation code as long as it accesses only members of struct
1475 * resource that remain static while bo::res is !NULL and
1476 * while we have @bo reserved. struct resource::backup is *not* a
1477 * static member. The resource validation code will take care
1478 * to set @bo::res to NULL, while having @bo reserved when the
1479 * buffer is no longer bound to the resource, so @bo:res can be
1480 * used to determine whether there is a need to unbind and whether
1481 * it is safe to unbind.
1482 */
1483void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1484			      struct ttm_mem_reg *mem)
1485{
1486	struct vmw_dma_buffer *dma_buf;
1487
1488	if (mem == NULL)
1489		return;
1490
1491	if (bo->destroy != vmw_dmabuf_bo_free &&
1492	    bo->destroy != vmw_user_dmabuf_destroy)
1493		return;
1494
1495	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1496
1497	if (mem->mem_type != VMW_PL_MOB) {
1498		struct vmw_resource *res, *n;
1499		struct ttm_validate_buffer val_buf;
1500
1501		val_buf.bo = bo;
1502		val_buf.shared = false;
1503
1504		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1505
1506			if (unlikely(res->func->unbind == NULL))
1507				continue;
1508
1509			(void) res->func->unbind(res, true, &val_buf);
1510			res->backup_dirty = true;
1511			res->res_dirty = false;
1512			list_del_init(&res->mob_head);
1513		}
1514
1515		(void) ttm_bo_wait(bo, false, false, false);
1516	}
1517}
1518
1519
1520
1521/**
1522 * vmw_query_readback_all - Read back cached query states
1523 *
1524 * @dx_query_mob: Buffer containing the DX query MOB
1525 *
1526 * Read back cached states from the device if they exist.  This function
 1527 * assumes binding_mutex is held.
1528 */
1529int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1530{
1531	struct vmw_resource *dx_query_ctx;
1532	struct vmw_private *dev_priv;
1533	struct {
1534		SVGA3dCmdHeader header;
1535		SVGA3dCmdDXReadbackAllQuery body;
1536	} *cmd;
1537
1538
1539	/* No query bound, so do nothing */
1540	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1541		return 0;
1542
1543	dx_query_ctx = dx_query_mob->dx_query_ctx;
1544	dev_priv     = dx_query_ctx->dev_priv;
1545
1546	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1547	if (unlikely(cmd == NULL)) {
1548		DRM_ERROR("Failed reserving FIFO space for "
1549			  "query MOB read back.\n");
1550		return -ENOMEM;
1551	}
1552
1553	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1554	cmd->header.size = sizeof(cmd->body);
1555	cmd->body.cid    = dx_query_ctx->id;
1556
1557	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1558
1559	/* Triggers a rebind the next time affected context is bound */
1560	dx_query_mob->dx_query_ctx = NULL;
1561
1562	return 0;
1563}
1564
1565
1566
1567/**
1568 * vmw_query_move_notify - Read back cached query states
1569 *
1570 * @bo: The TTM buffer object about to move.
1571 * @mem: The memory region @bo is moving to.
1572 *
1573 * Called before the query MOB is swapped out to read back cached query
1574 * states from the device.
1575 */
1576void vmw_query_move_notify(struct ttm_buffer_object *bo,
1577			   struct ttm_mem_reg *mem)
1578{
1579	struct vmw_dma_buffer *dx_query_mob;
1580	struct ttm_bo_device *bdev = bo->bdev;
1581	struct vmw_private *dev_priv;
1582
1583
1584	dev_priv = container_of(bdev, struct vmw_private, bdev);
1585
1586	mutex_lock(&dev_priv->binding_mutex);
1587
1588	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1589	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1590		mutex_unlock(&dev_priv->binding_mutex);
1591		return;
1592	}
1593
1594	/* If BO is being moved from MOB to system memory */
1595	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1596		struct vmw_fence_obj *fence;
1597
1598		(void) vmw_query_readback_all(dx_query_mob);
1599		mutex_unlock(&dev_priv->binding_mutex);
1600
1601		/* Create a fence and attach the BO to it */
1602		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1603		vmw_fence_single_bo(bo, fence);
1604
1605		if (fence != NULL)
1606			vmw_fence_obj_unreference(&fence);
1607
1608		(void) ttm_bo_wait(bo, false, false, false);
1609	} else
1610		mutex_unlock(&dev_priv->binding_mutex);
1611
1612}
1613
1614/**
1615 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1616 *
1617 * @res:            The resource being queried.
1618 */
1619bool vmw_resource_needs_backup(const struct vmw_resource *res)
1620{
1621	return res->func->needs_backup;
1622}
1623
1624/**
1625 * vmw_resource_evict_type - Evict all resources of a specific type
1626 *
1627 * @dev_priv:       Pointer to a device private struct
1628 * @type:           The resource type to evict
1629 *
 1630 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1631 * try to evict all evictable resources of a specific type.
1632 */
1633static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1634				    enum vmw_res_type type)
1635{
1636	struct list_head *lru_list = &dev_priv->res_lru[type];
1637	struct vmw_resource *evict_res;
1638	unsigned err_count = 0;
1639	int ret;
1640
1641	do {
1642		write_lock(&dev_priv->resource_lock);
1643
1644		if (list_empty(lru_list))
1645			goto out_unlock;
1646
1647		evict_res = vmw_resource_reference(
1648			list_first_entry(lru_list, struct vmw_resource,
1649					 lru_head));
1650		list_del_init(&evict_res->lru_head);
1651		write_unlock(&dev_priv->resource_lock);
1652
1653		ret = vmw_resource_do_evict(evict_res, false);
1654		if (unlikely(ret != 0)) {
1655			write_lock(&dev_priv->resource_lock);
1656			list_add_tail(&evict_res->lru_head, lru_list);
1657			write_unlock(&dev_priv->resource_lock);
1658			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1659				vmw_resource_unreference(&evict_res);
1660				return;
1661			}
1662		}
1663
1664		vmw_resource_unreference(&evict_res);
1665	} while (1);
1666
1667out_unlock:
1668	write_unlock(&dev_priv->resource_lock);
1669}
1670
1671/**
1672 * vmw_resource_evict_all - Evict all evictable resources
1673 *
1674 * @dev_priv:       Pointer to a device private struct
1675 *
 1676 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1677 * evict all evictable resources. In particular this means that all
1678 * guest-backed resources that are registered with the device are
1679 * evicted and the OTable becomes clean.
1680 */
1681void vmw_resource_evict_all(struct vmw_private *dev_priv)
1682{
1683	enum vmw_res_type type;
1684
1685	mutex_lock(&dev_priv->cmdbuf_mutex);
1686
1687	for (type = 0; type < vmw_res_max; ++type)
1688		vmw_resource_evict_type(dev_priv, type);
1689
1690	mutex_unlock(&dev_priv->cmdbuf_mutex);
1691}
1692
1693/**
1694 * vmw_resource_pin - Add a pin reference on a resource
1695 *
1696 * @res: The resource to add a pin reference on
1697 *
1698 * This function adds a pin reference, and if needed validates the resource.
1699 * Having a pin reference means that the resource can never be evicted, and
1700 * its id will never change as long as there is a pin reference.
1701 * This function returns 0 on success and a negative error code on failure.
1702 */
1703int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1704{
1705	struct vmw_private *dev_priv = res->dev_priv;
1706	int ret;
1707
1708	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1709	mutex_lock(&dev_priv->cmdbuf_mutex);
1710	ret = vmw_resource_reserve(res, interruptible, false);
1711	if (ret)
1712		goto out_no_reserve;
1713
1714	if (res->pin_count == 0) {
1715		struct vmw_dma_buffer *vbo = NULL;
1716
1717		if (res->backup) {
1718			vbo = res->backup;
1719
1720			ttm_bo_reserve(&vbo->base, interruptible, false, false,
1721				       NULL);
1722			if (!vbo->pin_count) {
1723				ret = ttm_bo_validate
1724					(&vbo->base,
1725					 res->func->backup_placement,
1726					 interruptible, false);
1727				if (ret) {
1728					ttm_bo_unreserve(&vbo->base);
1729					goto out_no_validate;
1730				}
1731			}
1732
1733			/* Do we really need to pin the MOB as well? */
1734			vmw_bo_pin_reserved(vbo, true);
1735		}
1736		ret = vmw_resource_validate(res);
1737		if (vbo)
1738			ttm_bo_unreserve(&vbo->base);
1739		if (ret)
1740			goto out_no_validate;
1741	}
1742	res->pin_count++;
1743
1744out_no_validate:
1745	vmw_resource_unreserve(res, false, NULL, 0UL);
1746out_no_reserve:
1747	mutex_unlock(&dev_priv->cmdbuf_mutex);
1748	ttm_write_unlock(&dev_priv->reservation_sem);
1749
1750	return ret;
1751}
1752
1753/**
1754 * vmw_resource_unpin - Remove a pin reference from a resource
1755 *
1756 * @res: The resource to remove a pin reference from
1757 *
1758 * Having a pin reference means that the resource can never be evicted, and
1759 * its id will never change as long as there is a pin reference.
1760 */
1761void vmw_resource_unpin(struct vmw_resource *res)
1762{
1763	struct vmw_private *dev_priv = res->dev_priv;
1764	int ret;
1765
1766	ttm_read_lock(&dev_priv->reservation_sem, false);
1767	mutex_lock(&dev_priv->cmdbuf_mutex);
1768
1769	ret = vmw_resource_reserve(res, false, true);
1770	WARN_ON(ret);
1771
1772	WARN_ON(res->pin_count == 0);
1773	if (--res->pin_count == 0 && res->backup) {
1774		struct vmw_dma_buffer *vbo = res->backup;
1775
1776		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
1777		vmw_bo_pin_reserved(vbo, false);
1778		ttm_bo_unreserve(&vbo->base);
1779	}
1780
1781	vmw_resource_unreserve(res, false, NULL, 0UL);
1782
1783	mutex_unlock(&dev_priv->cmdbuf_mutex);
1784	ttm_read_unlock(&dev_priv->reservation_sem);
1785}
1786
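
Pin references nest, so every successful vmw_resource_pin() must eventually be balanced by a vmw_resource_unpin(). A short pairing sketch (example_pin_briefly() is an invented name):

static int example_pin_briefly(struct vmw_resource *res)
{
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;

	/* While pinned the resource keeps its id and cannot be evicted. */

	vmw_resource_unpin(res);
	return 0;
}
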
1787/**
1788 * vmw_res_type - Return the resource type
1789 *
1790 * @res: Pointer to the resource
1791 */
1792enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1793{
1794	return res->func->res_type;
1795}
v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_placement.h>
  29
  30#include "vmwgfx_binding.h"
  31#include "vmwgfx_bo.h"
  32#include "vmwgfx_drv.h"
  33#include "vmwgfx_resource_priv.h"
  34
  35#define VMW_RES_EVICT_ERR_COUNT 10
  36
  37/**
  38 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
  39 * @res: The resource
  40 */
  41void vmw_resource_mob_attach(struct vmw_resource *res)
  42{
  43	struct vmw_bo *gbo = res->guest_memory_bo;
  44	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
  45
  46	dma_resv_assert_held(gbo->tbo.base.resv);
  47	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
  48		res->func->prio;
  49
  50	while (*new) {
  51		struct vmw_resource *this =
  52			container_of(*new, struct vmw_resource, mob_node);
  53
  54		parent = *new;
  55		new = (res->guest_memory_offset < this->guest_memory_offset) ?
  56			&((*new)->rb_left) : &((*new)->rb_right);
  57	}
  58
  59	rb_link_node(&res->mob_node, parent, new);
  60	rb_insert_color(&res->mob_node, &gbo->res_tree);
  61
  62	vmw_bo_prio_add(gbo, res->used_prio);
  63}
  64
  65/**
  66 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
  67 * @res: The resource
  68 */
  69void vmw_resource_mob_detach(struct vmw_resource *res)
  70{
  71	struct vmw_bo *gbo = res->guest_memory_bo;
  72
  73	dma_resv_assert_held(gbo->tbo.base.resv);
  74	if (vmw_resource_mob_attached(res)) {
  75		rb_erase(&res->mob_node, &gbo->res_tree);
  76		RB_CLEAR_NODE(&res->mob_node);
  77		vmw_bo_prio_del(gbo, res->used_prio);
  78	}
  79}
  80
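/*
 * Editor's note: a minimal usage sketch, not part of the kernel file.  It
 * shows the locking the two helpers above expect: the backing bo's
 * reservation lock must be held, which is what their dma_resv_assert_held()
 * checks enforce.  The helper name example_attach_locked() is hypothetical,
 * and res->guest_memory_bo is assumed to be set.
 */
static int example_attach_locked(struct vmw_resource *res)
{
	struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
	int ret;

	/* Take the reservation lock the attach/detach helpers assert on. */
	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	/* Insert the resource into gbo->res_tree, keyed by guest_memory_offset. */
	vmw_resource_mob_attach(res);

	ttm_bo_unreserve(bo);
	return 0;
}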
  81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  82{
  83	kref_get(&res->kref);
  84	return res;
  85}
  86
  87struct vmw_resource *
  88vmw_resource_reference_unless_doomed(struct vmw_resource *res)
  89{
  90	return kref_get_unless_zero(&res->kref) ? res : NULL;
  91}
  92
  93/**
  94 * vmw_resource_release_id - release a resource id to the id manager.
  95 *
  96 * @res: Pointer to the resource.
  97 *
  98 * Release the resource id to the resource id manager and set it to -1
  99 */
 100void vmw_resource_release_id(struct vmw_resource *res)
 101{
 102	struct vmw_private *dev_priv = res->dev_priv;
 103	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 104
 105	spin_lock(&dev_priv->resource_lock);
 106	if (res->id != -1)
 107		idr_remove(idr, res->id);
 108	res->id = -1;
 109	spin_unlock(&dev_priv->resource_lock);
 110}
 111
 112static void vmw_resource_release(struct kref *kref)
 113{
 114	struct vmw_resource *res =
 115	    container_of(kref, struct vmw_resource, kref);
 116	struct vmw_private *dev_priv = res->dev_priv;
 117	int id;
 118	int ret;
 119	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 120
 121	spin_lock(&dev_priv->resource_lock);
 122	list_del_init(&res->lru_head);
 123	spin_unlock(&dev_priv->resource_lock);
 124	if (res->guest_memory_bo) {
 125		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
 126
 127		ret = ttm_bo_reserve(bo, false, false, NULL);
 128		BUG_ON(ret);
 129		if (vmw_resource_mob_attached(res) &&
 130		    res->func->unbind != NULL) {
 131			struct ttm_validate_buffer val_buf;
 132
 133			val_buf.bo = bo;
 134			val_buf.num_shared = 0;
 135			res->func->unbind(res, false, &val_buf);
 136		}
  137		res->guest_memory_dirty = false;
 138		vmw_resource_mob_detach(res);
 139		if (res->dirty)
 140			res->func->dirty_free(res);
 141		if (res->coherent)
 142			vmw_bo_dirty_release(res->guest_memory_bo);
 143		ttm_bo_unreserve(bo);
 144		vmw_user_bo_unref(&res->guest_memory_bo);
 145	}
 146
 147	if (likely(res->hw_destroy != NULL)) {
 148		mutex_lock(&dev_priv->binding_mutex);
 149		vmw_binding_res_list_kill(&res->binding_head);
 150		mutex_unlock(&dev_priv->binding_mutex);
 151		res->hw_destroy(res);
 152	}
 153
 154	id = res->id;
 155	if (res->res_free != NULL)
 156		res->res_free(res);
 157	else
 158		kfree(res);
 159
 160	spin_lock(&dev_priv->resource_lock);
 161	if (id != -1)
 162		idr_remove(idr, id);
 163	spin_unlock(&dev_priv->resource_lock);
 164}
 165
 166void vmw_resource_unreference(struct vmw_resource **p_res)
 167{
 168	struct vmw_resource *res = *p_res;
 169
 170	*p_res = NULL;
 171	kref_put(&res->kref, vmw_resource_release);
 172}
 173
 174
 175/**
  176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
  177 *
  178 * @res: Pointer to the resource.
  179 *
  180 * Allocate the lowest free resource id from the resource id manager, and set
  181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 182 */
 183int vmw_resource_alloc_id(struct vmw_resource *res)
 184{
 185	struct vmw_private *dev_priv = res->dev_priv;
 186	int ret;
 187	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 188
 189	BUG_ON(res->id != -1);
 190
 191	idr_preload(GFP_KERNEL);
 192	spin_lock(&dev_priv->resource_lock);
 193
 194	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 195	if (ret >= 0)
 196		res->id = ret;
 197
 198	spin_unlock(&dev_priv->resource_lock);
 199	idr_preload_end();
 200	return ret < 0 ? ret : 0;
 201}
 202
 203/**
 204 * vmw_resource_init - initialize a struct vmw_resource
 205 *
 206 * @dev_priv:       Pointer to a device private struct.
 207 * @res:            The struct vmw_resource to initialize.
 208 * @delay_id:       Boolean whether to defer device id allocation until
 209 *                  the first validation.
 210 * @res_free:       Resource destructor.
 211 * @func:           Resource function table.
 212 */
 213int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 214		      bool delay_id,
 215		      void (*res_free) (struct vmw_resource *res),
 216		      const struct vmw_res_func *func)
 217{
 218	kref_init(&res->kref);
 219	res->hw_destroy = NULL;
 220	res->res_free = res_free;
 221	res->dev_priv = dev_priv;
 222	res->func = func;
 223	RB_CLEAR_NODE(&res->mob_node);
 224	INIT_LIST_HEAD(&res->lru_head);
 225	INIT_LIST_HEAD(&res->binding_head);
 226	res->id = -1;
 227	res->guest_memory_bo = NULL;
 228	res->guest_memory_offset = 0;
 229	res->guest_memory_dirty = false;
 230	res->res_dirty = false;
 231	res->coherent = false;
 232	res->used_prio = 3;
 233	res->dirty = NULL;
 234	if (delay_id)
 235		return 0;
 236	else
 237		return vmw_resource_alloc_id(res);
 238}
 239
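/*
 * Editor's note: a hedged sketch, not taken from the kernel tree, of how a
 * resource type typically wires up vmw_resource_init().  Real resource types
 * embed struct vmw_resource in a larger per-type struct; the function name
 * example_resource_create() and its parameters are placeholders.
 */
static int example_resource_create(struct vmw_private *dev_priv,
				   const struct vmw_res_func *func,
				   void (*res_free)(struct vmw_resource *res),
				   struct vmw_resource **p_res)
{
	struct vmw_resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (!res)
		return -ENOMEM;

	/* delay_id == true defers the device id allocation to first validation. */
	ret = vmw_resource_init(dev_priv, res, true, res_free, func);
	if (ret) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	*p_res = res;
	return 0;
}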
 240
 241/**
 242 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 243 * TTM user-space handle and perform basic type checks
 244 *
 245 * @dev_priv:     Pointer to a device private struct
 246 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 247 * @handle:       The TTM user-space handle
 248 * @converter:    Pointer to an object describing the resource type
 249 * @p_res:        On successful return the location pointed to will contain
 250 *                a pointer to a refcounted struct vmw_resource.
 251 *
 252 * If the handle can't be found or is associated with an incorrect resource
 253 * type, -EINVAL will be returned.
 254 */
 255int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 256				    struct ttm_object_file *tfile,
 257				    uint32_t handle,
 258				    const struct vmw_user_resource_conv
 259				    *converter,
 260				    struct vmw_resource **p_res)
 261{
 262	struct ttm_base_object *base;
 263	struct vmw_resource *res;
 264	int ret = -EINVAL;
 265
 266	base = ttm_base_object_lookup(tfile, handle);
 267	if (unlikely(!base))
 268		return -EINVAL;
 269
 270	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 271		goto out_bad_resource;
 272
 273	res = converter->base_obj_to_res(base);
 274	kref_get(&res->kref);
 275
 276	*p_res = res;
 277	ret = 0;
 278
 279out_bad_resource:
 280	ttm_base_object_unref(&base);
 281
 282	return ret;
 283}
 284
 285/*
  286 * Helper function that looks up either a surface or a bo.
  287 *
  288 * The pointers pointed at by out_surf and out_buf need to be NULL.
 289 */
 290int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 291			   struct drm_file *filp,
 292			   uint32_t handle,
 293			   struct vmw_surface **out_surf,
 294			   struct vmw_bo **out_buf)
 295{
 296	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 297	struct vmw_resource *res;
 298	int ret;
 299
 300	BUG_ON(*out_surf || *out_buf);
 301
 302	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 303					      user_surface_converter,
 304					      &res);
 305	if (!ret) {
 306		*out_surf = vmw_res_to_srf(res);
 307		return 0;
 308	}
 309
 310	*out_surf = NULL;
 311	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 312	return ret;
 313}
 314
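/*
 * Editor's note: a hedged caller-side sketch, not from the kernel file.  Both
 * out pointers must be NULL on entry and exactly one of them is set on
 * success.  vmw_user_bo_unref() is the buffer release helper used elsewhere
 * in this file; vmw_surface_unreference() is assumed to be the matching
 * surface helper, and example_lookup() itself is hypothetical.
 */
static int example_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
			  uint32_t handle)
{
	struct vmw_surface *surf = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, filp, handle, &surf, &bo);
	if (ret)
		return ret;

	if (surf) {
		/* ... use the surface ... */
		vmw_surface_unreference(&surf);
	} else {
		/* ... use the buffer object ... */
		vmw_user_bo_unref(&bo);
	}
	return 0;
}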
 315/**
 316 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 317 *
 318 * @res:            The resource for which to allocate a gbo buffer.
 319 * @interruptible:  Whether any sleeps during allocation should be
 320 *                  performed while interruptible.
 321 */
 322static int vmw_resource_buf_alloc(struct vmw_resource *res,
 323				  bool interruptible)
 324{
 325	unsigned long size = PFN_ALIGN(res->guest_memory_size);
 326	struct vmw_bo *gbo;
 327	struct vmw_bo_params bo_params = {
 328		.domain = res->func->domain,
 329		.busy_domain = res->func->busy_domain,
 330		.bo_type = ttm_bo_type_device,
 331		.size = res->guest_memory_size,
 332		.pin = false
 333	};
 334	int ret;
 335
 336	if (likely(res->guest_memory_bo)) {
 337		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
 338		return 0;
 339	}
 340
 341	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
 342	if (unlikely(ret != 0))
 343		goto out_no_bo;
 344
 345	res->guest_memory_bo = gbo;
 346
 347out_no_bo:
 348	return ret;
 349}
 350
 351/**
 352 * vmw_resource_do_validate - Make a resource up-to-date and visible
 353 *                            to the device.
 354 *
 355 * @res:            The resource to make visible to the device.
 356 * @val_buf:        Information about a buffer possibly
 357 *                  containing backup data if a bind operation is needed.
 358 * @dirtying:       Transfer dirty regions.
 359 *
 360 * On hardware resource shortage, this function returns -EBUSY and
 361 * should be retried once resources have been freed up.
 362 */
 363static int vmw_resource_do_validate(struct vmw_resource *res,
 364				    struct ttm_validate_buffer *val_buf,
 365				    bool dirtying)
 366{
 367	int ret = 0;
 368	const struct vmw_res_func *func = res->func;
 369
 370	if (unlikely(res->id == -1)) {
 371		ret = func->create(res);
 372		if (unlikely(ret != 0))
 373			return ret;
 374	}
 375
 376	if (func->bind &&
 377	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
 378	      val_buf->bo) ||
 379	     (!func->needs_guest_memory && val_buf->bo))) {
 380		ret = func->bind(res, val_buf);
 381		if (unlikely(ret != 0))
 382			goto out_bind_failed;
 383		if (func->needs_guest_memory)
 384			vmw_resource_mob_attach(res);
 385	}
 386
 387	/*
 388	 * Handle the case where the backup mob is marked coherent but
 389	 * the resource isn't.
 390	 */
 391	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 392	    !res->coherent) {
 393		if (res->guest_memory_bo->dirty && !res->dirty) {
 394			ret = func->dirty_alloc(res);
 395			if (ret)
 396				return ret;
 397		} else if (!res->guest_memory_bo->dirty && res->dirty) {
 398			func->dirty_free(res);
 399		}
 400	}
 401
 402	/*
 403	 * Transfer the dirty regions to the resource and update
 404	 * the resource.
 405	 */
 406	if (res->dirty) {
 407		if (dirtying && !res->res_dirty) {
 408			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
 409			pgoff_t end = __KERNEL_DIV_ROUND_UP
 410				(res->guest_memory_offset + res->guest_memory_size,
 411				 PAGE_SIZE);
 412
 413			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
 414		}
 415
 416		vmw_bo_dirty_transfer_to_res(res);
 417		return func->dirty_sync(res);
 418	}
 419
 420	return 0;
 421
 422out_bind_failed:
 423	func->destroy(res);
 424
 425	return ret;
 426}
 427
 428/**
 429 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 430 * command submission.
 431 *
 432 * @res:               Pointer to the struct vmw_resource to unreserve.
 433 * @dirty_set:         Change dirty status of the resource.
 434 * @dirty:             When changing dirty status indicates the new status.
 435 * @switch_guest_memory: Guest memory buffer has been switched.
 436 * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
 437 *                     switched. May be NULL.
 438 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 439 *
 440 * Currently unreserving a resource means putting it back on the device's
 441 * resource lru list, so that it can be evicted if necessary.
 442 */
 443void vmw_resource_unreserve(struct vmw_resource *res,
 444			    bool dirty_set,
 445			    bool dirty,
 446			    bool switch_guest_memory,
 447			    struct vmw_bo *new_guest_memory_bo,
 448			    unsigned long new_guest_memory_offset)
 449{
 450	struct vmw_private *dev_priv = res->dev_priv;
 451
 452	if (!list_empty(&res->lru_head))
 453		return;
 454
 455	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
 456		if (res->guest_memory_bo) {
 457			vmw_resource_mob_detach(res);
 458			if (res->coherent)
 459				vmw_bo_dirty_release(res->guest_memory_bo);
 460			vmw_user_bo_unref(&res->guest_memory_bo);
 461		}
 462
 463		if (new_guest_memory_bo) {
 464			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 465
 466			/*
 467			 * The validation code should already have added a
 468			 * dirty tracker here.
 469			 */
 470			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
 471
 472			vmw_resource_mob_attach(res);
 473		} else {
 474			res->guest_memory_bo = NULL;
 475		}
 476	} else if (switch_guest_memory && res->coherent) {
 477		vmw_bo_dirty_release(res->guest_memory_bo);
 478	}
 479
 480	if (switch_guest_memory)
 481		res->guest_memory_offset = new_guest_memory_offset;
 482
 483	if (dirty_set)
 484		res->res_dirty = dirty;
 485
 486	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 487		return;
 488
 489	spin_lock(&dev_priv->resource_lock);
 490	list_add_tail(&res->lru_head,
 491		      &res->dev_priv->res_lru[res->func->res_type]);
 492	spin_unlock(&dev_priv->resource_lock);
 493}
 494
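/*
 * Editor's note: a hedged sketch, not from the kernel tree, of the
 * reserve -> validate -> unreserve sequence the helpers above and below
 * implement.  It assumes the caller holds dev_priv->cmdbuf_mutex and has
 * already reserved the backing buffer object, as vmw_resource_pin() further
 * down does; example_validate_resource() is a hypothetical name.
 */
static int example_validate_resource(struct vmw_resource *res, bool intr)
{
	int ret;

	/* Take the resource off the LRU and allocate a gbo if the type needs one. */
	ret = vmw_resource_reserve(res, intr, false);
	if (ret)
		return ret;

	/* Create/bind the hardware resource; evicts same-type resources on -EBUSY. */
	ret = vmw_resource_validate(res, intr, false);

	/* No dirty change and no gbo switch: just make the resource evictable again. */
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	return ret;
}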
 495/**
 496 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 497 *                             for a resource and in that case, allocate
 498 *                             one, reserve and validate it.
 499 *
 500 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 501 * @res:            The resource for which to allocate a backup buffer.
 502 * @interruptible:  Whether any sleeps during allocation should be
 503 *                  performed while interruptible.
 504 * @val_buf:        On successful return contains data about the
 505 *                  reserved and validated backup buffer.
 506 */
 507static int
 508vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 509			  struct vmw_resource *res,
 510			  bool interruptible,
 511			  struct ttm_validate_buffer *val_buf)
 512{
 513	struct ttm_operation_ctx ctx = { true, false };
 514	struct list_head val_list;
 515	bool guest_memory_dirty = false;
 516	int ret;
 517
 518	if (unlikely(!res->guest_memory_bo)) {
 519		ret = vmw_resource_buf_alloc(res, interruptible);
 520		if (unlikely(ret != 0))
 521			return ret;
 522	}
 523
 524	INIT_LIST_HEAD(&val_list);
 525	ttm_bo_get(&res->guest_memory_bo->tbo);
 526	val_buf->bo = &res->guest_memory_bo->tbo;
 527	val_buf->num_shared = 0;
 528	list_add_tail(&val_buf->head, &val_list);
 529	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 530	if (unlikely(ret != 0))
 531		goto out_no_reserve;
 532
 533	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
 534		return 0;
 535
 536	guest_memory_dirty = res->guest_memory_dirty;
 537	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
 538			     res->func->busy_domain);
 539	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
 540			      &res->guest_memory_bo->placement,
 541			      &ctx);
 542
 543	if (unlikely(ret != 0))
 544		goto out_no_validate;
 545
 546	return 0;
 547
 548out_no_validate:
 549	ttm_eu_backoff_reservation(ticket, &val_list);
 550out_no_reserve:
 551	ttm_bo_put(val_buf->bo);
 552	val_buf->bo = NULL;
 553	if (guest_memory_dirty)
 554		vmw_user_bo_unref(&res->guest_memory_bo);
 555
 556	return ret;
 557}
 558
 559/*
 560 * vmw_resource_reserve - Reserve a resource for command submission
 561 *
 562 * @res:            The resource to reserve.
 563 *
  564 * This function takes the resource off the LRU list and makes sure
 565 * a guest memory buffer is present for guest-backed resources.
 566 * However, the buffer may not be bound to the resource at this
 567 * point.
 568 *
 569 */
 570int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 571			 bool no_guest_memory)
 572{
 573	struct vmw_private *dev_priv = res->dev_priv;
 574	int ret;
 575
 576	spin_lock(&dev_priv->resource_lock);
 577	list_del_init(&res->lru_head);
 578	spin_unlock(&dev_priv->resource_lock);
 579
 580	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
 581	    !no_guest_memory) {
 582		ret = vmw_resource_buf_alloc(res, interruptible);
 583		if (unlikely(ret != 0)) {
 584			DRM_ERROR("Failed to allocate a guest memory buffer "
  585				  "of size %lu bytes\n",
 586				  (unsigned long) res->guest_memory_size);
 587			return ret;
 588		}
 589	}
 590
 591	return 0;
 592}
 593
 594/**
 595 * vmw_resource_backoff_reservation - Unreserve and unreference a
 596 *                                    guest memory buffer
  597 *
 598 * @ticket:         The ww acquire ctx used for reservation.
 599 * @val_buf:        Guest memory buffer information.
 600 */
 601static void
 602vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 603				 struct ttm_validate_buffer *val_buf)
 604{
 605	struct list_head val_list;
 606
 607	if (likely(val_buf->bo == NULL))
 608		return;
 609
 610	INIT_LIST_HEAD(&val_list);
 611	list_add_tail(&val_buf->head, &val_list);
 612	ttm_eu_backoff_reservation(ticket, &val_list);
 613	ttm_bo_put(val_buf->bo);
 614	val_buf->bo = NULL;
 615}
 616
 617/**
 618 * vmw_resource_do_evict - Evict a resource, and transfer its data
 619 *                         to a backup buffer.
 620 *
 621 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 622 * @res:            The resource to evict.
 623 * @interruptible:  Whether to wait interruptible.
 624 */
 625static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 626				 struct vmw_resource *res, bool interruptible)
 627{
 628	struct ttm_validate_buffer val_buf;
 629	const struct vmw_res_func *func = res->func;
 630	int ret;
 631
 632	BUG_ON(!func->may_evict);
 633
 634	val_buf.bo = NULL;
 635	val_buf.num_shared = 0;
 636	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 637	if (unlikely(ret != 0))
 638		return ret;
 639
 640	if (unlikely(func->unbind != NULL &&
 641		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
 642		ret = func->unbind(res, res->res_dirty, &val_buf);
 643		if (unlikely(ret != 0))
 644			goto out_no_unbind;
 645		vmw_resource_mob_detach(res);
 646	}
 647	ret = func->destroy(res);
 648	res->guest_memory_dirty = true;
 649	res->res_dirty = false;
 650out_no_unbind:
 651	vmw_resource_backoff_reservation(ticket, &val_buf);
 652
 653	return ret;
 654}
 655
 656
 657/**
 658 * vmw_resource_validate - Make a resource up-to-date and visible
 659 *                         to the device.
 660 * @res: The resource to make visible to the device.
 661 * @intr: Perform waits interruptible if possible.
 662 * @dirtying: Pending GPU operation will dirty the resource
 663 *
  664 * On successful return, any guest memory buffer pointed to by @res->guest_memory_bo
  665 * will be reserved and validated.
 666 * On hardware resource shortage, this function will repeatedly evict
 667 * resources of the same type until the validation succeeds.
 668 *
 669 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 670 * on failure.
 671 */
 672int vmw_resource_validate(struct vmw_resource *res, bool intr,
 673			  bool dirtying)
 674{
 675	int ret;
 676	struct vmw_resource *evict_res;
 677	struct vmw_private *dev_priv = res->dev_priv;
 678	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 679	struct ttm_validate_buffer val_buf;
 680	unsigned err_count = 0;
 681
 682	if (!res->func->create)
 683		return 0;
 684
 685	val_buf.bo = NULL;
 686	val_buf.num_shared = 0;
 687	if (res->guest_memory_bo)
 688		val_buf.bo = &res->guest_memory_bo->tbo;
 689	do {
 690		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 691		if (likely(ret != -EBUSY))
 692			break;
 693
 694		spin_lock(&dev_priv->resource_lock);
 695		if (list_empty(lru_list) || !res->func->may_evict) {
  696			DRM_ERROR("Out of device resources "
 697				  "for %s.\n", res->func->type_name);
 698			ret = -EBUSY;
 699			spin_unlock(&dev_priv->resource_lock);
 700			break;
 701		}
 702
 703		evict_res = vmw_resource_reference
 704			(list_first_entry(lru_list, struct vmw_resource,
 705					  lru_head));
 706		list_del_init(&evict_res->lru_head);
 707
 708		spin_unlock(&dev_priv->resource_lock);
 709
 710		/* Trylock backup buffers with a NULL ticket. */
 711		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 712		if (unlikely(ret != 0)) {
 713			spin_lock(&dev_priv->resource_lock);
 714			list_add_tail(&evict_res->lru_head, lru_list);
 715			spin_unlock(&dev_priv->resource_lock);
 716			if (ret == -ERESTARTSYS ||
 717			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 718				vmw_resource_unreference(&evict_res);
 719				goto out_no_validate;
 720			}
 721		}
 722
 723		vmw_resource_unreference(&evict_res);
 724	} while (1);
 725
 726	if (unlikely(ret != 0))
 727		goto out_no_validate;
 728	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 729		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 730		vmw_user_bo_unref(&res->guest_memory_bo);
 731	}
 732
 733	return 0;
 734
 735out_no_validate:
 736	return ret;
 737}
 738
 739
 740/**
 741 * vmw_resource_unbind_list
 742 *
 743 * @vbo: Pointer to the current backing MOB.
 744 *
 745 * Evicts the Guest Backed hardware resource if the backup
 746 * buffer is being moved out of MOB memory.
 747 * Note that this function will not race with the resource
 748 * validation code, since resource validation and eviction
 749 * both require the backup buffer to be reserved.
 750 */
 751void vmw_resource_unbind_list(struct vmw_bo *vbo)
 752{
 753	struct ttm_validate_buffer val_buf = {
 754		.bo = &vbo->tbo,
 755		.num_shared = 0
 756	};
 757
 758	dma_resv_assert_held(vbo->tbo.base.resv);
 759	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 760		struct rb_node *node = vbo->res_tree.rb_node;
 761		struct vmw_resource *res =
 762			container_of(node, struct vmw_resource, mob_node);
 763
 764		if (!WARN_ON_ONCE(!res->func->unbind))
 765			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 766
  767		res->guest_memory_dirty = true;
 768		res->res_dirty = false;
 769		vmw_resource_mob_detach(res);
 770	}
 771
 772	(void) ttm_bo_wait(&vbo->tbo, false, false);
 773}
 774
 775
 776/**
 777 * vmw_query_readback_all - Read back cached query states
 778 *
 779 * @dx_query_mob: Buffer containing the DX query MOB
 780 *
 781 * Read back cached states from the device if they exist.  This function
 782 * assumes binding_mutex is held.
 783 */
 784int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
 785{
 786	struct vmw_resource *dx_query_ctx;
 787	struct vmw_private *dev_priv;
 788	struct {
 789		SVGA3dCmdHeader header;
 790		SVGA3dCmdDXReadbackAllQuery body;
 791	} *cmd;
 792
 793
 794	/* No query bound, so do nothing */
 795	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
 796		return 0;
 797
 798	dx_query_ctx = dx_query_mob->dx_query_ctx;
 799	dev_priv     = dx_query_ctx->dev_priv;
 800
 801	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
 802	if (unlikely(cmd == NULL))
 803		return -ENOMEM;
 804
 805	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 806	cmd->header.size = sizeof(cmd->body);
 807	cmd->body.cid    = dx_query_ctx->id;
 808
 809	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 810
 811	/* Triggers a rebind the next time affected context is bound */
 812	dx_query_mob->dx_query_ctx = NULL;
 813
 814	return 0;
 815}
 816
 817
 818
 819/**
 820 * vmw_query_move_notify - Read back cached query states
 821 *
 822 * @bo: The TTM buffer object about to move.
 823 * @old_mem: The memory region @bo is moving from.
 824 * @new_mem: The memory region @bo is moving to.
 825 *
 826 * Called before the query MOB is swapped out to read back cached query
 827 * states from the device.
 828 */
 829void vmw_query_move_notify(struct ttm_buffer_object *bo,
 830			   struct ttm_resource *old_mem,
 831			   struct ttm_resource *new_mem)
 832{
 833	struct vmw_bo *dx_query_mob;
 834	struct ttm_device *bdev = bo->bdev;
 835	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
 836
 837	mutex_lock(&dev_priv->binding_mutex);
 838
 839	/* If BO is being moved from MOB to system memory */
 840	if (old_mem &&
 841	    new_mem->mem_type == TTM_PL_SYSTEM &&
 842	    old_mem->mem_type == VMW_PL_MOB) {
 843		struct vmw_fence_obj *fence;
 844
 845		dx_query_mob = to_vmw_bo(&bo->base);
 846		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 847			mutex_unlock(&dev_priv->binding_mutex);
 848			return;
 849		}
 850
 851		(void) vmw_query_readback_all(dx_query_mob);
 852		mutex_unlock(&dev_priv->binding_mutex);
 853
 854		/* Create a fence and attach the BO to it */
 855		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 856		vmw_bo_fence_single(bo, fence);
 857
 858		if (fence != NULL)
 859			vmw_fence_obj_unreference(&fence);
 860
 861		(void) ttm_bo_wait(bo, false, false);
 862	} else
 863		mutex_unlock(&dev_priv->binding_mutex);
 864}
 865
 866/**
 867 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 868 *
 869 * @res:            The resource being queried.
 870 */
 871bool vmw_resource_needs_backup(const struct vmw_resource *res)
 872{
 873	return res->func->needs_guest_memory;
 874}
 875
 876/**
 877 * vmw_resource_evict_type - Evict all resources of a specific type
 878 *
 879 * @dev_priv:       Pointer to a device private struct
 880 * @type:           The resource type to evict
 881 *
  882 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 883 * try to evict all evictable resources of a specific type.
 884 */
 885static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 886				    enum vmw_res_type type)
 887{
 888	struct list_head *lru_list = &dev_priv->res_lru[type];
 889	struct vmw_resource *evict_res;
 890	unsigned err_count = 0;
 891	int ret;
 892	struct ww_acquire_ctx ticket;
 893
 894	do {
 895		spin_lock(&dev_priv->resource_lock);
 896
 897		if (list_empty(lru_list))
 898			goto out_unlock;
 899
 900		evict_res = vmw_resource_reference(
 901			list_first_entry(lru_list, struct vmw_resource,
 902					 lru_head));
 903		list_del_init(&evict_res->lru_head);
 904		spin_unlock(&dev_priv->resource_lock);
 905
 906		/* Wait lock backup buffers with a ticket. */
 907		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 908		if (unlikely(ret != 0)) {
 909			spin_lock(&dev_priv->resource_lock);
 910			list_add_tail(&evict_res->lru_head, lru_list);
 911			spin_unlock(&dev_priv->resource_lock);
 912			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 913				vmw_resource_unreference(&evict_res);
 914				return;
 915			}
 916		}
 917
 918		vmw_resource_unreference(&evict_res);
 919	} while (1);
 920
 921out_unlock:
 922	spin_unlock(&dev_priv->resource_lock);
 923}
 924
 925/**
 926 * vmw_resource_evict_all - Evict all evictable resources
 927 *
 928 * @dev_priv:       Pointer to a device private struct
 929 *
  930 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 931 * evict all evictable resources. In particular this means that all
 932 * guest-backed resources that are registered with the device are
 933 * evicted and the OTable becomes clean.
 934 */
 935void vmw_resource_evict_all(struct vmw_private *dev_priv)
 936{
 937	enum vmw_res_type type;
 938
 939	mutex_lock(&dev_priv->cmdbuf_mutex);
 940
 941	for (type = 0; type < vmw_res_max; ++type)
 942		vmw_resource_evict_type(dev_priv, type);
 943
 944	mutex_unlock(&dev_priv->cmdbuf_mutex);
 945}
 946
 947/*
 948 * vmw_resource_pin - Add a pin reference on a resource
 949 *
 950 * @res: The resource to add a pin reference on
 951 *
 952 * This function adds a pin reference, and if needed validates the resource.
 953 * Having a pin reference means that the resource can never be evicted, and
 954 * its id will never change as long as there is a pin reference.
 955 * This function returns 0 on success and a negative error code on failure.
 956 */
 957int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 958{
 959	struct ttm_operation_ctx ctx = { interruptible, false };
 960	struct vmw_private *dev_priv = res->dev_priv;
 961	int ret;
 962
 963	mutex_lock(&dev_priv->cmdbuf_mutex);
 964	ret = vmw_resource_reserve(res, interruptible, false);
 965	if (ret)
 966		goto out_no_reserve;
 967
 968	if (res->pin_count == 0) {
 969		struct vmw_bo *vbo = NULL;
 970
 971		if (res->guest_memory_bo) {
 972			vbo = res->guest_memory_bo;
 973
 974			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
 975			if (ret)
 976				goto out_no_validate;
 977			if (!vbo->tbo.pin_count) {
 978				vmw_bo_placement_set(vbo,
 979						     res->func->domain,
 980						     res->func->busy_domain);
 981				ret = ttm_bo_validate
 982					(&vbo->tbo,
 983					 &vbo->placement,
 984					 &ctx);
 985				if (ret) {
 986					ttm_bo_unreserve(&vbo->tbo);
 987					goto out_no_validate;
 988				}
 989			}
 990
 991			/* Do we really need to pin the MOB as well? */
 992			vmw_bo_pin_reserved(vbo, true);
 993		}
 994		ret = vmw_resource_validate(res, interruptible, true);
 995		if (vbo)
 996			ttm_bo_unreserve(&vbo->tbo);
 997		if (ret)
 998			goto out_no_validate;
 999	}
1000	res->pin_count++;
1001
1002out_no_validate:
1003	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1004out_no_reserve:
1005	mutex_unlock(&dev_priv->cmdbuf_mutex);
1006
1007	return ret;
1008}
1009
1010/**
1011 * vmw_resource_unpin - Remove a pin reference from a resource
1012 *
1013 * @res: The resource to remove a pin reference from
1014 *
1015 * Having a pin reference means that the resource can never be evicted, and
1016 * its id will never change as long as there is a pin reference.
1017 */
1018void vmw_resource_unpin(struct vmw_resource *res)
1019{
1020	struct vmw_private *dev_priv = res->dev_priv;
1021	int ret;
1022
1023	mutex_lock(&dev_priv->cmdbuf_mutex);
1024
1025	ret = vmw_resource_reserve(res, false, true);
1026	WARN_ON(ret);
1027
1028	WARN_ON(res->pin_count == 0);
1029	if (--res->pin_count == 0 && res->guest_memory_bo) {
1030		struct vmw_bo *vbo = res->guest_memory_bo;
1031
1032		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
1033		vmw_bo_pin_reserved(vbo, false);
1034		ttm_bo_unreserve(&vbo->tbo);
1035	}
1036
1037	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1038
1039	mutex_unlock(&dev_priv->cmdbuf_mutex);
1040}
1041
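/*
 * Editor's note: a hedged usage sketch, not part of the kernel file.  Pin
 * references nest, so every successful vmw_resource_pin() must be balanced
 * by a vmw_resource_unpin() once the resource no longer needs to stay
 * resident with a stable id.  example_use_pinned() is a hypothetical caller.
 */
static int example_use_pinned(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_pin(res, true);
	if (ret)
		return ret;

	/* ... res cannot be evicted and res->id stays stable here ... */

	vmw_resource_unpin(res);
	return 0;
}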
1042/**
1043 * vmw_res_type - Return the resource type
1044 *
1045 * @res: Pointer to the resource
1046 */
1047enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1048{
1049	return res->func->res_type;
1050}
1051
1052/**
1053 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1054 * sequential range of touched backing store memory.
1055 * @res: The resource.
1056 * @start: The first page touched.
1057 * @end: The last page touched + 1.
1058 */
1059void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1060			       pgoff_t end)
1061{
1062	if (res->dirty)
1063		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1064					   end << PAGE_SHIFT);
1065}
1066
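/*
 * Editor's note: a hedged sketch, not from the kernel file, illustrating the
 * page-granular interface: @start is the first page touched, @end the last
 * page touched + 1.  example_mark_write() is a hypothetical helper.
 */
static void example_mark_write(struct vmw_resource *res,
			       unsigned long offset, unsigned long bytes)
{
	pgoff_t first, last;

	if (!bytes)
		return;

	first = offset >> PAGE_SHIFT;
	last = (offset + bytes - 1) >> PAGE_SHIFT;

	/* Forward the touched page range to the resource's dirty tracker, if any. */
	vmw_resource_dirty_update(res, first, last + 1);
}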
1067/**
1068 * vmw_resources_clean - Clean resources intersecting a mob range
1069 * @vbo: The mob buffer object
1070 * @start: The mob page offset starting the range
1071 * @end: The mob page offset ending the range
1072 * @num_prefault: Returns how many pages including the first have been
1073 * cleaned and are ok to prefault
1074 */
1075int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1076			pgoff_t end, pgoff_t *num_prefault)
1077{
1078	struct rb_node *cur = vbo->res_tree.rb_node;
1079	struct vmw_resource *found = NULL;
1080	unsigned long res_start = start << PAGE_SHIFT;
1081	unsigned long res_end = end << PAGE_SHIFT;
1082	unsigned long last_cleaned = 0;
1083
1084	/*
 1085	 * Find the resource with the lowest guest_memory_offset that intersects the
1086	 * range.
1087	 */
1088	while (cur) {
1089		struct vmw_resource *cur_res =
1090			container_of(cur, struct vmw_resource, mob_node);
1091
1092		if (cur_res->guest_memory_offset >= res_end) {
1093			cur = cur->rb_left;
1094		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
1095			   res_start) {
1096			cur = cur->rb_right;
1097		} else {
1098			found = cur_res;
1099			cur = cur->rb_left;
1100			/* Continue to look for resources with lower offsets */
1101		}
1102	}
1103
1104	/*
1105	 * In order of increasing guest_memory_offset, clean dirty resources
1106	 * intersecting the range.
1107	 */
1108	while (found) {
1109		if (found->res_dirty) {
1110			int ret;
1111
1112			if (!found->func->clean)
1113				return -EINVAL;
1114
1115			ret = found->func->clean(found);
1116			if (ret)
1117				return ret;
1118
1119			found->res_dirty = false;
1120		}
1121		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
1122		cur = rb_next(&found->mob_node);
1123		if (!cur)
1124			break;
1125
1126		found = container_of(cur, struct vmw_resource, mob_node);
1127		if (found->guest_memory_offset >= res_end)
1128			break;
1129	}
1130
1131	/*
1132	 * Set number of pages allowed prefaulting and fence the buffer object
1133	 */
1134	*num_prefault = 1;
1135	if (last_cleaned > res_start) {
1136		struct ttm_buffer_object *bo = &vbo->tbo;
1137
1138		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1139						      PAGE_SIZE);
1140		vmw_bo_fence_single(bo, NULL);
1141	}
1142
1143	return 0;
1144}
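/*
 * Editor's note: a hedged caller-side sketch, not from the kernel file.  The
 * real caller is the driver's dirty-tracking/page-fault code; the point here
 * is only the contract: the bo's reservation must be held, and on success
 * *num_prefault reports how many pages starting at @start are clean and safe
 * to prefault.  example_clean_range() is a hypothetical name.
 */
static int example_clean_range(struct vmw_bo *vbo, pgoff_t start, pgoff_t end)
{
	pgoff_t num_prefault = 0;
	int ret;

	dma_resv_assert_held(vbo->tbo.base.resv);

	ret = vmw_resources_clean(vbo, start, end, &num_prefault);
	if (ret)
		return ret;

	/* Up to num_prefault pages from @start may now be prefaulted. */
	return 0;
}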