// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement and pin it.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
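
/*
 * A minimal usage sketch (not part of the original file): how a caller
 * might pin a buffer in VRAM around an operation that needs a stable
 * placement, and unpin it afterwards. The example_* name is hypothetical.
 */
#if 0	/* illustration only */
static int example_with_vram_pin(struct vmw_private *dev_priv,
				 struct vmw_buffer_object *buf)
{
	int ret = vmw_bo_pin_in_vram(dev_priv, buf, true);

	if (ret)	/* e.g. -ERESTARTSYS if interrupted by a signal */
		return ret;

	/* ... access the buffer; it is guaranteed not to move ... */

	return vmw_bo_unpin(dev_priv, buf, false);
}
#endif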


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->mem.start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unpin - Unpin a given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->mem.start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
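
/*
 * A short sketch (not part of the original file) of how the result is
 * typically consumed: the gmrId/offset pair describes a device-readable
 * location and is copied into the body of a command sent to the device.
 */
#if 0	/* illustration only */
	SVGAGuestPtr ptr;

	vmw_bo_get_guest_ptr(bo, &ptr);		/* @bo pinned or reserved */
	/* ptr.gmrId and ptr.offset now identify the buffer to the device */
#endif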


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
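
/*
 * A minimal sketch (not part of the original file): vmw_bo_pin_reserved()
 * requires the buffer to be reserved, so a caller typically wraps it like
 * this (essentially what vmw_bo_unpin() above does).
 */
#if 0	/* illustration only */
	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
	if (ret == 0) {
		vmw_bo_pin_reserved(vbo, true);	/* or false to unpin */
		ttm_bo_unreserve(&vbo->base);
	}
#endif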


/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
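
/*
 * A minimal usage sketch (not part of the original file): mapping a pinned
 * buffer to clear it from the CPU. The example_* name is hypothetical.
 */
#if 0	/* illustration only */
static int example_clear_bo(struct vmw_buffer_object *vbo, size_t size)
{
	/* @vbo must be pinned or reserved for the map to remain valid. */
	void *virtual = vmw_bo_map_and_cache(vbo);

	if (!virtual)
		return -ENOMEM;

	memset(virtual, 0, size);
	/*
	 * No explicit vmw_bo_unmap() is required here: the cached map is
	 * torn down automatically on move, swapout or destruction.
	 */
	return 0;
}
#endif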


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The accounting size of the buffer, including structure and
 * page-array overhead.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
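
/*
 * A minimal sketch (not part of the original file): allocating and
 * initializing a kernel-internal buffer object with vmw_bo_init(). Note
 * that vmw_bo_init() frees the object through @bo_free on error, so no
 * explicit kfree() is needed in the failure path.
 */
#if 0	/* illustration only */
	struct vmw_buffer_object *vbo;
	int ret;

	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
			  true, vmw_bo_bo_free);
#endif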


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}


/**
 * vmw_user_bo_verify_access - Verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if the caller may access the buffer object, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
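
/*
 * A minimal usage sketch (not part of the original file): looking up a
 * buffer object by handle, using it, and dropping the reference again.
 */
#if 0	/* illustration only */
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (ret)
		return ret;

	/* ... use @vbo; the returned pointer is refcounted ... */

	vmw_bo_unreference(&vbo);
#endif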

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * vmw_user_bo_noref_release() is called, and the object pointed to by the
 * returned pointer may be doomed. Any persistent usage of the object
 * requires a refcount to be taken using ttm_bo_reference_unless_doomed().
 * If this function returns successfully it needs to be paired with
 * vmw_user_bo_noref_release(), and no sleeping or scheduling functions
 * may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
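
/*
 * A minimal usage sketch (not part of the original file): fencing a
 * reserved buffer after queuing GPU work that touches it. Note that
 * vmw_bo_fence_single() does not unreserve, so the caller must.
 */
#if 0	/* illustration only */
	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret == 0) {
		/* ... queue GPU commands that use @bo ... */
		vmw_bo_fence_single(bo, NULL);	/* NULL: insert a new fence */
		ttm_bo_unreserve(bo);
	}
#endif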


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
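
/*
 * Worked example (not part of the original file) of the pitch/size
 * computation above: for a 640x480 buffer at 32 bpp,
 * pitch = 640 * ((32 + 7) / 8) = 640 * 4 = 2560 bytes, and
 * size = 2560 * 480 = 1228800 bytes. The (bpp + 7) / 8 rounding also
 * handles depths that are not a multiple of 8 bits.
 */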


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
1174}