v4.17
 
   1/**************************************************************************
   2 *
   3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
  28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29 */
  30
  31#define pr_fmt(fmt) "[TTM] " fmt
  32
  33#include <drm/ttm/ttm_module.h>
  34#include <drm/ttm/ttm_bo_driver.h>
  35#include <drm/ttm/ttm_placement.h>
  36#include <linux/jiffies.h>
  37#include <linux/slab.h>
  38#include <linux/sched.h>
  39#include <linux/mm.h>
  40#include <linux/file.h>
  41#include <linux/module.h>
  42#include <linux/atomic.h>
  43#include <linux/reservation.h>
  44
  45static void ttm_bo_global_kobj_release(struct kobject *kobj);
  46
  47static struct attribute ttm_bo_count = {
  48	.name = "bo_count",
  49	.mode = S_IRUGO
  50};
  51
  52/* default destructor */
  53static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
  54{
  55	kfree(bo);
  56}
  57
  58static inline int ttm_mem_type_from_place(const struct ttm_place *place,
  59					  uint32_t *mem_type)
  60{
  61	int pos;
  62
  63	pos = ffs(place->flags & TTM_PL_MASK_MEM);
  64	if (unlikely(!pos))
  65		return -EINVAL;
  66
  67	*mem_type = pos - 1;
  68	return 0;
  69}
  70
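/*
 * Editorial sketch, not part of the kernel file: each TTM_PL_FLAG_<type>
 * memory flag is the bit (1 << TTM_PL_<type>), so ffs() on the masked
 * flags recovers the memory-type index used for bdev->man[].
 */
static void ttm_mem_type_from_place_example(void)
{
	struct ttm_place place = {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,	/* VRAM is bit 2 */
	};
	uint32_t mem_type;

	/* ffs(place.flags & TTM_PL_MASK_MEM) == 3, so mem_type == TTM_PL_VRAM (2). */
	WARN_ON(ttm_mem_type_from_place(&place, &mem_type) ||
		mem_type != TTM_PL_VRAM);
}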
  71static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
  72{
  73	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  74	struct drm_printer p = drm_debug_printer(TTM_PFX);
  75
  76	pr_err("    has_type: %d\n", man->has_type);
  77	pr_err("    use_type: %d\n", man->use_type);
  78	pr_err("    flags: 0x%08X\n", man->flags);
  79	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
  80	pr_err("    size: %llu\n", man->size);
  81	pr_err("    available_caching: 0x%08X\n", man->available_caching);
  82	pr_err("    default_caching: 0x%08X\n", man->default_caching);
  83	if (mem_type != TTM_PL_SYSTEM)
  84		(*man->func->debug)(man, &p);
  85}
  86
  87static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  88					struct ttm_placement *placement)
  89{
  90	int i, ret, mem_type;
  91
  92	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
  93	       bo, bo->mem.num_pages, bo->mem.size >> 10,
  94	       bo->mem.size >> 20);
  95	for (i = 0; i < placement->num_placement; i++) {
  96		ret = ttm_mem_type_from_place(&placement->placement[i],
  97						&mem_type);
  98		if (ret)
  99			return;
 100		pr_err("  placement[%d]=0x%08X (%d)\n",
 101		       i, placement->placement[i].flags, mem_type);
 102		ttm_mem_type_debug(bo->bdev, mem_type);
 103	}
 104}
 105
 106static ssize_t ttm_bo_global_show(struct kobject *kobj,
 107				  struct attribute *attr,
 108				  char *buffer)
 109{
 110	struct ttm_bo_global *glob =
 111		container_of(kobj, struct ttm_bo_global, kobj);
 112
 113	return snprintf(buffer, PAGE_SIZE, "%d\n",
 114				atomic_read(&glob->bo_count));
 115}
 116
 117static struct attribute *ttm_bo_global_attrs[] = {
 118	&ttm_bo_count,
 119	NULL
 120};
 121
 122static const struct sysfs_ops ttm_bo_global_ops = {
 123	.show = &ttm_bo_global_show
 124};
 125
 126static struct kobj_type ttm_bo_glob_kobj_type  = {
 127	.release = &ttm_bo_global_kobj_release,
 128	.sysfs_ops = &ttm_bo_global_ops,
 129	.default_attrs = ttm_bo_global_attrs
 130};
 131
 132
 133static inline uint32_t ttm_bo_type_flags(unsigned type)
 134{
 135	return 1 << (type);
 136}
 137
 138static void ttm_bo_release_list(struct kref *list_kref)
 139{
 140	struct ttm_buffer_object *bo =
 141	    container_of(list_kref, struct ttm_buffer_object, list_kref);
 142	struct ttm_bo_device *bdev = bo->bdev;
 143	size_t acc_size = bo->acc_size;
 144
 145	BUG_ON(kref_read(&bo->list_kref));
 146	BUG_ON(kref_read(&bo->kref));
 147	BUG_ON(atomic_read(&bo->cpu_writers));
 148	BUG_ON(bo->mem.mm_node != NULL);
 149	BUG_ON(!list_empty(&bo->lru));
 150	BUG_ON(!list_empty(&bo->ddestroy));
 151	ttm_tt_destroy(bo->ttm);
 152	atomic_dec(&bo->bdev->glob->bo_count);
 153	dma_fence_put(bo->moving);
 154	reservation_object_fini(&bo->ttm_resv);
 155	mutex_destroy(&bo->wu_mutex);
 156	bo->destroy(bo);
 157	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 158}
 159
 160void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 161{
 162	struct ttm_bo_device *bdev = bo->bdev;
 163	struct ttm_mem_type_manager *man;
 164
 165	reservation_object_assert_held(bo->resv);
 166
 167	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 168		BUG_ON(!list_empty(&bo->lru));
 169
 170		man = &bdev->man[bo->mem.mem_type];
 171		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 172		kref_get(&bo->list_kref);
 173
 174		if (bo->ttm && !(bo->ttm->page_flags &
 175				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 176			list_add_tail(&bo->swap,
 177				      &bdev->glob->swap_lru[bo->priority]);
 178			kref_get(&bo->list_kref);
 179		}
 180	}
 181}
 182EXPORT_SYMBOL(ttm_bo_add_to_lru);
 183
 184static void ttm_bo_ref_bug(struct kref *list_kref)
 185{
 186	BUG();
 187}
 188
 189void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 190{
 191	if (!list_empty(&bo->swap)) {
 192		list_del_init(&bo->swap);
 193		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 194	}
 195	if (!list_empty(&bo->lru)) {
 196		list_del_init(&bo->lru);
 197		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 198	}
 199
 200	/*
 201	 * TODO: Add a driver hook to delete from
 202	 * driver-specific LRU's here.
 203	 */
 204}
 205
 206void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 207{
 208	struct ttm_bo_global *glob = bo->bdev->glob;
 209
 210	spin_lock(&glob->lru_lock);
 211	ttm_bo_del_from_lru(bo);
 212	spin_unlock(&glob->lru_lock);
 213}
 214EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 215
 216void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 217{
 218	reservation_object_assert_held(bo->resv);
 219
 220	ttm_bo_del_from_lru(bo);
 221	ttm_bo_add_to_lru(bo);
 222}
 223EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 224
 225static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 226				  struct ttm_mem_reg *mem, bool evict,
 227				  struct ttm_operation_ctx *ctx)
 228{
 229	struct ttm_bo_device *bdev = bo->bdev;
 230	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
 231	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
 232	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
 233	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
 234	int ret = 0;
 235
 236	if (old_is_pci || new_is_pci ||
 237	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
 238		ret = ttm_mem_io_lock(old_man, true);
 239		if (unlikely(ret != 0))
 240			goto out_err;
 241		ttm_bo_unmap_virtual_locked(bo);
 242		ttm_mem_io_unlock(old_man);
 243	}
 244
 245	/*
 246	 * Create and bind a ttm if required.
 247	 */
 248
 249	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 250		if (bo->ttm == NULL) {
 251			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
 252			ret = ttm_tt_create(bo, zero);
 253			if (ret)
 254				goto out_err;
 255		}
 256
 257		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 258		if (ret)
 259			goto out_err;
 260
 261		if (mem->mem_type != TTM_PL_SYSTEM) {
 262			ret = ttm_tt_bind(bo->ttm, mem, ctx);
 263			if (ret)
 264				goto out_err;
 265		}
 266
 267		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 268			if (bdev->driver->move_notify)
 269				bdev->driver->move_notify(bo, evict, mem);
 270			bo->mem = *mem;
 271			mem->mm_node = NULL;
 272			goto moved;
 273		}
 274	}
 275
 276	if (bdev->driver->move_notify)
 277		bdev->driver->move_notify(bo, evict, mem);
 278
 279	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 280	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 281		ret = ttm_bo_move_ttm(bo, ctx, mem);
 282	else if (bdev->driver->move)
 283		ret = bdev->driver->move(bo, evict, ctx, mem);
 284	else
 285		ret = ttm_bo_move_memcpy(bo, ctx, mem);
 286
 287	if (ret) {
 288		if (bdev->driver->move_notify) {
 289			struct ttm_mem_reg tmp_mem = *mem;
 290			*mem = bo->mem;
 291			bo->mem = tmp_mem;
 292			bdev->driver->move_notify(bo, false, mem);
 293			bo->mem = *mem;
 294			*mem = tmp_mem;
 295		}
 296
 297		goto out_err;
 298	}
 299
 300moved:
 301	if (bo->evicted) {
 302		if (bdev->driver->invalidate_caches) {
 303			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 304			if (ret)
 305				pr_err("Can not flush read caches\n");
 306		}
 307		bo->evicted = false;
 308	}
 309
 310	if (bo->mem.mm_node)
 311		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 312		    bdev->man[bo->mem.mem_type].gpu_offset;
 313	else
 314		bo->offset = 0;
 315
 316	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
 317	return 0;
 318
 319out_err:
 320	new_man = &bdev->man[bo->mem.mem_type];
 321	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 322		ttm_tt_destroy(bo->ttm);
 323		bo->ttm = NULL;
 324	}
 325
 326	return ret;
 327}
 328
 329/**
 330 * Call bo::reserved.
 331 * Will release GPU memory type usage on destruction.
 332 * This is the place to put in driver specific hooks to release
 333 * driver private resources.
 334 * Will release the bo::reserved lock.
 335 */
 336
 337static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 338{
 339	if (bo->bdev->driver->move_notify)
 340		bo->bdev->driver->move_notify(bo, false, NULL);
 341
 342	ttm_tt_destroy(bo->ttm);
 343	bo->ttm = NULL;
 344	ttm_bo_mem_put(bo, &bo->mem);
 345}
 346
 347static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 348{
 349	int r;
 350
 351	if (bo->resv == &bo->ttm_resv)
 352		return 0;
 353
 354	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 355
 356	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
 357	if (r)
 358		reservation_object_unlock(&bo->ttm_resv);
 359
 360	return r;
 361}
 362
 363static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 364{
 365	struct reservation_object_list *fobj;
 366	struct dma_fence *fence;
 367	int i;
 368
 369	fobj = reservation_object_get_list(&bo->ttm_resv);
 370	fence = reservation_object_get_excl(&bo->ttm_resv);
 371	if (fence && !fence->ops->signaled)
 372		dma_fence_enable_sw_signaling(fence);
 373
 374	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 375		fence = rcu_dereference_protected(fobj->shared[i],
 376					reservation_object_held(bo->resv));
 377
 378		if (!fence->ops->signaled)
 379			dma_fence_enable_sw_signaling(fence);
 380	}
 381}
 382
 383static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 384{
 385	struct ttm_bo_device *bdev = bo->bdev;
 386	struct ttm_bo_global *glob = bdev->glob;
 387	int ret;
 388
 389	ret = ttm_bo_individualize_resv(bo);
 390	if (ret) {
 391		/* Last resort, if we fail to allocate memory for the
 392		 * fences block for the BO to become idle
 393		 */
 394		reservation_object_wait_timeout_rcu(bo->resv, true, false,
 395						    30 * HZ);
 396		spin_lock(&glob->lru_lock);
 397		goto error;
 398	}
 399
 400	spin_lock(&glob->lru_lock);
 401	ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
 402	if (!ret) {
 403		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 404			ttm_bo_del_from_lru(bo);
 405			spin_unlock(&glob->lru_lock);
 406			if (bo->resv != &bo->ttm_resv)
 407				reservation_object_unlock(&bo->ttm_resv);
 408
 409			ttm_bo_cleanup_memtype_use(bo);
 410			reservation_object_unlock(bo->resv);
 411			return;
 412		}
 413
 414		ttm_bo_flush_all_fences(bo);
 415
 416		/*
 417		 * Make NO_EVICT bos immediately available to
 418		 * shrinkers, now that they are queued for
 419		 * destruction.
 420		 */
 421		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 422			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
 423			ttm_bo_add_to_lru(bo);
 424		}
 425
 426		reservation_object_unlock(bo->resv);
 427	}
 428	if (bo->resv != &bo->ttm_resv)
 429		reservation_object_unlock(&bo->ttm_resv);
 430
 431error:
 432	kref_get(&bo->list_kref);
 433	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 434	spin_unlock(&glob->lru_lock);
 435
 436	schedule_delayed_work(&bdev->wq,
 437			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 438}
 439
 440/**
 441 * function ttm_bo_cleanup_refs
 442 * If bo idle, remove from delayed- and lru lists, and unref.
 443 * If not idle, do nothing.
 444 *
 445 * Must be called with lru_lock and reservation held, this function
 446 * will drop the lru lock and optionally the reservation lock before returning.
 447 *
 448 * @interruptible         Any sleeps should occur interruptibly.
 449 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 450 * @unlock_resv           Unlock the reservation lock as well.
 451 */
 452
 453static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 454			       bool interruptible, bool no_wait_gpu,
 455			       bool unlock_resv)
 456{
 457	struct ttm_bo_global *glob = bo->bdev->glob;
 458	struct reservation_object *resv;
 459	int ret;
 460
 461	if (unlikely(list_empty(&bo->ddestroy)))
 462		resv = bo->resv;
 463	else
 464		resv = &bo->ttm_resv;
 465
 466	if (reservation_object_test_signaled_rcu(resv, true))
 467		ret = 0;
 468	else
 469		ret = -EBUSY;
 470
 471	if (ret && !no_wait_gpu) {
 472		long lret;
 473
 474		if (unlock_resv)
 475			reservation_object_unlock(bo->resv);
 476		spin_unlock(&glob->lru_lock);
 477
 478		lret = reservation_object_wait_timeout_rcu(resv, true,
 479							   interruptible,
 480							   30 * HZ);
 481
 482		if (lret < 0)
 483			return lret;
 484		else if (lret == 0)
 485			return -EBUSY;
 486
 487		spin_lock(&glob->lru_lock);
 488		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
 489			/*
 490			 * We raced, and lost, someone else holds the reservation now,
 491			 * and is probably busy in ttm_bo_cleanup_memtype_use.
 492			 *
 493			 * Even if it's not the case, because we finished waiting any
 494			 * delayed destruction would succeed, so just return success
 495			 * here.
 496			 */
 497			spin_unlock(&glob->lru_lock);
 498			return 0;
 499		}
 500		ret = 0;
 501	}
 502
 503	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 504		if (unlock_resv)
 505			reservation_object_unlock(bo->resv);
 506		spin_unlock(&glob->lru_lock);
 507		return ret;
 508	}
 509
 510	ttm_bo_del_from_lru(bo);
 511	list_del_init(&bo->ddestroy);
 512	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 513
 514	spin_unlock(&glob->lru_lock);
 515	ttm_bo_cleanup_memtype_use(bo);
 516
 517	if (unlock_resv)
 518		reservation_object_unlock(bo->resv);
 519
 520	return 0;
 521}
 522
 523/**
 524 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 525 * encountered buffers.
 526 */
 527static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 528{
 529	struct ttm_bo_global *glob = bdev->glob;
 530	struct list_head removed;
 531	bool empty;
 532
 533	INIT_LIST_HEAD(&removed);
 534
 535	spin_lock(&glob->lru_lock);
 536	while (!list_empty(&bdev->ddestroy)) {
 537		struct ttm_buffer_object *bo;
 538
 539		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 540				      ddestroy);
 541		kref_get(&bo->list_kref);
 542		list_move_tail(&bo->ddestroy, &removed);
 543
 544		if (remove_all || bo->resv != &bo->ttm_resv) {
 545			spin_unlock(&glob->lru_lock);
 546			reservation_object_lock(bo->resv, NULL);
 547
 548			spin_lock(&glob->lru_lock);
 549			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 550
 551		} else if (reservation_object_trylock(bo->resv)) {
 552			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 553		} else {
 554			spin_unlock(&glob->lru_lock);
 555		}
 556
 557		kref_put(&bo->list_kref, ttm_bo_release_list);
 558		spin_lock(&glob->lru_lock);
 559	}
 560	list_splice_tail(&removed, &bdev->ddestroy);
 561	empty = list_empty(&bdev->ddestroy);
 562	spin_unlock(&glob->lru_lock);
 563
 564	return empty;
 565}
 566
 567static void ttm_bo_delayed_workqueue(struct work_struct *work)
 568{
 569	struct ttm_bo_device *bdev =
 570	    container_of(work, struct ttm_bo_device, wq.work);
 571
 572	if (!ttm_bo_delayed_delete(bdev, false))
 573		schedule_delayed_work(&bdev->wq,
 574				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 575}
 576
 577static void ttm_bo_release(struct kref *kref)
 578{
 579	struct ttm_buffer_object *bo =
 580	    container_of(kref, struct ttm_buffer_object, kref);
 581	struct ttm_bo_device *bdev = bo->bdev;
 582	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 583
 584	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 585	ttm_mem_io_lock(man, false);
 586	ttm_mem_io_free_vm(bo);
 587	ttm_mem_io_unlock(man);
 588	ttm_bo_cleanup_refs_or_queue(bo);
 589	kref_put(&bo->list_kref, ttm_bo_release_list);
 590}
 591
 592void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 593{
 594	struct ttm_buffer_object *bo = *p_bo;
 595
 596	*p_bo = NULL;
 597	kref_put(&bo->kref, ttm_bo_release);
 598}
 599EXPORT_SYMBOL(ttm_bo_unref);
 600
 601int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 602{
 603	return cancel_delayed_work_sync(&bdev->wq);
 604}
 605EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 606
 607void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 608{
 609	if (resched)
 610		schedule_delayed_work(&bdev->wq,
 611				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 612}
 613EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 614
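/*
 * Editorial sketch, not part of the file: the pair above is typically used
 * to quiesce delayed BO destruction around suspend or GPU reset.  All
 * "my_*" identifiers are hypothetical; the flow mirrors what drivers such
 * as radeon do in this kernel version.
 */
struct my_device {				/* hypothetical driver device */
	struct ttm_bo_device bdev;
	/* ... */
};

static int my_gpu_reset(struct my_device *mdev)
{
	int resched;

	/* Stop the delayed-destroy work, remembering whether it was pending. */
	resched = ttm_bo_lock_delayed_workqueue(&mdev->bdev);

	/* ... reset the hardware, restore rings and so on ... */

	/* Reschedule the delayed-destroy work only if it had been pending. */
	ttm_bo_unlock_delayed_workqueue(&mdev->bdev, resched);
	return 0;
}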
 615static int ttm_bo_evict(struct ttm_buffer_object *bo,
 616			struct ttm_operation_ctx *ctx)
 617{
 618	struct ttm_bo_device *bdev = bo->bdev;
 619	struct ttm_mem_reg evict_mem;
 620	struct ttm_placement placement;
 621	int ret = 0;
 622
 623	reservation_object_assert_held(bo->resv);
 624
 625	placement.num_placement = 0;
 626	placement.num_busy_placement = 0;
 627	bdev->driver->evict_flags(bo, &placement);
 628
 629	if (!placement.num_placement && !placement.num_busy_placement) {
 630		ret = ttm_bo_pipeline_gutting(bo);
 631		if (ret)
 632			return ret;
 633
 634		return ttm_tt_create(bo, false);
 635	}
 636
 637	evict_mem = bo->mem;
 638	evict_mem.mm_node = NULL;
 639	evict_mem.bus.io_reserved_vm = false;
 640	evict_mem.bus.io_reserved_count = 0;
 641
 642	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 643	if (ret) {
 644		if (ret != -ERESTARTSYS) {
 645			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
 646			       bo);
 647			ttm_bo_mem_space_debug(bo, &placement);
 648		}
 649		goto out;
 650	}
 651
 652	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
 653	if (unlikely(ret)) {
 654		if (ret != -ERESTARTSYS)
 655			pr_err("Buffer eviction failed\n");
 656		ttm_bo_mem_put(bo, &evict_mem);
 657		goto out;
 658	}
 659	bo->evicted = true;
 660out:
 661	return ret;
 662}
 663
 664bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 665			      const struct ttm_place *place)
 666{
 667	/* Don't evict this BO if it's outside of the
 668	 * requested placement range
 669	 */
 670	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
 671	    (place->lpfn && place->lpfn <= bo->mem.start))
 672		return false;
 673
 674	return true;
 675}
 676EXPORT_SYMBOL(ttm_bo_eviction_valuable);
 677
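/*
 * Editorial sketch, not part of the file: a driver can use
 * ttm_bo_eviction_valuable() directly as its ttm_bo_driver hook or wrap
 * it with extra checks.  "my_bo" and its pin_count are hypothetical.
 */
struct my_bo {					/* hypothetical BO wrapper */
	struct ttm_buffer_object tbo;
	unsigned pin_count;
};

static bool my_bo_eviction_valuable(struct ttm_buffer_object *bo,
				    const struct ttm_place *place)
{
	struct my_bo *mbo = container_of(bo, struct my_bo, tbo);

	/* Never offer pinned buffers for eviction. */
	if (mbo->pin_count)
		return false;

	/* Otherwise fall back to the generic range check above. */
	return ttm_bo_eviction_valuable(bo, place);
}

static struct ttm_bo_driver my_bo_driver = {
	/* ... other mandatory callbacks ... */
	.eviction_valuable	= my_bo_eviction_valuable,
};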
 678/**
  679 * Check whether the target bo is allowed to be evicted or swapped out:
  680 *
  681 * a. if the bo shares its reservation object with ctx->resv, that
  682 * reservation object is assumed to be locked already, so don't lock it
  683 * again; return true directly when either the operation allows reserved
  684 * eviction or the target bo is already on the delayed-free list;
 685 *
 686 * b. Otherwise, trylock it.
 687 */
 688static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 689			struct ttm_operation_ctx *ctx, bool *locked)
 690{
 691	bool ret = false;
 692
 693	*locked = false;
 694	if (bo->resv == ctx->resv) {
 695		reservation_object_assert_held(bo->resv);
 696		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 697		    || !list_empty(&bo->ddestroy))
 698			ret = true;
 699	} else {
 700		*locked = reservation_object_trylock(bo->resv);
 701		ret = *locked;
 702	}
 703
 704	return ret;
 705}
 706
 707static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 708			       uint32_t mem_type,
 709			       const struct ttm_place *place,
 710			       struct ttm_operation_ctx *ctx)
 711{
 712	struct ttm_bo_global *glob = bdev->glob;
 713	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 714	struct ttm_buffer_object *bo = NULL;
 715	bool locked = false;
 716	unsigned i;
 717	int ret;
 718
 719	spin_lock(&glob->lru_lock);
 720	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 721		list_for_each_entry(bo, &man->lru[i], lru) {
 722			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
 723				continue;
 724
 725			if (place && !bdev->driver->eviction_valuable(bo,
 726								      place)) {
 727				if (locked)
 728					reservation_object_unlock(bo->resv);
 729				continue;
 730			}
 731			break;
 732		}
 733
 734		/* If the inner loop terminated early, we have our candidate */
 735		if (&bo->lru != &man->lru[i])
 736			break;
 737
 738		bo = NULL;
 739	}
 740
 741	if (!bo) {
 742		spin_unlock(&glob->lru_lock);
 743		return -EBUSY;
 744	}
 745
 746	kref_get(&bo->list_kref);
 747
 748	if (!list_empty(&bo->ddestroy)) {
 749		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
 750					  ctx->no_wait_gpu, locked);
 751		kref_put(&bo->list_kref, ttm_bo_release_list);
 752		return ret;
 753	}
 754
 755	ttm_bo_del_from_lru(bo);
 756	spin_unlock(&glob->lru_lock);
 757
 758	ret = ttm_bo_evict(bo, ctx);
 759	if (locked) {
 760		ttm_bo_unreserve(bo);
 761	} else {
 762		spin_lock(&glob->lru_lock);
 763		ttm_bo_add_to_lru(bo);
 764		spin_unlock(&glob->lru_lock);
 765	}
 766
 767	kref_put(&bo->list_kref, ttm_bo_release_list);
 768	return ret;
 769}
 770
 771void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 772{
 773	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 774
 775	if (mem->mm_node)
 776		(*man->func->put_node)(man, mem);
 777}
 778EXPORT_SYMBOL(ttm_bo_mem_put);
 779
 780/**
 781 * Add the last move fence to the BO and reserve a new shared slot.
 782 */
 783static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 784				 struct ttm_mem_type_manager *man,
 785				 struct ttm_mem_reg *mem)
 786{
 787	struct dma_fence *fence;
 788	int ret;
 789
 790	spin_lock(&man->move_lock);
 791	fence = dma_fence_get(man->move);
 792	spin_unlock(&man->move_lock);
 793
 794	if (fence) {
 795		reservation_object_add_shared_fence(bo->resv, fence);
 796
 797		ret = reservation_object_reserve_shared(bo->resv);
 798		if (unlikely(ret))
 799			return ret;
 800
 801		dma_fence_put(bo->moving);
 802		bo->moving = fence;
 803	}
 804
 805	return 0;
 806}
 807
 808/**
 809 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 810 * space, or we've evicted everything and there isn't enough space.
 811 */
 812static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 813					uint32_t mem_type,
 814					const struct ttm_place *place,
 815					struct ttm_mem_reg *mem,
 816					struct ttm_operation_ctx *ctx)
 817{
 818	struct ttm_bo_device *bdev = bo->bdev;
 819	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 820	int ret;
 821
 822	do {
 823		ret = (*man->func->get_node)(man, bo, place, mem);
 824		if (unlikely(ret != 0))
 825			return ret;
 826		if (mem->mm_node)
 827			break;
 828		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
 829		if (unlikely(ret != 0))
 830			return ret;
 831	} while (1);
 832	mem->mem_type = mem_type;
 833	return ttm_bo_add_move_fence(bo, man, mem);
 834}
 835
 836static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 837				      uint32_t cur_placement,
 838				      uint32_t proposed_placement)
 839{
 840	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
 841	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 842
 843	/**
 844	 * Keep current caching if possible.
 845	 */
 846
 847	if ((cur_placement & caching) != 0)
 848		result |= (cur_placement & caching);
 849	else if ((man->default_caching & caching) != 0)
 850		result |= man->default_caching;
 851	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
 852		result |= TTM_PL_FLAG_CACHED;
 853	else if ((TTM_PL_FLAG_WC & caching) != 0)
 854		result |= TTM_PL_FLAG_WC;
 855	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
 856		result |= TTM_PL_FLAG_UNCACHED;
 857
 858	return result;
 859}
 860
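/*
 * Editorial sketch, not part of the file: worked example of the caching
 * selection order above.  The current caching wins when the proposed
 * placement still allows it; only otherwise do man->default_caching and
 * the CACHED/WC/UNCACHED fallbacks apply.
 */
static void ttm_bo_select_caching_example(struct ttm_mem_type_manager *man)
{
	uint32_t cur  = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;	/* bo->mem.placement */
	uint32_t prop = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

	/*
	 * caching = prop & TTM_PL_MASK_CACHING	-> WC | UNCACHED
	 * cur & caching			-> WC (non-zero)
	 * so the result keeps TTM_PL_FLAG_WC and the buffer stays
	 * write-combined instead of falling back to man->default_caching.
	 */
	WARN_ON(ttm_bo_select_caching(man, cur, prop) !=
		(TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC));
}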
 861static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 862				 uint32_t mem_type,
 863				 const struct ttm_place *place,
 864				 uint32_t *masked_placement)
 865{
 866	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 867
 868	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 869		return false;
 870
 871	if ((place->flags & man->available_caching) == 0)
 872		return false;
 873
 874	cur_flags |= (place->flags & man->available_caching);
 875
 876	*masked_placement = cur_flags;
 877	return true;
 878}
 879
 880/**
 881 * Creates space for memory region @mem according to its type.
 882 *
 883 * This function first searches for free space in compatible memory types in
 884 * the priority order defined by the driver.  If free space isn't found, then
 885 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 886 * space.
 887 */
 888int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 889			struct ttm_placement *placement,
 890			struct ttm_mem_reg *mem,
 891			struct ttm_operation_ctx *ctx)
 892{
 893	struct ttm_bo_device *bdev = bo->bdev;
 894	struct ttm_mem_type_manager *man;
 895	uint32_t mem_type = TTM_PL_SYSTEM;
 896	uint32_t cur_flags = 0;
 897	bool type_found = false;
 898	bool type_ok = false;
 899	bool has_erestartsys = false;
 900	int i, ret;
 901
 902	ret = reservation_object_reserve_shared(bo->resv);
 903	if (unlikely(ret))
 904		return ret;
 905
 906	mem->mm_node = NULL;
 907	for (i = 0; i < placement->num_placement; ++i) {
 908		const struct ttm_place *place = &placement->placement[i];
 909
 910		ret = ttm_mem_type_from_place(place, &mem_type);
 911		if (ret)
 912			return ret;
 913		man = &bdev->man[mem_type];
 914		if (!man->has_type || !man->use_type)
 915			continue;
 916
 917		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 918						&cur_flags);
 919
 920		if (!type_ok)
 921			continue;
 922
 923		type_found = true;
 924		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 925						  cur_flags);
 926		/*
 927		 * Use the access and other non-mapping-related flag bits from
 928		 * the memory placement flags to the current flags
 929		 */
 930		ttm_flag_masked(&cur_flags, place->flags,
 931				~TTM_PL_MASK_MEMTYPE);
 932
 933		if (mem_type == TTM_PL_SYSTEM)
 934			break;
 935
 936		ret = (*man->func->get_node)(man, bo, place, mem);
 937		if (unlikely(ret))
 938			return ret;
 939
 940		if (mem->mm_node) {
 941			ret = ttm_bo_add_move_fence(bo, man, mem);
 942			if (unlikely(ret)) {
 943				(*man->func->put_node)(man, mem);
 944				return ret;
 945			}
 946			break;
 947		}
 948	}
 949
 950	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 951		mem->mem_type = mem_type;
 952		mem->placement = cur_flags;
 953		return 0;
 954	}
 955
 956	for (i = 0; i < placement->num_busy_placement; ++i) {
 957		const struct ttm_place *place = &placement->busy_placement[i];
 958
 959		ret = ttm_mem_type_from_place(place, &mem_type);
 960		if (ret)
 961			return ret;
 962		man = &bdev->man[mem_type];
 963		if (!man->has_type || !man->use_type)
 964			continue;
 965		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 966			continue;
 967
 968		type_found = true;
 969		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 970						  cur_flags);
 971		/*
 972		 * Use the access and other non-mapping-related flag bits from
 973		 * the memory placement flags to the current flags
 974		 */
 975		ttm_flag_masked(&cur_flags, place->flags,
 976				~TTM_PL_MASK_MEMTYPE);
 977
 978		if (mem_type == TTM_PL_SYSTEM) {
 979			mem->mem_type = mem_type;
 980			mem->placement = cur_flags;
 981			mem->mm_node = NULL;
 982			return 0;
 983		}
 984
 985		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
 986		if (ret == 0 && mem->mm_node) {
 987			mem->placement = cur_flags;
 988			return 0;
 989		}
 990		if (ret == -ERESTARTSYS)
 991			has_erestartsys = true;
 992	}
 993
 994	if (!type_found) {
 995		pr_err(TTM_PFX "No compatible memory type found\n");
 996		return -EINVAL;
 997	}
 998
 999	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1000}
1001EXPORT_SYMBOL(ttm_bo_mem_space);
1002
1003static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1004			      struct ttm_placement *placement,
1005			      struct ttm_operation_ctx *ctx)
1006{
1007	int ret = 0;
1008	struct ttm_mem_reg mem;
1009
1010	reservation_object_assert_held(bo->resv);
1011
1012	mem.num_pages = bo->num_pages;
1013	mem.size = mem.num_pages << PAGE_SHIFT;
1014	mem.page_alignment = bo->mem.page_alignment;
1015	mem.bus.io_reserved_vm = false;
1016	mem.bus.io_reserved_count = 0;
1017	/*
1018	 * Determine where to move the buffer.
1019	 */
1020	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1021	if (ret)
1022		goto out_unlock;
1023	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1024out_unlock:
1025	if (ret && mem.mm_node)
1026		ttm_bo_mem_put(bo, &mem);
1027	return ret;
1028}
1029
1030static bool ttm_bo_places_compat(const struct ttm_place *places,
1031				 unsigned num_placement,
1032				 struct ttm_mem_reg *mem,
1033				 uint32_t *new_flags)
1034{
1035	unsigned i;
1036
1037	for (i = 0; i < num_placement; i++) {
1038		const struct ttm_place *heap = &places[i];
1039
1040		if (mem->mm_node && (mem->start < heap->fpfn ||
1041		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1042			continue;
1043
1044		*new_flags = heap->flags;
1045		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1046		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1047		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1048		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1049			return true;
1050	}
1051	return false;
1052}
1053
1054bool ttm_bo_mem_compat(struct ttm_placement *placement,
1055		       struct ttm_mem_reg *mem,
1056		       uint32_t *new_flags)
1057{
1058	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1059				 mem, new_flags))
1060		return true;
1061
1062	if ((placement->busy_placement != placement->placement ||
1063	     placement->num_busy_placement > placement->num_placement) &&
1064	    ttm_bo_places_compat(placement->busy_placement,
1065				 placement->num_busy_placement,
1066				 mem, new_flags))
1067		return true;
1068
1069	return false;
1070}
1071EXPORT_SYMBOL(ttm_bo_mem_compat);
1072
1073int ttm_bo_validate(struct ttm_buffer_object *bo,
1074		    struct ttm_placement *placement,
1075		    struct ttm_operation_ctx *ctx)
1076{
1077	int ret;
1078	uint32_t new_flags;
1079
1080	reservation_object_assert_held(bo->resv);
1081	/*
1082	 * Check whether we need to move buffer.
1083	 */
1084	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1085		ret = ttm_bo_move_buffer(bo, placement, ctx);
1086		if (ret)
1087			return ret;
1088	} else {
1089		/*
1090		 * Use the access and other non-mapping-related flag bits from
1091		 * the compatible memory placement flags to the active flags
1092		 */
1093		ttm_flag_masked(&bo->mem.placement, new_flags,
1094				~TTM_PL_MASK_MEMTYPE);
1095	}
1096	/*
1097	 * We might need to add a TTM.
1098	 */
1099	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1100		ret = ttm_tt_create(bo, true);
1101		if (ret)
1102			return ret;
1103	}
1104	return 0;
1105}
1106EXPORT_SYMBOL(ttm_bo_validate);
1107
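/*
 * Editorial sketch, not part of the file: validating an already-reserved
 * buffer into write-combined VRAM.  my_bo_pin_vram() is a made-up helper,
 * error handling is trimmed, and the caller must hold bo->resv (e.g. via
 * ttm_bo_reserve()).
 */
static int my_bo_pin_vram(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_place place = {
		.fpfn = 0,	/* no range restriction */
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};

	return ttm_bo_validate(bo, &placement, &ctx);
}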
1108int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1109			 struct ttm_buffer_object *bo,
1110			 unsigned long size,
1111			 enum ttm_bo_type type,
1112			 struct ttm_placement *placement,
1113			 uint32_t page_alignment,
1114			 struct ttm_operation_ctx *ctx,
1115			 size_t acc_size,
1116			 struct sg_table *sg,
1117			 struct reservation_object *resv,
1118			 void (*destroy) (struct ttm_buffer_object *))
1119{
1120	int ret = 0;
1121	unsigned long num_pages;
1122	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1123	bool locked;
1124
1125	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1126	if (ret) {
1127		pr_err("Out of kernel memory\n");
1128		if (destroy)
1129			(*destroy)(bo);
1130		else
1131			kfree(bo);
1132		return -ENOMEM;
1133	}
1134
1135	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1136	if (num_pages == 0) {
1137		pr_err("Illegal buffer object size\n");
1138		if (destroy)
1139			(*destroy)(bo);
1140		else
1141			kfree(bo);
1142		ttm_mem_global_free(mem_glob, acc_size);
1143		return -EINVAL;
1144	}
1145	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1146
1147	kref_init(&bo->kref);
1148	kref_init(&bo->list_kref);
1149	atomic_set(&bo->cpu_writers, 0);
1150	INIT_LIST_HEAD(&bo->lru);
1151	INIT_LIST_HEAD(&bo->ddestroy);
1152	INIT_LIST_HEAD(&bo->swap);
1153	INIT_LIST_HEAD(&bo->io_reserve_lru);
1154	mutex_init(&bo->wu_mutex);
1155	bo->bdev = bdev;
1156	bo->type = type;
1157	bo->num_pages = num_pages;
1158	bo->mem.size = num_pages << PAGE_SHIFT;
1159	bo->mem.mem_type = TTM_PL_SYSTEM;
1160	bo->mem.num_pages = bo->num_pages;
1161	bo->mem.mm_node = NULL;
1162	bo->mem.page_alignment = page_alignment;
1163	bo->mem.bus.io_reserved_vm = false;
1164	bo->mem.bus.io_reserved_count = 0;
1165	bo->moving = NULL;
1166	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1167	bo->acc_size = acc_size;
1168	bo->sg = sg;
1169	if (resv) {
1170		bo->resv = resv;
1171		reservation_object_assert_held(bo->resv);
1172	} else {
1173		bo->resv = &bo->ttm_resv;
1174	}
1175	reservation_object_init(&bo->ttm_resv);
1176	atomic_inc(&bo->bdev->glob->bo_count);
1177	drm_vma_node_reset(&bo->vma_node);
1178	bo->priority = 0;
1179
1180	/*
1181	 * For ttm_bo_type_device buffers, allocate
1182	 * address space from the device.
1183	 */
1184	if (bo->type == ttm_bo_type_device ||
1185	    bo->type == ttm_bo_type_sg)
1186		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1187					 bo->mem.num_pages);
1188
1189	/* passed reservation objects should already be locked,
1190	 * since otherwise lockdep will be angered in radeon.
1191	 */
1192	if (!resv) {
1193		locked = reservation_object_trylock(bo->resv);
1194		WARN_ON(!locked);
1195	}
1196
1197	if (likely(!ret))
1198		ret = ttm_bo_validate(bo, placement, ctx);
1199
1200	if (unlikely(ret)) {
1201		if (!resv)
1202			ttm_bo_unreserve(bo);
1203
1204		ttm_bo_unref(&bo);
1205		return ret;
1206	}
1207
1208	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1209		spin_lock(&bdev->glob->lru_lock);
1210		ttm_bo_add_to_lru(bo);
1211		spin_unlock(&bdev->glob->lru_lock);
1212	}
1213
1214	return ret;
1215}
1216EXPORT_SYMBOL(ttm_bo_init_reserved);
1217
1218int ttm_bo_init(struct ttm_bo_device *bdev,
1219		struct ttm_buffer_object *bo,
1220		unsigned long size,
1221		enum ttm_bo_type type,
1222		struct ttm_placement *placement,
1223		uint32_t page_alignment,
1224		bool interruptible,
1225		size_t acc_size,
1226		struct sg_table *sg,
1227		struct reservation_object *resv,
1228		void (*destroy) (struct ttm_buffer_object *))
1229{
1230	struct ttm_operation_ctx ctx = { interruptible, false };
1231	int ret;
1232
1233	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1234				   page_alignment, &ctx, acc_size,
1235				   sg, resv, destroy);
1236	if (ret)
1237		return ret;
1238
1239	if (!resv)
1240		ttm_bo_unreserve(bo);
1241
1242	return 0;
1243}
1244EXPORT_SYMBOL(ttm_bo_init);
1245
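/*
 * Editorial sketch, not part of the file: the usual caller pattern embeds
 * the ttm_buffer_object in a driver structure, sizes the accounting with
 * ttm_bo_acc_size() (declared just below) and passes a destroy callback.
 * struct my_bo is the hypothetical wrapper from the sketch near
 * ttm_bo_eviction_valuable() above; my_bo_create() is made up.  Note that
 * ttm_bo_init() already destroys the object on failure, so the caller must
 * not free it again.
 */
static void my_bo_destroy(struct ttm_buffer_object *tbo)
{
	kfree(container_of(tbo, struct my_bo, tbo));
}

static int my_bo_create(struct ttm_bo_device *bdev, unsigned long size,
			struct ttm_placement *placement, struct my_bo **out)
{
	struct my_bo *mbo;
	size_t acc_size;
	int ret;

	mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
	if (!mbo)
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
			  placement, 0 /* page_alignment, in pages */,
			  true /* interruptible */, acc_size,
			  NULL /* sg */, NULL /* resv */, my_bo_destroy);
	if (ret)	/* mbo has already been freed via my_bo_destroy() */
		return ret;

	*out = mbo;
	return 0;
}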
1246size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1247		       unsigned long bo_size,
1248		       unsigned struct_size)
1249{
1250	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1251	size_t size = 0;
1252
1253	size += ttm_round_pot(struct_size);
1254	size += ttm_round_pot(npages * sizeof(void *));
1255	size += ttm_round_pot(sizeof(struct ttm_tt));
1256	return size;
1257}
1258EXPORT_SYMBOL(ttm_bo_acc_size);
1259
1260size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1261			   unsigned long bo_size,
1262			   unsigned struct_size)
1263{
1264	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1265	size_t size = 0;
1266
1267	size += ttm_round_pot(struct_size);
1268	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1269	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1270	return size;
1271}
1272EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1273
1274int ttm_bo_create(struct ttm_bo_device *bdev,
1275			unsigned long size,
1276			enum ttm_bo_type type,
1277			struct ttm_placement *placement,
1278			uint32_t page_alignment,
1279			bool interruptible,
1280			struct ttm_buffer_object **p_bo)
1281{
1282	struct ttm_buffer_object *bo;
1283	size_t acc_size;
1284	int ret;
1285
1286	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1287	if (unlikely(bo == NULL))
1288		return -ENOMEM;
1289
1290	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1291	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1292			  interruptible, acc_size,
1293			  NULL, NULL, NULL);
1294	if (likely(ret == 0))
1295		*p_bo = bo;
1296
1297	return ret;
1298}
1299EXPORT_SYMBOL(ttm_bo_create);
1300
1301static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1302				   unsigned mem_type)
1303{
1304	struct ttm_operation_ctx ctx = {
1305		.interruptible = false,
1306		.no_wait_gpu = false,
1307		.flags = TTM_OPT_FLAG_FORCE_ALLOC
1308	};
1309	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1310	struct ttm_bo_global *glob = bdev->glob;
1311	struct dma_fence *fence;
1312	int ret;
1313	unsigned i;
1314
1315	/*
1316	 * Can't use standard list traversal since we're unlocking.
1317	 */
1318
1319	spin_lock(&glob->lru_lock);
1320	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1321		while (!list_empty(&man->lru[i])) {
1322			spin_unlock(&glob->lru_lock);
1323			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
1324			if (ret)
1325				return ret;
1326			spin_lock(&glob->lru_lock);
1327		}
1328	}
1329	spin_unlock(&glob->lru_lock);
1330
1331	spin_lock(&man->move_lock);
1332	fence = dma_fence_get(man->move);
1333	spin_unlock(&man->move_lock);
1334
1335	if (fence) {
1336		ret = dma_fence_wait(fence, false);
1337		dma_fence_put(fence);
1338		if (ret)
1339			return ret;
1340	}
1341
1342	return 0;
1343}
1344
1345int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1346{
1347	struct ttm_mem_type_manager *man;
1348	int ret = -EINVAL;
1349
1350	if (mem_type >= TTM_NUM_MEM_TYPES) {
1351		pr_err("Illegal memory type %d\n", mem_type);
1352		return ret;
1353	}
1354	man = &bdev->man[mem_type];
1355
1356	if (!man->has_type) {
1357		pr_err("Trying to take down uninitialized memory manager type %u\n",
1358		       mem_type);
1359		return ret;
1360	}
1361
1362	man->use_type = false;
1363	man->has_type = false;
1364
1365	ret = 0;
1366	if (mem_type > 0) {
1367		ret = ttm_bo_force_list_clean(bdev, mem_type);
1368		if (ret) {
1369			pr_err("Cleanup eviction failed\n");
1370			return ret;
1371		}
1372
1373		ret = (*man->func->takedown)(man);
1374	}
1375
1376	dma_fence_put(man->move);
1377	man->move = NULL;
1378
1379	return ret;
1380}
1381EXPORT_SYMBOL(ttm_bo_clean_mm);
1382
1383int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1384{
1385	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1386
1387	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1388		pr_err("Illegal memory manager memory type %u\n", mem_type);
1389		return -EINVAL;
1390	}
1391
1392	if (!man->has_type) {
1393		pr_err("Memory type %u has not been initialized\n", mem_type);
1394		return 0;
1395	}
1396
1397	return ttm_bo_force_list_clean(bdev, mem_type);
1398}
1399EXPORT_SYMBOL(ttm_bo_evict_mm);
1400
1401int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1402			unsigned long p_size)
1403{
1404	int ret;
1405	struct ttm_mem_type_manager *man;
1406	unsigned i;
1407
1408	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1409	man = &bdev->man[type];
1410	BUG_ON(man->has_type);
1411	man->io_reserve_fastpath = true;
1412	man->use_io_reserve_lru = false;
1413	mutex_init(&man->io_reserve_mutex);
1414	spin_lock_init(&man->move_lock);
1415	INIT_LIST_HEAD(&man->io_reserve_lru);
1416
1417	ret = bdev->driver->init_mem_type(bdev, type, man);
1418	if (ret)
1419		return ret;
1420	man->bdev = bdev;
1421
1422	if (type != TTM_PL_SYSTEM) {
1423		ret = (*man->func->init)(man, p_size);
1424		if (ret)
1425			return ret;
1426	}
1427	man->has_type = true;
1428	man->use_type = true;
1429	man->size = p_size;
1430
1431	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1432		INIT_LIST_HEAD(&man->lru[i]);
1433	man->move = NULL;
1434
1435	return 0;
1436}
1437EXPORT_SYMBOL(ttm_bo_init_mm);
1438
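/*
 * Editorial sketch, not part of the file: after ttm_bo_device_init() has
 * set up TTM_PL_SYSTEM, a driver registers its fixed memory types.  Sizes
 * are in pages; vram_size and gtt_size are hypothetical driver values.
 * The driver's init_mem_type() callback, called above, fills in man->func,
 * man->flags and the caching masks for each type.
 */
static int my_ttm_init_mem_types(struct ttm_bo_device *bdev,
				 u64 vram_size, u64 gtt_size)
{
	int ret;

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
}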
1439static void ttm_bo_global_kobj_release(struct kobject *kobj)
1440{
1441	struct ttm_bo_global *glob =
1442		container_of(kobj, struct ttm_bo_global, kobj);
1443
1444	__free_page(glob->dummy_read_page);
1445	kfree(glob);
1446}
1447
1448void ttm_bo_global_release(struct drm_global_reference *ref)
1449{
1450	struct ttm_bo_global *glob = ref->object;
1451
1452	kobject_del(&glob->kobj);
1453	kobject_put(&glob->kobj);
1454}
1455EXPORT_SYMBOL(ttm_bo_global_release);
1456
1457int ttm_bo_global_init(struct drm_global_reference *ref)
1458{
1459	struct ttm_bo_global_ref *bo_ref =
1460		container_of(ref, struct ttm_bo_global_ref, ref);
1461	struct ttm_bo_global *glob = ref->object;
1462	int ret;
1463	unsigned i;
1464
1465	mutex_init(&glob->device_list_mutex);
1466	spin_lock_init(&glob->lru_lock);
1467	glob->mem_glob = bo_ref->mem_glob;
1468	glob->mem_glob->bo_glob = glob;
1469	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1470
1471	if (unlikely(glob->dummy_read_page == NULL)) {
1472		ret = -ENOMEM;
1473		goto out_no_drp;
1474	}
1475
1476	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1477		INIT_LIST_HEAD(&glob->swap_lru[i]);
1478	INIT_LIST_HEAD(&glob->device_list);
1479	atomic_set(&glob->bo_count, 0);
1480
1481	ret = kobject_init_and_add(
1482		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1483	if (unlikely(ret != 0))
1484		kobject_put(&glob->kobj);
1485	return ret;
1486out_no_drp:
1487	kfree(glob);
1488	return ret;
1489}
1490EXPORT_SYMBOL(ttm_bo_global_init);
1491
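/*
 * Editorial sketch, not part of the file: in this kernel version drivers
 * reach ttm_bo_global_init()/ttm_bo_global_release() indirectly through
 * the drm_global framework rather than calling them directly.  The wiring
 * below follows the pattern used by drivers such as radeon; "mdev" and its
 * fields are hypothetical, so verify the details against your own tree.
 */
static int my_ttm_global_init(struct my_device *mdev)
{
	struct drm_global_reference *global_ref = &mdev->bo_global_ref.ref;

	/* The BO global needs the already-referenced memory global. */
	mdev->bo_global_ref.mem_glob = mdev->mem_global_ref.object;

	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	return drm_global_item_ref(global_ref);
}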
1492
1493int ttm_bo_device_release(struct ttm_bo_device *bdev)
1494{
1495	int ret = 0;
1496	unsigned i = TTM_NUM_MEM_TYPES;
1497	struct ttm_mem_type_manager *man;
1498	struct ttm_bo_global *glob = bdev->glob;
1499
1500	while (i--) {
1501		man = &bdev->man[i];
1502		if (man->has_type) {
1503			man->use_type = false;
1504			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1505				ret = -EBUSY;
1506				pr_err("DRM memory manager type %d is not clean\n",
1507				       i);
1508			}
1509			man->has_type = false;
1510		}
1511	}
1512
1513	mutex_lock(&glob->device_list_mutex);
1514	list_del(&bdev->device_list);
1515	mutex_unlock(&glob->device_list_mutex);
1516
1517	cancel_delayed_work_sync(&bdev->wq);
1518
1519	if (ttm_bo_delayed_delete(bdev, true))
1520		pr_debug("Delayed destroy list was clean\n");
1521
1522	spin_lock(&glob->lru_lock);
1523	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1524		if (list_empty(&bdev->man[0].lru[0]))
1525			pr_debug("Swap list %d was clean\n", i);
1526	spin_unlock(&glob->lru_lock);
1527
1528	drm_vma_offset_manager_destroy(&bdev->vma_manager);
1529
1530	return ret;
1531}
1532EXPORT_SYMBOL(ttm_bo_device_release);
1533
1534int ttm_bo_device_init(struct ttm_bo_device *bdev,
1535		       struct ttm_bo_global *glob,
1536		       struct ttm_bo_driver *driver,
1537		       struct address_space *mapping,
1538		       uint64_t file_page_offset,
1539		       bool need_dma32)
1540{
1541	int ret = -EINVAL;
1542
1543	bdev->driver = driver;
1544
1545	memset(bdev->man, 0, sizeof(bdev->man));
1546
1547	/*
1548	 * Initialize the system memory buffer type.
1549	 * Other types need to be driver / IOCTL initialized.
1550	 */
1551	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1552	if (unlikely(ret != 0))
1553		goto out_no_sys;
1554
1555	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1556				    0x10000000);
1557	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1558	INIT_LIST_HEAD(&bdev->ddestroy);
1559	bdev->dev_mapping = mapping;
1560	bdev->glob = glob;
1561	bdev->need_dma32 = need_dma32;
1562	mutex_lock(&glob->device_list_mutex);
1563	list_add_tail(&bdev->device_list, &glob->device_list);
1564	mutex_unlock(&glob->device_list_mutex);
1565
1566	return 0;
1567out_no_sys:
1568	return ret;
1569}
1570EXPORT_SYMBOL(ttm_bo_device_init);
1571
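/*
 * Editorial sketch, not part of the file: a typical call into
 * ttm_bo_device_init() during driver load.  "my_bo_driver" and
 * MY_FILE_PAGE_OFFSET are made up; drivers of this era define their own
 * mmap offset constant (in pages) and pass the DRM device's anonymous
 * inode mapping.  my_ttm_init_mem_types() is the sketch shown after
 * ttm_bo_init_mm() above.
 */
static int my_ttm_device_init(struct my_device *mdev)
{
	int ret;

	ret = ttm_bo_device_init(&mdev->bdev,
				 mdev->bo_global_ref.ref.object,
				 &my_bo_driver,
				 mdev->ddev->anon_inode->i_mapping,
				 MY_FILE_PAGE_OFFSET,
				 mdev->need_dma32);
	if (ret)
		return ret;

	/* Fixed memory types (VRAM, TT) are registered afterwards. */
	return my_ttm_init_mem_types(&mdev->bdev, mdev->vram_size,
				     mdev->gtt_size);
}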
1572/*
1573 * buffer object vm functions.
1574 */
1575
1576bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1577{
1578	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1579
1580	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1581		if (mem->mem_type == TTM_PL_SYSTEM)
1582			return false;
1583
1584		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1585			return false;
1586
1587		if (mem->placement & TTM_PL_FLAG_CACHED)
1588			return false;
1589	}
1590	return true;
1591}
1592
1593void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1594{
1595	struct ttm_bo_device *bdev = bo->bdev;
1596
1597	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1598	ttm_mem_io_free_vm(bo);
1599}
1600
1601void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1602{
1603	struct ttm_bo_device *bdev = bo->bdev;
1604	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1605
1606	ttm_mem_io_lock(man, false);
1607	ttm_bo_unmap_virtual_locked(bo);
1608	ttm_mem_io_unlock(man);
1609}
1610
1611
1612EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1613
1614int ttm_bo_wait(struct ttm_buffer_object *bo,
1615		bool interruptible, bool no_wait)
1616{
1617	long timeout = 15 * HZ;
1618
1619	if (no_wait) {
1620		if (reservation_object_test_signaled_rcu(bo->resv, true))
1621			return 0;
1622		else
1623			return -EBUSY;
1624	}
1625
1626	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1627						      interruptible, timeout);
1628	if (timeout < 0)
1629		return timeout;
1630
1631	if (timeout == 0)
1632		return -EBUSY;
1633
1634	reservation_object_add_excl_fence(bo->resv, NULL);
1635	return 0;
1636}
1637EXPORT_SYMBOL(ttm_bo_wait);
1638
1639int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1640{
1641	int ret = 0;
1642
1643	/*
1644	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1645	 */
1646
1647	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1648	if (unlikely(ret != 0))
1649		return ret;
1650	ret = ttm_bo_wait(bo, true, no_wait);
1651	if (likely(ret == 0))
1652		atomic_inc(&bo->cpu_writers);
1653	ttm_bo_unreserve(bo);
1654	return ret;
1655}
1656EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1657
1658void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1659{
1660	atomic_dec(&bo->cpu_writers);
1661}
1662EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1663
1664/**
1665 * A buffer object shrink method that tries to swap out the first
1666 * buffer object on the bo_global::swap_lru list.
1667 */
1668int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1669{
1670	struct ttm_buffer_object *bo;
1671	int ret = -EBUSY;
1672	bool locked;
1673	unsigned i;
1674
1675	spin_lock(&glob->lru_lock);
1676	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1677		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1678			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
1679				ret = 0;
1680				break;
1681			}
1682		}
1683		if (!ret)
1684			break;
1685	}
1686
1687	if (ret) {
1688		spin_unlock(&glob->lru_lock);
1689		return ret;
1690	}
1691
1692	kref_get(&bo->list_kref);
1693
1694	if (!list_empty(&bo->ddestroy)) {
1695		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1696		kref_put(&bo->list_kref, ttm_bo_release_list);
1697		return ret;
1698	}
1699
1700	ttm_bo_del_from_lru(bo);
1701	spin_unlock(&glob->lru_lock);
1702
1703	/**
1704	 * Move to system cached
1705	 */
1706
1707	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1708	    bo->ttm->caching_state != tt_cached) {
1709		struct ttm_operation_ctx ctx = { false, false };
1710		struct ttm_mem_reg evict_mem;
1711
1712		evict_mem = bo->mem;
1713		evict_mem.mm_node = NULL;
1714		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1715		evict_mem.mem_type = TTM_PL_SYSTEM;
1716
1717		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1718		if (unlikely(ret != 0))
1719			goto out;
1720	}
1721
1722	/**
1723	 * Make sure BO is idle.
1724	 */
1725
1726	ret = ttm_bo_wait(bo, false, false);
1727	if (unlikely(ret != 0))
1728		goto out;
1729
1730	ttm_bo_unmap_virtual(bo);
1731
1732	/**
1733	 * Swap out. Buffer will be swapped in again as soon as
1734	 * anyone tries to access a ttm page.
1735	 */
1736
1737	if (bo->bdev->driver->swap_notify)
1738		bo->bdev->driver->swap_notify(bo);
1739
1740	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1741out:
1742
1743	/**
1744	 *
1745	 * Unreserve without putting on LRU to avoid swapping out an
1746	 * already swapped buffer.
1747	 */
1748	if (locked)
1749		reservation_object_unlock(bo->resv);
1750	kref_put(&bo->list_kref, ttm_bo_release_list);
1751	return ret;
1752}
1753EXPORT_SYMBOL(ttm_bo_swapout);
1754
1755void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1756{
1757	struct ttm_operation_ctx ctx = {
1758		.interruptible = false,
1759		.no_wait_gpu = false
1760	};
1761
1762	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
1763		;
1764}
1765EXPORT_SYMBOL(ttm_bo_swapout_all);
1766
1767/**
1768 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1769 * unreserved
1770 *
1771 * @bo: Pointer to buffer
1772 */
1773int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1774{
1775	int ret;
1776
1777	/*
 1778	 * In the absence of a wait_unlocked API,
1779	 * Use the bo::wu_mutex to avoid triggering livelocks due to
1780	 * concurrent use of this function. Note that this use of
1781	 * bo::wu_mutex can go away if we change locking order to
1782	 * mmap_sem -> bo::reserve.
1783	 */
1784	ret = mutex_lock_interruptible(&bo->wu_mutex);
1785	if (unlikely(ret != 0))
1786		return -ERESTARTSYS;
1787	if (!ww_mutex_is_locked(&bo->resv->lock))
1788		goto out_unlock;
1789	ret = reservation_object_lock_interruptible(bo->resv, NULL);
1790	if (ret == -EINTR)
1791		ret = -ERESTARTSYS;
1792	if (unlikely(ret != 0))
1793		goto out_unlock;
1794	reservation_object_unlock(bo->resv);
1795
1796out_unlock:
1797	mutex_unlock(&bo->wu_mutex);
1798	return ret;
1799}
v5.4
   1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28/*
  29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  30 */
  31
  32#define pr_fmt(fmt) "[TTM] " fmt
  33
  34#include <drm/ttm/ttm_module.h>
  35#include <drm/ttm/ttm_bo_driver.h>
  36#include <drm/ttm/ttm_placement.h>
  37#include <linux/jiffies.h>
  38#include <linux/slab.h>
  39#include <linux/sched.h>
  40#include <linux/mm.h>
  41#include <linux/file.h>
  42#include <linux/module.h>
  43#include <linux/atomic.h>
  44#include <linux/dma-resv.h>
  45
  46static void ttm_bo_global_kobj_release(struct kobject *kobj);
  47
  48/**
  49 * ttm_global_mutex - protecting the global BO state
  50 */
  51DEFINE_MUTEX(ttm_global_mutex);
  52unsigned ttm_bo_glob_use_count;
  53struct ttm_bo_global ttm_bo_glob;
  54
  55static struct attribute ttm_bo_count = {
  56	.name = "bo_count",
  57	.mode = S_IRUGO
  58};
  59
  60/* default destructor */
  61static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
  62{
  63	kfree(bo);
  64}
  65
  66static inline int ttm_mem_type_from_place(const struct ttm_place *place,
  67					  uint32_t *mem_type)
  68{
  69	int pos;
  70
  71	pos = ffs(place->flags & TTM_PL_MASK_MEM);
  72	if (unlikely(!pos))
  73		return -EINVAL;
  74
  75	*mem_type = pos - 1;
  76	return 0;
  77}
  78
  79static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
  80			       int mem_type)
  81{
  82	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  83
  84	drm_printf(p, "    has_type: %d\n", man->has_type);
  85	drm_printf(p, "    use_type: %d\n", man->use_type);
  86	drm_printf(p, "    flags: 0x%08X\n", man->flags);
  87	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
  88	drm_printf(p, "    size: %llu\n", man->size);
  89	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
  90	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
  91	if (mem_type != TTM_PL_SYSTEM)
  92		(*man->func->debug)(man, p);
  93}
  94
  95static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  96					struct ttm_placement *placement)
  97{
  98	struct drm_printer p = drm_debug_printer(TTM_PFX);
  99	int i, ret, mem_type;
 100
 101	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
 102		   bo, bo->mem.num_pages, bo->mem.size >> 10,
 103		   bo->mem.size >> 20);
 104	for (i = 0; i < placement->num_placement; i++) {
 105		ret = ttm_mem_type_from_place(&placement->placement[i],
 106						&mem_type);
 107		if (ret)
 108			return;
 109		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
 110			   i, placement->placement[i].flags, mem_type);
 111		ttm_mem_type_debug(bo->bdev, &p, mem_type);
 112	}
 113}
 114
 115static ssize_t ttm_bo_global_show(struct kobject *kobj,
 116				  struct attribute *attr,
 117				  char *buffer)
 118{
 119	struct ttm_bo_global *glob =
 120		container_of(kobj, struct ttm_bo_global, kobj);
 121
 122	return snprintf(buffer, PAGE_SIZE, "%d\n",
 123				atomic_read(&glob->bo_count));
 124}
 125
 126static struct attribute *ttm_bo_global_attrs[] = {
 127	&ttm_bo_count,
 128	NULL
 129};
 130
 131static const struct sysfs_ops ttm_bo_global_ops = {
 132	.show = &ttm_bo_global_show
 133};
 134
 135static struct kobj_type ttm_bo_glob_kobj_type  = {
 136	.release = &ttm_bo_global_kobj_release,
 137	.sysfs_ops = &ttm_bo_global_ops,
 138	.default_attrs = ttm_bo_global_attrs
 139};
 140
 141
 142static inline uint32_t ttm_bo_type_flags(unsigned type)
 143{
 144	return 1 << (type);
 145}
 146
 147static void ttm_bo_release_list(struct kref *list_kref)
 148{
 149	struct ttm_buffer_object *bo =
 150	    container_of(list_kref, struct ttm_buffer_object, list_kref);
 151	struct ttm_bo_device *bdev = bo->bdev;
 152	size_t acc_size = bo->acc_size;
 153
 154	BUG_ON(kref_read(&bo->list_kref));
 155	BUG_ON(kref_read(&bo->kref));
 156	BUG_ON(atomic_read(&bo->cpu_writers));
 157	BUG_ON(bo->mem.mm_node != NULL);
 158	BUG_ON(!list_empty(&bo->lru));
 159	BUG_ON(!list_empty(&bo->ddestroy));
 160	ttm_tt_destroy(bo->ttm);
 161	atomic_dec(&bo->bdev->glob->bo_count);
 162	dma_fence_put(bo->moving);
 163	if (!ttm_bo_uses_embedded_gem_object(bo))
 164		dma_resv_fini(&bo->base._resv);
 165	mutex_destroy(&bo->wu_mutex);
 166	bo->destroy(bo);
 167	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 168}
 169
 170static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 171				  struct ttm_mem_reg *mem)
 172{
 173	struct ttm_bo_device *bdev = bo->bdev;
 174	struct ttm_mem_type_manager *man;
 175
 176	dma_resv_assert_held(bo->base.resv);
 177
 178	if (!list_empty(&bo->lru))
 179		return;
 180
 181	if (mem->placement & TTM_PL_FLAG_NO_EVICT)
 182		return;
 183
 184	man = &bdev->man[mem->mem_type];
 185	list_add_tail(&bo->lru, &man->lru[bo->priority]);
 186	kref_get(&bo->list_kref);
 187
 188	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
 189	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
 190				     TTM_PAGE_FLAG_SWAPPED))) {
 191		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
 192		kref_get(&bo->list_kref);
 193	}
 194}
 195
 196void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 197{
 198	ttm_bo_add_mem_to_lru(bo, &bo->mem);
 199}
 200EXPORT_SYMBOL(ttm_bo_add_to_lru);
 201
 202static void ttm_bo_ref_bug(struct kref *list_kref)
 203{
 204	BUG();
 205}
 206
 207void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 208{
 209	struct ttm_bo_device *bdev = bo->bdev;
 210	bool notify = false;
 211
 212	if (!list_empty(&bo->swap)) {
 213		list_del_init(&bo->swap);
 214		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 215		notify = true;
 216	}
 217	if (!list_empty(&bo->lru)) {
 218		list_del_init(&bo->lru);
 219		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 220		notify = true;
 221	}
 222
 223	if (notify && bdev->driver->del_from_lru_notify)
 224		bdev->driver->del_from_lru_notify(bo);
 225}
 226
 227void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 228{
 229	struct ttm_bo_global *glob = bo->bdev->glob;
 230
 231	spin_lock(&glob->lru_lock);
 232	ttm_bo_del_from_lru(bo);
 233	spin_unlock(&glob->lru_lock);
 234}
 235EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 236
 237static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 238				     struct ttm_buffer_object *bo)
 239{
 240	if (!pos->first)
 241		pos->first = bo;
 242	pos->last = bo;
 243}
 244
 245void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 246			     struct ttm_lru_bulk_move *bulk)
 247{
 248	dma_resv_assert_held(bo->base.resv);
 249
 250	ttm_bo_del_from_lru(bo);
 251	ttm_bo_add_to_lru(bo);
 252
 253	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 254		switch (bo->mem.mem_type) {
 255		case TTM_PL_TT:
 256			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 257			break;
 258
 259		case TTM_PL_VRAM:
 260			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
 261			break;
 262		}
 263		if (bo->ttm && !(bo->ttm->page_flags &
 264				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
 265			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
 266	}
 267}
 268EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 269
 270void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 271{
 272	unsigned i;
 273
 274	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 275		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
 276		struct ttm_mem_type_manager *man;
 277
 278		if (!pos->first)
 279			continue;
 280
 281		dma_resv_assert_held(pos->first->base.resv);
 282		dma_resv_assert_held(pos->last->base.resv);
 283
 284		man = &pos->first->bdev->man[TTM_PL_TT];
 285		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 286				    &pos->last->lru);
 287	}
 288
 289	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 290		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
 291		struct ttm_mem_type_manager *man;
 292
 293		if (!pos->first)
 294			continue;
 295
 296		dma_resv_assert_held(pos->first->base.resv);
 297		dma_resv_assert_held(pos->last->base.resv);
 298
 299		man = &pos->first->bdev->man[TTM_PL_VRAM];
 300		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 301				    &pos->last->lru);
 302	}
 303
 304	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 305		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
 306		struct list_head *lru;
 307
 308		if (!pos->first)
 309			continue;
 310
 311		dma_resv_assert_held(pos->first->base.resv);
 312		dma_resv_assert_held(pos->last->base.resv);
 313
 314		lru = &pos->first->bdev->glob->swap_lru[i];
 315		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
 316	}
 317}
 318EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
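/*
 * Illustrative sketch, not part of the original file: how a driver might
 * batch LRU updates with the two helpers above.  The my_entry list is a
 * hypothetical piece of driver bookkeeping; the locking shown follows the
 * dma_resv_assert_held() checks in ttm_bo_move_to_lru_tail() and the fact
 * that the LRU lists themselves are protected by the global lru_lock.
 *
 *	struct ttm_lru_bulk_move bulk;
 *	struct my_entry *e;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&bdev->glob->lru_lock);
 *	list_for_each_entry(e, &my_validated_list, head) {
 *		// e->bo has its reservation held at this point
 *		ttm_bo_move_to_lru_tail(e->bo, &bulk);
 *	}
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&bdev->glob->lru_lock);
 */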
 319
 320static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 321				  struct ttm_mem_reg *mem, bool evict,
 322				  struct ttm_operation_ctx *ctx)
 323{
 324	struct ttm_bo_device *bdev = bo->bdev;
 325	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
 326	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
 327	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
 328	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
 329	int ret = 0;
 330
 331	if (old_is_pci || new_is_pci ||
 332	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
 333		ret = ttm_mem_io_lock(old_man, true);
 334		if (unlikely(ret != 0))
 335			goto out_err;
 336		ttm_bo_unmap_virtual_locked(bo);
 337		ttm_mem_io_unlock(old_man);
 338	}
 339
 340	/*
 341	 * Create and bind a ttm if required.
 342	 */
 343
 344	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 345		if (bo->ttm == NULL) {
 346			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
 347			ret = ttm_tt_create(bo, zero);
 348			if (ret)
 349				goto out_err;
 350		}
 351
 352		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 353		if (ret)
 354			goto out_err;
 355
 356		if (mem->mem_type != TTM_PL_SYSTEM) {
 357			ret = ttm_tt_bind(bo->ttm, mem, ctx);
 358			if (ret)
 359				goto out_err;
 360		}
 361
 362		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 363			if (bdev->driver->move_notify)
 364				bdev->driver->move_notify(bo, evict, mem);
 365			bo->mem = *mem;
 366			mem->mm_node = NULL;
 367			goto moved;
 368		}
 369	}
 370
 371	if (bdev->driver->move_notify)
 372		bdev->driver->move_notify(bo, evict, mem);
 373
 374	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 375	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 376		ret = ttm_bo_move_ttm(bo, ctx, mem);
 377	else if (bdev->driver->move)
 378		ret = bdev->driver->move(bo, evict, ctx, mem);
 379	else
 380		ret = ttm_bo_move_memcpy(bo, ctx, mem);
 381
 382	if (ret) {
 383		if (bdev->driver->move_notify) {
 384			swap(*mem, bo->mem);
 385			bdev->driver->move_notify(bo, false, mem);
 386			swap(*mem, bo->mem);
 387		}
 388
 389		goto out_err;
 390	}
 391
 392moved:
 393	if (bo->evicted) {
 394		if (bdev->driver->invalidate_caches) {
 395			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 396			if (ret)
 397				pr_err("Can not flush read caches\n");
 398		}
 399		bo->evicted = false;
 400	}
 401
 402	if (bo->mem.mm_node)
 403		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 404		    bdev->man[bo->mem.mem_type].gpu_offset;
 405	else
 406		bo->offset = 0;
 407
 408	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
 409	return 0;
 410
 411out_err:
 412	new_man = &bdev->man[bo->mem.mem_type];
 413	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 414		ttm_tt_destroy(bo->ttm);
 415		bo->ttm = NULL;
 416	}
 417
 418	return ret;
 419}
 420
 421/**
  422 * Called with the bo reservation held.
 423 * Will release GPU memory type usage on destruction.
 424 * This is the place to put in driver specific hooks to release
 425 * driver private resources.
  426 * The reservation lock is dropped by the caller.
 427 */
 428
 429static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 430{
 431	if (bo->bdev->driver->move_notify)
 432		bo->bdev->driver->move_notify(bo, false, NULL);
 433
 434	ttm_tt_destroy(bo->ttm);
 435	bo->ttm = NULL;
 436	ttm_bo_mem_put(bo, &bo->mem);
 437}
 438
 439static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 440{
 441	int r;
 442
 443	if (bo->base.resv == &bo->base._resv)
 444		return 0;
 445
 446	BUG_ON(!dma_resv_trylock(&bo->base._resv));
 447
 448	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
 449	if (r)
 450		dma_resv_unlock(&bo->base._resv);
 451
 452	return r;
 453}
 454
 455static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 456{
 457	struct dma_resv_list *fobj;
 458	struct dma_fence *fence;
 459	int i;
 460
 461	fobj = dma_resv_get_list(&bo->base._resv);
 462	fence = dma_resv_get_excl(&bo->base._resv);
 463	if (fence && !fence->ops->signaled)
 464		dma_fence_enable_sw_signaling(fence);
 465
 466	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 467		fence = rcu_dereference_protected(fobj->shared[i],
 468					dma_resv_held(bo->base.resv));
 469
 470		if (!fence->ops->signaled)
 471			dma_fence_enable_sw_signaling(fence);
 472	}
 473}
 474
 475static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 476{
 477	struct ttm_bo_device *bdev = bo->bdev;
 478	struct ttm_bo_global *glob = bdev->glob;
 479	int ret;
 480
 481	ret = ttm_bo_individualize_resv(bo);
 482	if (ret) {
 483		/* Last resort, if we fail to allocate memory for the
 484		 * fences block for the BO to become idle
 485		 */
 486		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
 487						    30 * HZ);
 488		spin_lock(&glob->lru_lock);
 489		goto error;
 490	}
 491
 492	spin_lock(&glob->lru_lock);
 493	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
 494	if (!ret) {
 495		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
 496			ttm_bo_del_from_lru(bo);
 497			spin_unlock(&glob->lru_lock);
 498			if (bo->base.resv != &bo->base._resv)
 499				dma_resv_unlock(&bo->base._resv);
 500
 501			ttm_bo_cleanup_memtype_use(bo);
 502			dma_resv_unlock(bo->base.resv);
 503			return;
 504		}
 505
 506		ttm_bo_flush_all_fences(bo);
 507
 508		/*
 509		 * Make NO_EVICT bos immediately available to
 510		 * shrinkers, now that they are queued for
 511		 * destruction.
 512		 */
 513		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 514			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
 515			ttm_bo_add_to_lru(bo);
 516		}
 517
 518		dma_resv_unlock(bo->base.resv);
 519	}
 520	if (bo->base.resv != &bo->base._resv)
 521		dma_resv_unlock(&bo->base._resv);
 522
 523error:
 524	kref_get(&bo->list_kref);
 525	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 526	spin_unlock(&glob->lru_lock);
 527
 528	schedule_delayed_work(&bdev->wq,
 529			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 530}
 531
 532/**
  533 * ttm_bo_cleanup_refs
 534 * If bo idle, remove from delayed- and lru lists, and unref.
 535 * If not idle, do nothing.
 536 *
  537 * Must be called with the lru_lock and the reservation held; this function
  538 * will drop the lru lock and optionally the reservation lock before returning.
 539 *
 540 * @interruptible         Any sleeps should occur interruptibly.
 541 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 542 * @unlock_resv           Unlock the reservation lock as well.
 543 */
 544
 545static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 546			       bool interruptible, bool no_wait_gpu,
 547			       bool unlock_resv)
 548{
 549	struct ttm_bo_global *glob = bo->bdev->glob;
 550	struct dma_resv *resv;
 551	int ret;
 552
 553	if (unlikely(list_empty(&bo->ddestroy)))
 554		resv = bo->base.resv;
 555	else
 556		resv = &bo->base._resv;
 557
 558	if (dma_resv_test_signaled_rcu(resv, true))
 559		ret = 0;
 560	else
 561		ret = -EBUSY;
 562
 563	if (ret && !no_wait_gpu) {
 564		long lret;
 565
 566		if (unlock_resv)
 567			dma_resv_unlock(bo->base.resv);
 568		spin_unlock(&glob->lru_lock);
 569
 570		lret = dma_resv_wait_timeout_rcu(resv, true,
 571							   interruptible,
 572							   30 * HZ);
 573
 574		if (lret < 0)
 575			return lret;
 576		else if (lret == 0)
 577			return -EBUSY;
 578
 579		spin_lock(&glob->lru_lock);
 580		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 581			/*
 582			 * We raced, and lost, someone else holds the reservation now,
 583			 * and is probably busy in ttm_bo_cleanup_memtype_use.
 584			 *
 585			 * Even if it's not the case, because we finished waiting any
 586			 * delayed destruction would succeed, so just return success
 587			 * here.
 588			 */
 589			spin_unlock(&glob->lru_lock);
 590			return 0;
 591		}
 592		ret = 0;
 593	}
 594
 595	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 596		if (unlock_resv)
 597			dma_resv_unlock(bo->base.resv);
 598		spin_unlock(&glob->lru_lock);
 599		return ret;
 600	}
 601
 602	ttm_bo_del_from_lru(bo);
 603	list_del_init(&bo->ddestroy);
 604	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 605
 606	spin_unlock(&glob->lru_lock);
 607	ttm_bo_cleanup_memtype_use(bo);
 608
 609	if (unlock_resv)
 610		dma_resv_unlock(bo->base.resv);
 611
 612	return 0;
 613}
 614
 615/**
 616 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 617 * encountered buffers.
 618 */
 619static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 620{
 621	struct ttm_bo_global *glob = bdev->glob;
 622	struct list_head removed;
 623	bool empty;
 624
 625	INIT_LIST_HEAD(&removed);
 626
 627	spin_lock(&glob->lru_lock);
 628	while (!list_empty(&bdev->ddestroy)) {
 629		struct ttm_buffer_object *bo;
 630
 631		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 632				      ddestroy);
 633		kref_get(&bo->list_kref);
 634		list_move_tail(&bo->ddestroy, &removed);
 635
 636		if (remove_all || bo->base.resv != &bo->base._resv) {
 637			spin_unlock(&glob->lru_lock);
 638			dma_resv_lock(bo->base.resv, NULL);
 639
 640			spin_lock(&glob->lru_lock);
 641			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 642
 643		} else if (dma_resv_trylock(bo->base.resv)) {
 644			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 645		} else {
 646			spin_unlock(&glob->lru_lock);
 647		}
 648
 649		kref_put(&bo->list_kref, ttm_bo_release_list);
 650		spin_lock(&glob->lru_lock);
 651	}
 652	list_splice_tail(&removed, &bdev->ddestroy);
 653	empty = list_empty(&bdev->ddestroy);
 654	spin_unlock(&glob->lru_lock);
 655
 656	return empty;
 657}
 658
 659static void ttm_bo_delayed_workqueue(struct work_struct *work)
 660{
 661	struct ttm_bo_device *bdev =
 662	    container_of(work, struct ttm_bo_device, wq.work);
 663
 664	if (!ttm_bo_delayed_delete(bdev, false))
 665		schedule_delayed_work(&bdev->wq,
 666				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 667}
 668
 669static void ttm_bo_release(struct kref *kref)
 670{
 671	struct ttm_buffer_object *bo =
 672	    container_of(kref, struct ttm_buffer_object, kref);
 673	struct ttm_bo_device *bdev = bo->bdev;
 674	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 675
 676	if (bo->bdev->driver->release_notify)
 677		bo->bdev->driver->release_notify(bo);
 678
 679	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
 680	ttm_mem_io_lock(man, false);
 681	ttm_mem_io_free_vm(bo);
 682	ttm_mem_io_unlock(man);
 683	ttm_bo_cleanup_refs_or_queue(bo);
 684	kref_put(&bo->list_kref, ttm_bo_release_list);
 685}
 686
 687void ttm_bo_put(struct ttm_buffer_object *bo)
 688{
 689	kref_put(&bo->kref, ttm_bo_release);
 690}
 691EXPORT_SYMBOL(ttm_bo_put);
 692
 693int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 694{
 695	return cancel_delayed_work_sync(&bdev->wq);
 696}
 697EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 698
 699void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 700{
 701	if (resched)
 702		schedule_delayed_work(&bdev->wq,
 703				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 704}
 705EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 706
 707static int ttm_bo_evict(struct ttm_buffer_object *bo,
 708			struct ttm_operation_ctx *ctx)
 709{
 710	struct ttm_bo_device *bdev = bo->bdev;
 711	struct ttm_mem_reg evict_mem;
 712	struct ttm_placement placement;
 713	int ret = 0;
 714
 715	dma_resv_assert_held(bo->base.resv);
 716
 717	placement.num_placement = 0;
 718	placement.num_busy_placement = 0;
 719	bdev->driver->evict_flags(bo, &placement);
 720
 721	if (!placement.num_placement && !placement.num_busy_placement) {
 722		ret = ttm_bo_pipeline_gutting(bo);
 723		if (ret)
 724			return ret;
 725
 726		return ttm_tt_create(bo, false);
 727	}
 728
 729	evict_mem = bo->mem;
 730	evict_mem.mm_node = NULL;
 731	evict_mem.bus.io_reserved_vm = false;
 732	evict_mem.bus.io_reserved_count = 0;
 733
 734	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 735	if (ret) {
 736		if (ret != -ERESTARTSYS) {
 737			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
 738			       bo);
 739			ttm_bo_mem_space_debug(bo, &placement);
 740		}
 741		goto out;
 742	}
 743
 744	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
 745	if (unlikely(ret)) {
 746		if (ret != -ERESTARTSYS)
 747			pr_err("Buffer eviction failed\n");
 748		ttm_bo_mem_put(bo, &evict_mem);
 749		goto out;
 750	}
 751	bo->evicted = true;
 752out:
 753	return ret;
 754}
 755
 756bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 757			      const struct ttm_place *place)
 758{
 759	/* Don't evict this BO if it's outside of the
 760	 * requested placement range
 761	 */
 762	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
 763	    (place->lpfn && place->lpfn <= bo->mem.start))
 764		return false;
 765
 766	return true;
 767}
 768EXPORT_SYMBOL(ttm_bo_eviction_valuable);
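/*
 * Illustrative sketch, not part of the original file: ttm_mem_evict_first()
 * below consults bdev->driver->eviction_valuable, so a driver can wrap the
 * default above with its own checks.  my_bo() and the pinned flag are
 * hypothetical driver-side details.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo(bo)->pinned)
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 *
 * with .eviction_valuable = my_eviction_valuable in the driver's
 * struct ttm_bo_driver.
 */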
 769
 770/**
  771 * Check whether the target bo may be evicted or swapped out, covering:
  772 *
  773 * a. if the bo shares its reservation object with ctx->resv, the
  774 * reservation is assumed to already be held, so it is not locked again;
  775 * return true directly when either the operation allows reserved eviction
  776 * or the target bo is already on the delayed free list;
  777 *
  778 * b. otherwise, trylock it.
 779 */
 780static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 781			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
 782{
 783	bool ret = false;
 784
 785	if (bo->base.resv == ctx->resv) {
 786		dma_resv_assert_held(bo->base.resv);
 787		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 788		    || !list_empty(&bo->ddestroy))
 789			ret = true;
 790		*locked = false;
 791		if (busy)
 792			*busy = false;
 793	} else {
 794		ret = dma_resv_trylock(bo->base.resv);
 795		*locked = ret;
 796		if (busy)
 797			*busy = !ret;
 798	}
 799
 800	return ret;
 801}
 802
 803/**
 804 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 805 *
 806 * @busy_bo: BO which couldn't be locked with trylock
 807 * @ctx: operation context
 808 * @ticket: acquire ticket
 809 *
 810 * Try to lock a busy buffer object to avoid failing eviction.
 811 */
 812static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 813				   struct ttm_operation_ctx *ctx,
 814				   struct ww_acquire_ctx *ticket)
 815{
 816	int r;
 817
 818	if (!busy_bo || !ticket)
 819		return -EBUSY;
 820
 821	if (ctx->interruptible)
 822		r = dma_resv_lock_interruptible(busy_bo->base.resv,
 823							  ticket);
 824	else
 825		r = dma_resv_lock(busy_bo->base.resv, ticket);
 826
 827	/*
 828	 * TODO: It would be better to keep the BO locked until allocation is at
 829	 * least tried one more time, but that would mean a much larger rework
 830	 * of TTM.
 831	 */
 832	if (!r)
 833		dma_resv_unlock(busy_bo->base.resv);
 834
 835	return r == -EDEADLK ? -EBUSY : r;
 836}
 837
 838static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 839			       uint32_t mem_type,
 840			       const struct ttm_place *place,
 841			       struct ttm_operation_ctx *ctx,
 842			       struct ww_acquire_ctx *ticket)
 843{
 844	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
 845	struct ttm_bo_global *glob = bdev->glob;
 846	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 847	bool locked = false;
 848	unsigned i;
 849	int ret;
 850
 851	spin_lock(&glob->lru_lock);
 852	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 853		list_for_each_entry(bo, &man->lru[i], lru) {
 854			bool busy;
 855
 856			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 857							    &busy)) {
 858				if (busy && !busy_bo && ticket !=
 859				    dma_resv_locking_ctx(bo->base.resv))
 860					busy_bo = bo;
 861				continue;
 862			}
 863
 864			if (place && !bdev->driver->eviction_valuable(bo,
 865								      place)) {
 866				if (locked)
 867					dma_resv_unlock(bo->base.resv);
 868				continue;
 869			}
 870			break;
 871		}
 872
 873		/* If the inner loop terminated early, we have our candidate */
 874		if (&bo->lru != &man->lru[i])
 875			break;
 876
 877		bo = NULL;
 878	}
 879
 880	if (!bo) {
 881		if (busy_bo)
 882			kref_get(&busy_bo->list_kref);
 883		spin_unlock(&glob->lru_lock);
 884		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 885		if (busy_bo)
 886			kref_put(&busy_bo->list_kref, ttm_bo_release_list);
 887		return ret;
 888	}
 889
 890	kref_get(&bo->list_kref);
 891
 892	if (!list_empty(&bo->ddestroy)) {
 893		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
 894					  ctx->no_wait_gpu, locked);
 895		kref_put(&bo->list_kref, ttm_bo_release_list);
 896		return ret;
 897	}
 898
 899	ttm_bo_del_from_lru(bo);
 900	spin_unlock(&glob->lru_lock);
 901
 902	ret = ttm_bo_evict(bo, ctx);
 903	if (locked) {
 904		ttm_bo_unreserve(bo);
 905	} else {
 906		spin_lock(&glob->lru_lock);
 907		ttm_bo_add_to_lru(bo);
 908		spin_unlock(&glob->lru_lock);
 909	}
 910
 911	kref_put(&bo->list_kref, ttm_bo_release_list);
 912	return ret;
 913}
 914
 915void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 916{
 917	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 918
 919	if (mem->mm_node)
 920		(*man->func->put_node)(man, mem);
 921}
 922EXPORT_SYMBOL(ttm_bo_mem_put);
 923
 924/**
 925 * Add the last move fence to the BO and reserve a new shared slot.
 926 */
 927static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 928				 struct ttm_mem_type_manager *man,
 929				 struct ttm_mem_reg *mem)
 930{
 931	struct dma_fence *fence;
 932	int ret;
 933
 934	spin_lock(&man->move_lock);
 935	fence = dma_fence_get(man->move);
 936	spin_unlock(&man->move_lock);
 937
 938	if (fence) {
 939		dma_resv_add_shared_fence(bo->base.resv, fence);
 940
 941		ret = dma_resv_reserve_shared(bo->base.resv, 1);
 942		if (unlikely(ret)) {
 943			dma_fence_put(fence);
 944			return ret;
 945		}
 946
 947		dma_fence_put(bo->moving);
 948		bo->moving = fence;
 949	}
 950
 951	return 0;
 952}
 953
 954/**
 955 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 956 * space, or we've evicted everything and there isn't enough space.
 957 */
 958static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 959				  const struct ttm_place *place,
 960				  struct ttm_mem_reg *mem,
 961				  struct ttm_operation_ctx *ctx)
 962{
 963	struct ttm_bo_device *bdev = bo->bdev;
 964	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 965	struct ww_acquire_ctx *ticket;
 966	int ret;
 967
 968	ticket = dma_resv_locking_ctx(bo->base.resv);
 969	do {
 970		ret = (*man->func->get_node)(man, bo, place, mem);
 971		if (unlikely(ret != 0))
 972			return ret;
 973		if (mem->mm_node)
 974			break;
 975		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 976					  ticket);
 977		if (unlikely(ret != 0))
 978			return ret;
 979	} while (1);
 980
 981	return ttm_bo_add_move_fence(bo, man, mem);
 982}
 983
 984static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 985				      uint32_t cur_placement,
 986				      uint32_t proposed_placement)
 987{
 988	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
 989	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 990
 991	/**
 992	 * Keep current caching if possible.
 993	 */
 994
 995	if ((cur_placement & caching) != 0)
 996		result |= (cur_placement & caching);
 997	else if ((man->default_caching & caching) != 0)
 998		result |= man->default_caching;
 999	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
1000		result |= TTM_PL_FLAG_CACHED;
1001	else if ((TTM_PL_FLAG_WC & caching) != 0)
1002		result |= TTM_PL_FLAG_WC;
1003	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
1004		result |= TTM_PL_FLAG_UNCACHED;
1005
1006	return result;
1007}
1008
1009static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
1010				 uint32_t mem_type,
1011				 const struct ttm_place *place,
1012				 uint32_t *masked_placement)
1013{
1014	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
1015
1016	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
1017		return false;
1018
1019	if ((place->flags & man->available_caching) == 0)
1020		return false;
1021
1022	cur_flags |= (place->flags & man->available_caching);
1023
1024	*masked_placement = cur_flags;
1025	return true;
1026}
1027
1028/**
1029 * ttm_bo_mem_placement - check if placement is compatible
1030 * @bo: BO to find memory for
1031 * @place: where to search
1032 * @mem: the memory object to fill in
1033 * @ctx: operation context
1034 *
1035 * Check if placement is compatible and fill in mem structure.
 1036 * Returns 0 when the placement can be used, -EBUSY if this placement
 1037 * won't work, or another negative error code on failure.
1038 */
1039static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
1040				const struct ttm_place *place,
1041				struct ttm_mem_reg *mem,
1042				struct ttm_operation_ctx *ctx)
1043{
1044	struct ttm_bo_device *bdev = bo->bdev;
1045	uint32_t mem_type = TTM_PL_SYSTEM;
1046	struct ttm_mem_type_manager *man;
1047	uint32_t cur_flags = 0;
1048	int ret;
1049
1050	ret = ttm_mem_type_from_place(place, &mem_type);
1051	if (ret)
1052		return ret;
1053
1054	man = &bdev->man[mem_type];
1055	if (!man->has_type || !man->use_type)
1056		return -EBUSY;
1057
1058	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
1059		return -EBUSY;
1060
1061	cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
1062	/*
 1063	 * Fold the access and other non-mapping-related flag bits from
 1064	 * the memory placement flags into the current flags.
1065	 */
1066	ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
1067
1068	mem->mem_type = mem_type;
1069	mem->placement = cur_flags;
1070
1071	if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
1072		spin_lock(&bo->bdev->glob->lru_lock);
1073		ttm_bo_del_from_lru(bo);
1074		ttm_bo_add_mem_to_lru(bo, mem);
1075		spin_unlock(&bo->bdev->glob->lru_lock);
1076	}
1077
1078	return 0;
1079}
1080
1081/**
1082 * Creates space for memory region @mem according to its type.
1083 *
1084 * This function first searches for free space in compatible memory types in
1085 * the priority order defined by the driver.  If free space isn't found, then
1086 * ttm_bo_mem_force_space is attempted in priority order to evict and find
1087 * space.
1088 */
1089int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1090			struct ttm_placement *placement,
1091			struct ttm_mem_reg *mem,
1092			struct ttm_operation_ctx *ctx)
1093{
1094	struct ttm_bo_device *bdev = bo->bdev;
1095	bool type_found = false;
1096	int i, ret;
1097
1098	ret = dma_resv_reserve_shared(bo->base.resv, 1);
1099	if (unlikely(ret))
1100		return ret;
1101
1102	mem->mm_node = NULL;
1103	for (i = 0; i < placement->num_placement; ++i) {
1104		const struct ttm_place *place = &placement->placement[i];
1105		struct ttm_mem_type_manager *man;
1106
1107		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1108		if (ret == -EBUSY)
1109			continue;
1110		if (ret)
1111			goto error;
1112
1113		type_found = true;
1114		mem->mm_node = NULL;
1115		if (mem->mem_type == TTM_PL_SYSTEM)
1116			return 0;
1117
1118		man = &bdev->man[mem->mem_type];
1119		ret = (*man->func->get_node)(man, bo, place, mem);
1120		if (unlikely(ret))
1121			goto error;
1122
1123		if (mem->mm_node) {
1124			ret = ttm_bo_add_move_fence(bo, man, mem);
1125			if (unlikely(ret)) {
1126				(*man->func->put_node)(man, mem);
1127				goto error;
1128			}
1129			return 0;
1130		}
1131	}
1132
1133	for (i = 0; i < placement->num_busy_placement; ++i) {
1134		const struct ttm_place *place = &placement->busy_placement[i];
1135
1136		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1137		if (ret == -EBUSY)
1138			continue;
1139		if (ret)
1140			goto error;
1141
1142		type_found = true;
1143		mem->mm_node = NULL;
1144		if (mem->mem_type == TTM_PL_SYSTEM)
1145			return 0;
1146
1147		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1148		if (ret == 0 && mem->mm_node)
1149			return 0;
1150
1151		if (ret && ret != -EBUSY)
1152			goto error;
1153	}
1154
1155	ret = -ENOMEM;
1156	if (!type_found) {
 1157		pr_err("No compatible memory type found\n");
1158		ret = -EINVAL;
1159	}
1160
1161error:
1162	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1163		spin_lock(&bo->bdev->glob->lru_lock);
1164		ttm_bo_move_to_lru_tail(bo, NULL);
1165		spin_unlock(&bo->bdev->glob->lru_lock);
1166	}
1167
1168	return ret;
1169}
1170EXPORT_SYMBOL(ttm_bo_mem_space);
1171
1172static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1173			      struct ttm_placement *placement,
1174			      struct ttm_operation_ctx *ctx)
1175{
1176	int ret = 0;
1177	struct ttm_mem_reg mem;
1178
1179	dma_resv_assert_held(bo->base.resv);
1180
1181	mem.num_pages = bo->num_pages;
1182	mem.size = mem.num_pages << PAGE_SHIFT;
1183	mem.page_alignment = bo->mem.page_alignment;
1184	mem.bus.io_reserved_vm = false;
1185	mem.bus.io_reserved_count = 0;
1186	/*
1187	 * Determine where to move the buffer.
1188	 */
1189	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1190	if (ret)
1191		goto out_unlock;
1192	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1193out_unlock:
1194	if (ret && mem.mm_node)
1195		ttm_bo_mem_put(bo, &mem);
1196	return ret;
1197}
1198
1199static bool ttm_bo_places_compat(const struct ttm_place *places,
1200				 unsigned num_placement,
1201				 struct ttm_mem_reg *mem,
1202				 uint32_t *new_flags)
1203{
1204	unsigned i;
1205
1206	for (i = 0; i < num_placement; i++) {
1207		const struct ttm_place *heap = &places[i];
1208
1209		if (mem->mm_node && (mem->start < heap->fpfn ||
1210		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1211			continue;
1212
1213		*new_flags = heap->flags;
1214		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1215		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1216		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1217		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1218			return true;
1219	}
1220	return false;
1221}
1222
1223bool ttm_bo_mem_compat(struct ttm_placement *placement,
1224		       struct ttm_mem_reg *mem,
1225		       uint32_t *new_flags)
1226{
1227	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1228				 mem, new_flags))
1229		return true;
1230
1231	if ((placement->busy_placement != placement->placement ||
1232	     placement->num_busy_placement > placement->num_placement) &&
1233	    ttm_bo_places_compat(placement->busy_placement,
1234				 placement->num_busy_placement,
1235				 mem, new_flags))
1236		return true;
1237
1238	return false;
1239}
1240EXPORT_SYMBOL(ttm_bo_mem_compat);
1241
1242int ttm_bo_validate(struct ttm_buffer_object *bo,
1243		    struct ttm_placement *placement,
1244		    struct ttm_operation_ctx *ctx)
1245{
1246	int ret;
1247	uint32_t new_flags;
1248
1249	dma_resv_assert_held(bo->base.resv);
1250	/*
1251	 * Check whether we need to move buffer.
1252	 */
1253	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1254		ret = ttm_bo_move_buffer(bo, placement, ctx);
1255		if (ret)
1256			return ret;
1257	} else {
1258		/*
 1259		 * Fold the access and other non-mapping-related flag bits from
 1260		 * the compatible memory placement flags into the active flags.
1261		 */
1262		ttm_flag_masked(&bo->mem.placement, new_flags,
1263				~TTM_PL_MASK_MEMTYPE);
1264	}
1265	/*
1266	 * We might need to add a TTM.
1267	 */
1268	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1269		ret = ttm_tt_create(bo, true);
1270		if (ret)
1271			return ret;
1272	}
1273	return 0;
1274}
1275EXPORT_SYMBOL(ttm_bo_validate);
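/*
 * Illustrative sketch, not part of the original file: typical driver-side
 * use of ttm_bo_validate(), here moving a buffer to cached system memory.
 * The reserve/validate/unreserve pattern follows from the
 * dma_resv_assert_held() requirement above; the placement chosen is just
 * an example.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	struct ttm_place place = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &place,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */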
1276
1277int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1278			 struct ttm_buffer_object *bo,
1279			 unsigned long size,
1280			 enum ttm_bo_type type,
1281			 struct ttm_placement *placement,
1282			 uint32_t page_alignment,
1283			 struct ttm_operation_ctx *ctx,
1284			 size_t acc_size,
1285			 struct sg_table *sg,
1286			 struct dma_resv *resv,
1287			 void (*destroy) (struct ttm_buffer_object *))
1288{
1289	int ret = 0;
1290	unsigned long num_pages;
1291	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1292	bool locked;
1293
1294	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1295	if (ret) {
1296		pr_err("Out of kernel memory\n");
1297		if (destroy)
1298			(*destroy)(bo);
1299		else
1300			kfree(bo);
1301		return -ENOMEM;
1302	}
1303
1304	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1305	if (num_pages == 0) {
1306		pr_err("Illegal buffer object size\n");
1307		if (destroy)
1308			(*destroy)(bo);
1309		else
1310			kfree(bo);
1311		ttm_mem_global_free(mem_glob, acc_size);
1312		return -EINVAL;
1313	}
1314	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1315
1316	kref_init(&bo->kref);
1317	kref_init(&bo->list_kref);
1318	atomic_set(&bo->cpu_writers, 0);
1319	INIT_LIST_HEAD(&bo->lru);
1320	INIT_LIST_HEAD(&bo->ddestroy);
1321	INIT_LIST_HEAD(&bo->swap);
1322	INIT_LIST_HEAD(&bo->io_reserve_lru);
1323	mutex_init(&bo->wu_mutex);
1324	bo->bdev = bdev;
1325	bo->type = type;
1326	bo->num_pages = num_pages;
1327	bo->mem.size = num_pages << PAGE_SHIFT;
1328	bo->mem.mem_type = TTM_PL_SYSTEM;
1329	bo->mem.num_pages = bo->num_pages;
1330	bo->mem.mm_node = NULL;
1331	bo->mem.page_alignment = page_alignment;
1332	bo->mem.bus.io_reserved_vm = false;
1333	bo->mem.bus.io_reserved_count = 0;
1334	bo->moving = NULL;
1335	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1336	bo->acc_size = acc_size;
1337	bo->sg = sg;
1338	if (resv) {
1339		bo->base.resv = resv;
1340		dma_resv_assert_held(bo->base.resv);
1341	} else {
1342		bo->base.resv = &bo->base._resv;
1343	}
1344	if (!ttm_bo_uses_embedded_gem_object(bo)) {
1345		/*
 1346		 * bo.gem is not initialized, so we have to set up the
 1347		 * struct elements we want to use regardless.
1348		 */
1349		dma_resv_init(&bo->base._resv);
1350		drm_vma_node_reset(&bo->base.vma_node);
1351	}
1352	atomic_inc(&bo->bdev->glob->bo_count);
1353
1354	/*
1355	 * For ttm_bo_type_device buffers, allocate
1356	 * address space from the device.
1357	 */
1358	if (bo->type == ttm_bo_type_device ||
1359	    bo->type == ttm_bo_type_sg)
1360		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
1361					 bo->mem.num_pages);
1362
1363	/* passed reservation objects should already be locked,
1364	 * since otherwise lockdep will be angered in radeon.
1365	 */
1366	if (!resv) {
1367		locked = dma_resv_trylock(bo->base.resv);
1368		WARN_ON(!locked);
1369	}
1370
1371	if (likely(!ret))
1372		ret = ttm_bo_validate(bo, placement, ctx);
1373
1374	if (unlikely(ret)) {
1375		if (!resv)
1376			ttm_bo_unreserve(bo);
1377
1378		ttm_bo_put(bo);
1379		return ret;
1380	}
1381
1382	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1383		spin_lock(&bdev->glob->lru_lock);
1384		ttm_bo_add_to_lru(bo);
1385		spin_unlock(&bdev->glob->lru_lock);
1386	}
1387
1388	return ret;
1389}
1390EXPORT_SYMBOL(ttm_bo_init_reserved);
1391
1392int ttm_bo_init(struct ttm_bo_device *bdev,
1393		struct ttm_buffer_object *bo,
1394		unsigned long size,
1395		enum ttm_bo_type type,
1396		struct ttm_placement *placement,
1397		uint32_t page_alignment,
1398		bool interruptible,
1399		size_t acc_size,
1400		struct sg_table *sg,
1401		struct dma_resv *resv,
1402		void (*destroy) (struct ttm_buffer_object *))
1403{
1404	struct ttm_operation_ctx ctx = { interruptible, false };
1405	int ret;
1406
1407	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1408				   page_alignment, &ctx, acc_size,
1409				   sg, resv, destroy);
1410	if (ret)
1411		return ret;
1412
1413	if (!resv)
1414		ttm_bo_unreserve(bo);
1415
1416	return 0;
1417}
1418EXPORT_SYMBOL(ttm_bo_init);
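/*
 * Illustrative sketch, not part of the original file: drivers normally
 * embed the ttm_buffer_object in their own structure and pass a destroy
 * callback, so the final unref frees the containing object.  struct my_bo
 * and my_bo_destroy() are hypothetical.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *		// driver private state
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, true, acc_size,
 *			  NULL, NULL, my_bo_destroy);
 */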
1419
1420size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1421		       unsigned long bo_size,
1422		       unsigned struct_size)
1423{
1424	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1425	size_t size = 0;
1426
1427	size += ttm_round_pot(struct_size);
1428	size += ttm_round_pot(npages * sizeof(void *));
1429	size += ttm_round_pot(sizeof(struct ttm_tt));
1430	return size;
1431}
1432EXPORT_SYMBOL(ttm_bo_acc_size);
1433
1434size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1435			   unsigned long bo_size,
1436			   unsigned struct_size)
1437{
1438	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1439	size_t size = 0;
1440
1441	size += ttm_round_pot(struct_size);
1442	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1443	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1444	return size;
1445}
1446EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1447
1448int ttm_bo_create(struct ttm_bo_device *bdev,
1449			unsigned long size,
1450			enum ttm_bo_type type,
1451			struct ttm_placement *placement,
1452			uint32_t page_alignment,
1453			bool interruptible,
1454			struct ttm_buffer_object **p_bo)
1455{
1456	struct ttm_buffer_object *bo;
1457	size_t acc_size;
1458	int ret;
1459
1460	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1461	if (unlikely(bo == NULL))
1462		return -ENOMEM;
1463
1464	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1465	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1466			  interruptible, acc_size,
1467			  NULL, NULL, NULL);
1468	if (likely(ret == 0))
1469		*p_bo = bo;
1470
1471	return ret;
1472}
1473EXPORT_SYMBOL(ttm_bo_create);
1474
1475static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1476				   unsigned mem_type)
1477{
1478	struct ttm_operation_ctx ctx = {
1479		.interruptible = false,
1480		.no_wait_gpu = false,
1481		.flags = TTM_OPT_FLAG_FORCE_ALLOC
1482	};
1483	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1484	struct ttm_bo_global *glob = bdev->glob;
1485	struct dma_fence *fence;
1486	int ret;
1487	unsigned i;
1488
1489	/*
1490	 * Can't use standard list traversal since we're unlocking.
1491	 */
1492
1493	spin_lock(&glob->lru_lock);
1494	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1495		while (!list_empty(&man->lru[i])) {
1496			spin_unlock(&glob->lru_lock);
1497			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
1498						  NULL);
1499			if (ret)
1500				return ret;
1501			spin_lock(&glob->lru_lock);
1502		}
1503	}
1504	spin_unlock(&glob->lru_lock);
1505
1506	spin_lock(&man->move_lock);
1507	fence = dma_fence_get(man->move);
1508	spin_unlock(&man->move_lock);
1509
1510	if (fence) {
1511		ret = dma_fence_wait(fence, false);
1512		dma_fence_put(fence);
1513		if (ret)
1514			return ret;
1515	}
1516
1517	return 0;
1518}
1519
1520int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1521{
1522	struct ttm_mem_type_manager *man;
1523	int ret = -EINVAL;
1524
1525	if (mem_type >= TTM_NUM_MEM_TYPES) {
1526		pr_err("Illegal memory type %d\n", mem_type);
1527		return ret;
1528	}
1529	man = &bdev->man[mem_type];
1530
1531	if (!man->has_type) {
1532		pr_err("Trying to take down uninitialized memory manager type %u\n",
1533		       mem_type);
1534		return ret;
1535	}
1536
1537	man->use_type = false;
1538	man->has_type = false;
1539
1540	ret = 0;
1541	if (mem_type > 0) {
1542		ret = ttm_bo_force_list_clean(bdev, mem_type);
1543		if (ret) {
1544			pr_err("Cleanup eviction failed\n");
1545			return ret;
1546		}
1547
1548		ret = (*man->func->takedown)(man);
1549	}
1550
1551	dma_fence_put(man->move);
1552	man->move = NULL;
1553
1554	return ret;
1555}
1556EXPORT_SYMBOL(ttm_bo_clean_mm);
1557
1558int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1559{
1560	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1561
1562	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1563		pr_err("Illegal memory manager memory type %u\n", mem_type);
1564		return -EINVAL;
1565	}
1566
1567	if (!man->has_type) {
1568		pr_err("Memory type %u has not been initialized\n", mem_type);
1569		return 0;
1570	}
1571
1572	return ttm_bo_force_list_clean(bdev, mem_type);
1573}
1574EXPORT_SYMBOL(ttm_bo_evict_mm);
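/*
 * Illustrative sketch, not part of the original file: a common use of
 * ttm_bo_evict_mm() is evicting device memory on suspend.  TTM_PL_VRAM is
 * only an example memory type and my_dev is hypothetical.
 *
 *	ret = ttm_bo_evict_mm(&my_dev->bdev, TTM_PL_VRAM);
 *	if (ret)
 *		dev_err(my_dev->dev, "failed to evict VRAM before suspend\n");
 */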
1575
1576int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1577			unsigned long p_size)
1578{
1579	int ret;
1580	struct ttm_mem_type_manager *man;
1581	unsigned i;
1582
1583	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1584	man = &bdev->man[type];
1585	BUG_ON(man->has_type);
1586	man->io_reserve_fastpath = true;
1587	man->use_io_reserve_lru = false;
1588	mutex_init(&man->io_reserve_mutex);
1589	spin_lock_init(&man->move_lock);
1590	INIT_LIST_HEAD(&man->io_reserve_lru);
1591
1592	ret = bdev->driver->init_mem_type(bdev, type, man);
1593	if (ret)
1594		return ret;
1595	man->bdev = bdev;
1596
1597	if (type != TTM_PL_SYSTEM) {
1598		ret = (*man->func->init)(man, p_size);
1599		if (ret)
1600			return ret;
1601	}
1602	man->has_type = true;
1603	man->use_type = true;
1604	man->size = p_size;
1605
1606	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1607		INIT_LIST_HEAD(&man->lru[i]);
1608	man->move = NULL;
1609
1610	return 0;
1611}
1612EXPORT_SYMBOL(ttm_bo_init_mm);
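/*
 * Illustrative sketch, not part of the original file: after
 * ttm_bo_device_init() has set up TTM_PL_SYSTEM, the driver registers the
 * additional memory types it exposes.  The sizes are hypothetical and are
 * assumed to be given in pages.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 */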
1613
1614static void ttm_bo_global_kobj_release(struct kobject *kobj)
1615{
1616	struct ttm_bo_global *glob =
1617		container_of(kobj, struct ttm_bo_global, kobj);
1618
1619	__free_page(glob->dummy_read_page);
1620}
1621
1622static void ttm_bo_global_release(void)
1623{
1624	struct ttm_bo_global *glob = &ttm_bo_glob;
1625
1626	mutex_lock(&ttm_global_mutex);
1627	if (--ttm_bo_glob_use_count > 0)
1628		goto out;
1629
1630	kobject_del(&glob->kobj);
1631	kobject_put(&glob->kobj);
1632	ttm_mem_global_release(&ttm_mem_glob);
1633	memset(glob, 0, sizeof(*glob));
1634out:
1635	mutex_unlock(&ttm_global_mutex);
1636}
1637
1638static int ttm_bo_global_init(void)
1639{
1640	struct ttm_bo_global *glob = &ttm_bo_glob;
1641	int ret = 0;
1642	unsigned i;
1643
1644	mutex_lock(&ttm_global_mutex);
1645	if (++ttm_bo_glob_use_count > 1)
1646		goto out;
1647
1648	ret = ttm_mem_global_init(&ttm_mem_glob);
1649	if (ret)
1650		goto out;
1651
1652	spin_lock_init(&glob->lru_lock);
1653	glob->mem_glob = &ttm_mem_glob;
1654	glob->mem_glob->bo_glob = glob;
1655	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1656
1657	if (unlikely(glob->dummy_read_page == NULL)) {
1658		ret = -ENOMEM;
1659		goto out;
1660	}
1661
1662	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1663		INIT_LIST_HEAD(&glob->swap_lru[i]);
1664	INIT_LIST_HEAD(&glob->device_list);
1665	atomic_set(&glob->bo_count, 0);
1666
1667	ret = kobject_init_and_add(
1668		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1669	if (unlikely(ret != 0))
1670		kobject_put(&glob->kobj);
1671out:
1672	mutex_unlock(&ttm_global_mutex);
1673	return ret;
1674}
1675
1676int ttm_bo_device_release(struct ttm_bo_device *bdev)
1677{
1678	int ret = 0;
1679	unsigned i = TTM_NUM_MEM_TYPES;
1680	struct ttm_mem_type_manager *man;
1681	struct ttm_bo_global *glob = bdev->glob;
1682
1683	while (i--) {
1684		man = &bdev->man[i];
1685		if (man->has_type) {
1686			man->use_type = false;
1687			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1688				ret = -EBUSY;
1689				pr_err("DRM memory manager type %d is not clean\n",
1690				       i);
1691			}
1692			man->has_type = false;
1693		}
1694	}
1695
1696	mutex_lock(&ttm_global_mutex);
1697	list_del(&bdev->device_list);
1698	mutex_unlock(&ttm_global_mutex);
1699
1700	cancel_delayed_work_sync(&bdev->wq);
1701
1702	if (ttm_bo_delayed_delete(bdev, true))
1703		pr_debug("Delayed destroy list was clean\n");
1704
1705	spin_lock(&glob->lru_lock);
1706	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 1707		if (list_empty(&glob->swap_lru[i]))
1708			pr_debug("Swap list %d was clean\n", i);
1709	spin_unlock(&glob->lru_lock);
1710
1711	drm_vma_offset_manager_destroy(&bdev->vma_manager);
1712
1713	if (!ret)
1714		ttm_bo_global_release();
1715
1716	return ret;
1717}
1718EXPORT_SYMBOL(ttm_bo_device_release);
1719
1720int ttm_bo_device_init(struct ttm_bo_device *bdev,
1721		       struct ttm_bo_driver *driver,
1722		       struct address_space *mapping,
1723		       bool need_dma32)
1724{
1725	struct ttm_bo_global *glob = &ttm_bo_glob;
1726	int ret;
1727
1728	ret = ttm_bo_global_init();
1729	if (ret)
1730		return ret;
1731
1732	bdev->driver = driver;
1733
1734	memset(bdev->man, 0, sizeof(bdev->man));
1735
1736	/*
1737	 * Initialize the system memory buffer type.
1738	 * Other types need to be driver / IOCTL initialized.
1739	 */
1740	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1741	if (unlikely(ret != 0))
1742		goto out_no_sys;
1743
1744	drm_vma_offset_manager_init(&bdev->vma_manager,
1745				    DRM_FILE_PAGE_OFFSET_START,
1746				    DRM_FILE_PAGE_OFFSET_SIZE);
1747	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1748	INIT_LIST_HEAD(&bdev->ddestroy);
1749	bdev->dev_mapping = mapping;
1750	bdev->glob = glob;
1751	bdev->need_dma32 = need_dma32;
1752	mutex_lock(&ttm_global_mutex);
1753	list_add_tail(&bdev->device_list, &glob->device_list);
1754	mutex_unlock(&ttm_global_mutex);
1755
1756	return 0;
1757out_no_sys:
1758	ttm_bo_global_release();
1759	return ret;
1760}
1761EXPORT_SYMBOL(ttm_bo_device_init);
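/*
 * Illustrative sketch, not part of the original file: typical bring-up
 * order in a driver.  my_dev, my_driver_funcs and the mapping used are
 * hypothetical; the call itself matches the signature above.
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev, &my_driver_funcs,
 *				 my_dev->mapping, my_dev->need_dma32);
 *	if (ret)
 *		return ret;
 *
 *	// then register device memory types with ttm_bo_init_mm(), as
 *	// sketched after ttm_bo_init_mm() above.
 */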
1762
1763/*
1764 * buffer object vm functions.
1765 */
1766
1767bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1768{
1769	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1770
1771	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1772		if (mem->mem_type == TTM_PL_SYSTEM)
1773			return false;
1774
1775		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1776			return false;
1777
1778		if (mem->placement & TTM_PL_FLAG_CACHED)
1779			return false;
1780	}
1781	return true;
1782}
1783
1784void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1785{
1786	struct ttm_bo_device *bdev = bo->bdev;
1787
1788	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1789	ttm_mem_io_free_vm(bo);
1790}
1791
1792void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1793{
1794	struct ttm_bo_device *bdev = bo->bdev;
1795	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1796
1797	ttm_mem_io_lock(man, false);
1798	ttm_bo_unmap_virtual_locked(bo);
1799	ttm_mem_io_unlock(man);
1800}
1801
1802
1803EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1804
1805int ttm_bo_wait(struct ttm_buffer_object *bo,
1806		bool interruptible, bool no_wait)
1807{
1808	long timeout = 15 * HZ;
1809
1810	if (no_wait) {
1811		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1812			return 0;
1813		else
1814			return -EBUSY;
1815	}
1816
1817	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1818						      interruptible, timeout);
1819	if (timeout < 0)
1820		return timeout;
1821
1822	if (timeout == 0)
1823		return -EBUSY;
1824
1825	dma_resv_add_excl_fence(bo->base.resv, NULL);
1826	return 0;
1827}
1828EXPORT_SYMBOL(ttm_bo_wait);
1829
1830int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1831{
1832	int ret = 0;
1833
1834	/*
1835	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1836	 */
1837
1838	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1839	if (unlikely(ret != 0))
1840		return ret;
1841	ret = ttm_bo_wait(bo, true, no_wait);
1842	if (likely(ret == 0))
1843		atomic_inc(&bo->cpu_writers);
1844	ttm_bo_unreserve(bo);
1845	return ret;
1846}
1847EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1848
1849void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1850{
1851	atomic_dec(&bo->cpu_writers);
1852}
1853EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
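/*
 * Illustrative sketch, not part of the original file: the grab/release
 * pair above brackets CPU writes to a buffer and makes sure it is idle
 * first.  my_cpu_fill() stands in for the actual CPU access.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *
 *	my_cpu_fill(bo);
 *
 *	ttm_bo_synccpu_write_release(bo);
 */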
1854
1855/**
1856 * A buffer object shrink method that tries to swap out the first
1857 * buffer object on the bo_global::swap_lru list.
1858 */
1859int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1860{
1861	struct ttm_buffer_object *bo;
1862	int ret = -EBUSY;
1863	bool locked;
1864	unsigned i;
1865
1866	spin_lock(&glob->lru_lock);
1867	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1868		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1869			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1870							   NULL)) {
1871				ret = 0;
1872				break;
1873			}
1874		}
1875		if (!ret)
1876			break;
1877	}
1878
1879	if (ret) {
1880		spin_unlock(&glob->lru_lock);
1881		return ret;
1882	}
1883
1884	kref_get(&bo->list_kref);
1885
1886	if (!list_empty(&bo->ddestroy)) {
1887		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1888		kref_put(&bo->list_kref, ttm_bo_release_list);
1889		return ret;
1890	}
1891
1892	ttm_bo_del_from_lru(bo);
1893	spin_unlock(&glob->lru_lock);
1894
1895	/**
1896	 * Move to system cached
1897	 */
1898
1899	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1900	    bo->ttm->caching_state != tt_cached) {
1901		struct ttm_operation_ctx ctx = { false, false };
1902		struct ttm_mem_reg evict_mem;
1903
1904		evict_mem = bo->mem;
1905		evict_mem.mm_node = NULL;
1906		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1907		evict_mem.mem_type = TTM_PL_SYSTEM;
1908
1909		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1910		if (unlikely(ret != 0))
1911			goto out;
1912	}
1913
1914	/**
1915	 * Make sure BO is idle.
1916	 */
1917
1918	ret = ttm_bo_wait(bo, false, false);
1919	if (unlikely(ret != 0))
1920		goto out;
1921
1922	ttm_bo_unmap_virtual(bo);
1923
1924	/**
1925	 * Swap out. Buffer will be swapped in again as soon as
1926	 * anyone tries to access a ttm page.
1927	 */
1928
1929	if (bo->bdev->driver->swap_notify)
1930		bo->bdev->driver->swap_notify(bo);
1931
1932	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1933out:
1934
1935	/**
1936	 *
1937	 * Unreserve without putting on LRU to avoid swapping out an
1938	 * already swapped buffer.
1939	 */
1940	if (locked)
1941		dma_resv_unlock(bo->base.resv);
1942	kref_put(&bo->list_kref, ttm_bo_release_list);
1943	return ret;
1944}
1945EXPORT_SYMBOL(ttm_bo_swapout);
1946
1947void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1948{
1949	struct ttm_operation_ctx ctx = {
1950		.interruptible = false,
1951		.no_wait_gpu = false
1952	};
1953
1954	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
1955		;
1956}
1957EXPORT_SYMBOL(ttm_bo_swapout_all);
1958
1959/**
1960 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1961 * unreserved
1962 *
1963 * @bo: Pointer to buffer
1964 */
1965int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1966{
1967	int ret;
1968
1969	/*
 1970	 * In the absence of a wait_unlocked API,
 1971	 * use the bo::wu_mutex to avoid triggering livelocks due to
1972	 * concurrent use of this function. Note that this use of
1973	 * bo::wu_mutex can go away if we change locking order to
1974	 * mmap_sem -> bo::reserve.
1975	 */
1976	ret = mutex_lock_interruptible(&bo->wu_mutex);
1977	if (unlikely(ret != 0))
1978		return -ERESTARTSYS;
1979	if (!dma_resv_is_locked(bo->base.resv))
1980		goto out_unlock;
1981	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
1982	if (ret == -EINTR)
1983		ret = -ERESTARTSYS;
1984	if (unlikely(ret != 0))
1985		goto out_unlock;
1986	dma_resv_unlock(bo->base.resv);
1987
1988out_unlock:
1989	mutex_unlock(&bo->wu_mutex);
1990	return ret;
1991}