drivers/gpu/drm/ttm/ttm_bo.c (Linux v3.5.6)

   1/**************************************************************************
   2 *
   3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
  28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29 */
  30
  31#define pr_fmt(fmt) "[TTM] " fmt
  32
  33#include "ttm/ttm_module.h"
  34#include "ttm/ttm_bo_driver.h"
  35#include "ttm/ttm_placement.h"
  36#include <linux/jiffies.h>
  37#include <linux/slab.h>
  38#include <linux/sched.h>
  39#include <linux/mm.h>
  40#include <linux/file.h>
  41#include <linux/module.h>
  42#include <linux/atomic.h>
  43
  44#define TTM_ASSERT_LOCKED(param)
  45#define TTM_DEBUG(fmt, arg...)
  46#define TTM_BO_HASH_ORDER 13
  47
  48static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
  49static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
  50static void ttm_bo_global_kobj_release(struct kobject *kobj);
  51
  52static struct attribute ttm_bo_count = {
  53	.name = "bo_count",
  54	.mode = S_IRUGO
  55};
  56
  57static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
  58{
  59	int i;
  60
  61	for (i = 0; i <= TTM_PL_PRIV5; i++)
  62		if (flags & (1 << i)) {
  63			*mem_type = i;
  64			return 0;
  65		}
  66	return -EINVAL;
  67}
  68
  69static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
  70{
  71	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  72
  73	pr_err("    has_type: %d\n", man->has_type);
  74	pr_err("    use_type: %d\n", man->use_type);
  75	pr_err("    flags: 0x%08X\n", man->flags);
  76	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
  77	pr_err("    size: %llu\n", man->size);
  78	pr_err("    available_caching: 0x%08X\n", man->available_caching);
  79	pr_err("    default_caching: 0x%08X\n", man->default_caching);
  80	if (mem_type != TTM_PL_SYSTEM)
  81		(*man->func->debug)(man, TTM_PFX);
  82}
  83
  84static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  85					struct ttm_placement *placement)
  86{
  87	int i, ret, mem_type;
  88
  89	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
  90	       bo, bo->mem.num_pages, bo->mem.size >> 10,
  91	       bo->mem.size >> 20);
  92	for (i = 0; i < placement->num_placement; i++) {
  93		ret = ttm_mem_type_from_flags(placement->placement[i],
  94						&mem_type);
  95		if (ret)
  96			return;
  97		pr_err("  placement[%d]=0x%08X (%d)\n",
  98		       i, placement->placement[i], mem_type);
  99		ttm_mem_type_debug(bo->bdev, mem_type);
 100	}
 101}
 102
 103static ssize_t ttm_bo_global_show(struct kobject *kobj,
 104				  struct attribute *attr,
 105				  char *buffer)
 106{
 107	struct ttm_bo_global *glob =
 108		container_of(kobj, struct ttm_bo_global, kobj);
 109
 110	return snprintf(buffer, PAGE_SIZE, "%lu\n",
 111			(unsigned long) atomic_read(&glob->bo_count));
 112}
 113
 114static struct attribute *ttm_bo_global_attrs[] = {
 115	&ttm_bo_count,
 116	NULL
 117};
 118
 119static const struct sysfs_ops ttm_bo_global_ops = {
 120	.show = &ttm_bo_global_show
 121};
 122
 123static struct kobj_type ttm_bo_glob_kobj_type  = {
 124	.release = &ttm_bo_global_kobj_release,
 125	.sysfs_ops = &ttm_bo_global_ops,
 126	.default_attrs = ttm_bo_global_attrs
 127};
 128
 129
 130static inline uint32_t ttm_bo_type_flags(unsigned type)
 131{
 132	return 1 << (type);
 133}
 134
 135static void ttm_bo_release_list(struct kref *list_kref)
 136{
 137	struct ttm_buffer_object *bo =
 138	    container_of(list_kref, struct ttm_buffer_object, list_kref);
 139	struct ttm_bo_device *bdev = bo->bdev;
 140	size_t acc_size = bo->acc_size;
 141
 142	BUG_ON(atomic_read(&bo->list_kref.refcount));
 143	BUG_ON(atomic_read(&bo->kref.refcount));
 144	BUG_ON(atomic_read(&bo->cpu_writers));
 145	BUG_ON(bo->sync_obj != NULL);
 146	BUG_ON(bo->mem.mm_node != NULL);
 147	BUG_ON(!list_empty(&bo->lru));
 148	BUG_ON(!list_empty(&bo->ddestroy));
 149
 150	if (bo->ttm)
 151		ttm_tt_destroy(bo->ttm);
 152	atomic_dec(&bo->glob->bo_count);
 153	if (bo->destroy)
 154		bo->destroy(bo);
 155	else {
 156		kfree(bo);
 157	}
 158	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 159}
 160
 161int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 162{
 163	if (interruptible) {
 164		return wait_event_interruptible(bo->event_queue,
 165					       atomic_read(&bo->reserved) == 0);
 166	} else {
 167		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
 168		return 0;
 169	}
 170}
 171EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 172
 173void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 174{
 175	struct ttm_bo_device *bdev = bo->bdev;
 176	struct ttm_mem_type_manager *man;
 177
 178	BUG_ON(!atomic_read(&bo->reserved));
 179
 180	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 181
 182		BUG_ON(!list_empty(&bo->lru));
 183
 184		man = &bdev->man[bo->mem.mem_type];
 185		list_add_tail(&bo->lru, &man->lru);
 186		kref_get(&bo->list_kref);
 187
 188		if (bo->ttm != NULL) {
 189			list_add_tail(&bo->swap, &bo->glob->swap_lru);
 190			kref_get(&bo->list_kref);
 191		}
 192	}
 193}
 194
 195int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 196{
 197	int put_count = 0;
 198
 199	if (!list_empty(&bo->swap)) {
 200		list_del_init(&bo->swap);
 201		++put_count;
 202	}
 203	if (!list_empty(&bo->lru)) {
 204		list_del_init(&bo->lru);
 205		++put_count;
 206	}
 207
 208	/*
 209	 * TODO: Add a driver hook to delete from
 210	 * driver-specific LRU's here.
 211	 */
 212
 213	return put_count;
 214}
 215
 216int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 217			  bool interruptible,
 218			  bool no_wait, bool use_sequence, uint32_t sequence)
 219{
 220	struct ttm_bo_global *glob = bo->glob;
 221	int ret;
 222
 223	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
 224		/**
 225		 * Deadlock avoidance for multi-bo reserving.
 226		 */
 227		if (use_sequence && bo->seq_valid) {
 228			/**
 229			 * We've already reserved this one.
 230			 */
 231			if (unlikely(sequence == bo->val_seq))
 232				return -EDEADLK;
 233			/**
 234			 * Already reserved by a thread that will not back
 235			 * off for us. We need to back off.
 236			 */
 237			if (unlikely(sequence - bo->val_seq < (1 << 31)))
 238				return -EAGAIN;
 239		}
 240
 241		if (no_wait)
 242			return -EBUSY;
 243
 244		spin_unlock(&glob->lru_lock);
 245		ret = ttm_bo_wait_unreserved(bo, interruptible);
 246		spin_lock(&glob->lru_lock);
 247
 248		if (unlikely(ret))
 249			return ret;
 250	}
 251
 252	if (use_sequence) {
 253		/**
 254		 * Wake up waiters that may need to recheck for deadlock,
 255		 * if we decreased the sequence number.
 256		 */
 257		if (unlikely((bo->val_seq - sequence < (1 << 31))
 258			     || !bo->seq_valid))
 259			wake_up_all(&bo->event_queue);
 260
 261		bo->val_seq = sequence;
 262		bo->seq_valid = true;
 263	} else {
 264		bo->seq_valid = false;
 265	}
 266
 267	return 0;
 268}
 269EXPORT_SYMBOL(ttm_bo_reserve);
 270
 271static void ttm_bo_ref_bug(struct kref *list_kref)
 272{
 273	BUG();
 274}
 275
 276void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 277			 bool never_free)
 278{
 279	kref_sub(&bo->list_kref, count,
 280		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
 281}
 282
 283int ttm_bo_reserve(struct ttm_buffer_object *bo,
 284		   bool interruptible,
 285		   bool no_wait, bool use_sequence, uint32_t sequence)
 286{
 287	struct ttm_bo_global *glob = bo->glob;
 288	int put_count = 0;
 289	int ret;
 290
 291	spin_lock(&glob->lru_lock);
 292	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
 293				    sequence);
 294	if (likely(ret == 0))
 295		put_count = ttm_bo_del_from_lru(bo);
 296	spin_unlock(&glob->lru_lock);
 297
 298	ttm_bo_list_ref_sub(bo, put_count, true);
 299
 300	return ret;
 301}
 302
 303void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 304{
 305	ttm_bo_add_to_lru(bo);
 306	atomic_set(&bo->reserved, 0);
 307	wake_up_all(&bo->event_queue);
 308}
 309
 310void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 311{
 312	struct ttm_bo_global *glob = bo->glob;
 313
 314	spin_lock(&glob->lru_lock);
 315	ttm_bo_unreserve_locked(bo);
 316	spin_unlock(&glob->lru_lock);
 317}
 318EXPORT_SYMBOL(ttm_bo_unreserve);
 319
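/*
 * Editor's example (not part of the original file): the canonical
 * reserve / use / unreserve pattern a driver follows around buffer
 * state changes. A minimal sketch; "do work on bo" stands in for the
 * actual driver code, and no multi-bo deadlock-avoidance sequence is
 * used since only a single buffer is reserved.
 */
static int __maybe_unused example_reserve_use_unreserve(
		struct ttm_buffer_object *bo)
{
	/* interruptible=true, no_wait=false, use_sequence=false */
	int ret = ttm_bo_reserve(bo, true, false, false, 0);

	if (unlikely(ret != 0))
		return ret;	/* e.g. -ERESTARTSYS */

	/* ... do work on bo; it cannot be moved or evicted here ... */

	ttm_bo_unreserve(bo);	/* puts the bo back on its LRU lists */
	return 0;
}
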
 320/*
  321 * Call with bo->mutex held.
 322 */
 323static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 324{
 325	struct ttm_bo_device *bdev = bo->bdev;
 326	struct ttm_bo_global *glob = bo->glob;
 327	int ret = 0;
 328	uint32_t page_flags = 0;
 329
 330	TTM_ASSERT_LOCKED(&bo->mutex);
 331	bo->ttm = NULL;
 332
 333	if (bdev->need_dma32)
 334		page_flags |= TTM_PAGE_FLAG_DMA32;
 335
 336	switch (bo->type) {
 337	case ttm_bo_type_device:
 338		if (zero_alloc)
 339			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 340	case ttm_bo_type_kernel:
 341		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 342						      page_flags, glob->dummy_read_page);
 343		if (unlikely(bo->ttm == NULL))
 344			ret = -ENOMEM;
 345		break;
 346	case ttm_bo_type_sg:
 347		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 348						      page_flags | TTM_PAGE_FLAG_SG,
 349						      glob->dummy_read_page);
 350		if (unlikely(bo->ttm == NULL)) {
 351			ret = -ENOMEM;
 352			break;
 353		}
 354		bo->ttm->sg = bo->sg;
 355		break;
 356	default:
 357		pr_err("Illegal buffer object type\n");
 358		ret = -EINVAL;
 359		break;
 360	}
 361
 362	return ret;
 363}
 364
 365static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 366				  struct ttm_mem_reg *mem,
 367				  bool evict, bool interruptible,
 368				  bool no_wait_reserve, bool no_wait_gpu)
 369{
 370	struct ttm_bo_device *bdev = bo->bdev;
 371	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
 372	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
 373	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
 374	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
 375	int ret = 0;
 376
 377	if (old_is_pci || new_is_pci ||
 378	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
 379		ret = ttm_mem_io_lock(old_man, true);
 380		if (unlikely(ret != 0))
 381			goto out_err;
 382		ttm_bo_unmap_virtual_locked(bo);
 383		ttm_mem_io_unlock(old_man);
 384	}
 385
 386	/*
 387	 * Create and bind a ttm if required.
 388	 */
 389
 390	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 391		if (bo->ttm == NULL) {
 392			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
 393			ret = ttm_bo_add_ttm(bo, zero);
 394			if (ret)
 395				goto out_err;
 396		}
 397
 398		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 399		if (ret)
 400			goto out_err;
 401
 402		if (mem->mem_type != TTM_PL_SYSTEM) {
 403			ret = ttm_tt_bind(bo->ttm, mem);
 404			if (ret)
 405				goto out_err;
 406		}
 407
 408		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 409			if (bdev->driver->move_notify)
 410				bdev->driver->move_notify(bo, mem);
 411			bo->mem = *mem;
 412			mem->mm_node = NULL;
 413			goto moved;
 414		}
 415	}
 416
 417	if (bdev->driver->move_notify)
 418		bdev->driver->move_notify(bo, mem);
 419
 420	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 421	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 422		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 423	else if (bdev->driver->move)
 424		ret = bdev->driver->move(bo, evict, interruptible,
 425					 no_wait_reserve, no_wait_gpu, mem);
 426	else
 427		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 428
 429	if (ret) {
 430		if (bdev->driver->move_notify) {
 431			struct ttm_mem_reg tmp_mem = *mem;
 432			*mem = bo->mem;
 433			bo->mem = tmp_mem;
 434			bdev->driver->move_notify(bo, mem);
 435			bo->mem = *mem;
 436		}
 437
 438		goto out_err;
 439	}
 440
 441moved:
 442	if (bo->evicted) {
 443		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 444		if (ret)
  445			pr_err("Cannot flush read caches\n");
 446		bo->evicted = false;
 447	}
 448
 449	if (bo->mem.mm_node) {
 450		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 451		    bdev->man[bo->mem.mem_type].gpu_offset;
 452		bo->cur_placement = bo->mem.placement;
 453	} else
 454		bo->offset = 0;
 455
 456	return 0;
 457
 458out_err:
 459	new_man = &bdev->man[bo->mem.mem_type];
 460	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
 461		ttm_tt_unbind(bo->ttm);
 462		ttm_tt_destroy(bo->ttm);
 463		bo->ttm = NULL;
 464	}
 465
 466	return ret;
 467}
 468
 469/**
  470 * Call with bo::reserved held.
 471 * Will release GPU memory type usage on destruction.
 472 * This is the place to put in driver specific hooks to release
 473 * driver private resources.
 474 * Will release the bo::reserved lock.
 475 */
 476
 477static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 478{
 479	if (bo->bdev->driver->move_notify)
 480		bo->bdev->driver->move_notify(bo, NULL);
 481
 482	if (bo->ttm) {
 483		ttm_tt_unbind(bo->ttm);
 484		ttm_tt_destroy(bo->ttm);
 485		bo->ttm = NULL;
 486	}
 487	ttm_bo_mem_put(bo, &bo->mem);
 488
 489	atomic_set(&bo->reserved, 0);
 490
 491	/*
 492	 * Make processes trying to reserve really pick it up.
 493	 */
 494	smp_mb__after_atomic_dec();
 495	wake_up_all(&bo->event_queue);
 496}
 497
 498static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 499{
 500	struct ttm_bo_device *bdev = bo->bdev;
 501	struct ttm_bo_global *glob = bo->glob;
 502	struct ttm_bo_driver *driver;
 503	void *sync_obj = NULL;
 504	void *sync_obj_arg;
 505	int put_count;
 506	int ret;
 507
 508	spin_lock(&bdev->fence_lock);
 509	(void) ttm_bo_wait(bo, false, false, true);
 510	if (!bo->sync_obj) {
 511
 512		spin_lock(&glob->lru_lock);
 513
 514		/**
 515		 * Lock inversion between bo:reserve and bdev::fence_lock here,
 516		 * but that's OK, since we're only trylocking.
 517		 */
 518
 519		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 520
 521		if (unlikely(ret == -EBUSY))
 522			goto queue;
 523
 524		spin_unlock(&bdev->fence_lock);
 525		put_count = ttm_bo_del_from_lru(bo);
 526
 527		spin_unlock(&glob->lru_lock);
 528		ttm_bo_cleanup_memtype_use(bo);
 529
 530		ttm_bo_list_ref_sub(bo, put_count, true);
 531
 532		return;
 533	} else {
 534		spin_lock(&glob->lru_lock);
 535	}
 536queue:
 537	driver = bdev->driver;
 538	if (bo->sync_obj)
 539		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 540	sync_obj_arg = bo->sync_obj_arg;
 541
 542	kref_get(&bo->list_kref);
 543	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 544	spin_unlock(&glob->lru_lock);
 545	spin_unlock(&bdev->fence_lock);
 546
 547	if (sync_obj) {
 548		driver->sync_obj_flush(sync_obj, sync_obj_arg);
 549		driver->sync_obj_unref(&sync_obj);
 550	}
 551	schedule_delayed_work(&bdev->wq,
 552			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 553}
 554
  555/**
  556 * ttm_bo_cleanup_refs
  557 * If bo idle, remove from delayed- and lru lists, and unref.
  558 * If not idle, do nothing.
  559 *
  560 * @interruptible:        Any sleeps should occur interruptibly.
  561 * @no_wait_reserve:      Never wait for reserve. Return -EBUSY instead.
  562 * @no_wait_gpu:          Never wait for gpu. Return -EBUSY instead.
  563 */
 564
 565static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 566			       bool interruptible,
 567			       bool no_wait_reserve,
 568			       bool no_wait_gpu)
 569{
 570	struct ttm_bo_device *bdev = bo->bdev;
 571	struct ttm_bo_global *glob = bo->glob;
 572	int put_count;
 573	int ret = 0;
 574
 575retry:
 576	spin_lock(&bdev->fence_lock);
 577	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 578	spin_unlock(&bdev->fence_lock);
 579
 580	if (unlikely(ret != 0))
 581		return ret;
 582
 583	spin_lock(&glob->lru_lock);
 584
 585	if (unlikely(list_empty(&bo->ddestroy))) {
 586		spin_unlock(&glob->lru_lock);
 587		return 0;
 588	}
 589
 590	ret = ttm_bo_reserve_locked(bo, interruptible,
 591				    no_wait_reserve, false, 0);
 592
 593	if (unlikely(ret != 0)) {
 594		spin_unlock(&glob->lru_lock);
 595		return ret;
 596	}
 597
 598	/**
 599	 * We can re-check for sync object without taking
 600	 * the bo::lock since setting the sync object requires
 601	 * also bo::reserved. A busy object at this point may
 602	 * be caused by another thread recently starting an accelerated
 603	 * eviction.
 604	 */
 605
 606	if (unlikely(bo->sync_obj)) {
 607		atomic_set(&bo->reserved, 0);
 608		wake_up_all(&bo->event_queue);
 609		spin_unlock(&glob->lru_lock);
 610		goto retry;
 611	}
 612
 613	put_count = ttm_bo_del_from_lru(bo);
 614	list_del_init(&bo->ddestroy);
 615	++put_count;
 616
 617	spin_unlock(&glob->lru_lock);
 618	ttm_bo_cleanup_memtype_use(bo);
 619
 620	ttm_bo_list_ref_sub(bo, put_count, true);
 621
 622	return 0;
 623}
 624
 625/**
 626 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 627 * encountered buffers.
 628 */
 629
 630static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 631{
 632	struct ttm_bo_global *glob = bdev->glob;
 633	struct ttm_buffer_object *entry = NULL;
 634	int ret = 0;
 635
 636	spin_lock(&glob->lru_lock);
 637	if (list_empty(&bdev->ddestroy))
 638		goto out_unlock;
 639
 640	entry = list_first_entry(&bdev->ddestroy,
 641		struct ttm_buffer_object, ddestroy);
 642	kref_get(&entry->list_kref);
 643
 644	for (;;) {
 645		struct ttm_buffer_object *nentry = NULL;
 646
 647		if (entry->ddestroy.next != &bdev->ddestroy) {
 648			nentry = list_first_entry(&entry->ddestroy,
 649				struct ttm_buffer_object, ddestroy);
 650			kref_get(&nentry->list_kref);
 651		}
 652
 653		spin_unlock(&glob->lru_lock);
 654		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
 655					  !remove_all);
 656		kref_put(&entry->list_kref, ttm_bo_release_list);
 657		entry = nentry;
 658
 659		if (ret || !entry)
 660			goto out;
 661
 662		spin_lock(&glob->lru_lock);
 663		if (list_empty(&entry->ddestroy))
 664			break;
 665	}
 666
 667out_unlock:
 668	spin_unlock(&glob->lru_lock);
 669out:
 670	if (entry)
 671		kref_put(&entry->list_kref, ttm_bo_release_list);
 672	return ret;
 673}
 674
 675static void ttm_bo_delayed_workqueue(struct work_struct *work)
 676{
 677	struct ttm_bo_device *bdev =
 678	    container_of(work, struct ttm_bo_device, wq.work);
 679
 680	if (ttm_bo_delayed_delete(bdev, false)) {
 681		schedule_delayed_work(&bdev->wq,
 682				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 683	}
 684}
 685
 686static void ttm_bo_release(struct kref *kref)
 687{
 688	struct ttm_buffer_object *bo =
 689	    container_of(kref, struct ttm_buffer_object, kref);
 690	struct ttm_bo_device *bdev = bo->bdev;
 691	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 692
 693	if (likely(bo->vm_node != NULL)) {
 694		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
 695		drm_mm_put_block(bo->vm_node);
 696		bo->vm_node = NULL;
 697	}
 698	write_unlock(&bdev->vm_lock);
 699	ttm_mem_io_lock(man, false);
 700	ttm_mem_io_free_vm(bo);
 701	ttm_mem_io_unlock(man);
 702	ttm_bo_cleanup_refs_or_queue(bo);
 703	kref_put(&bo->list_kref, ttm_bo_release_list);
 704	write_lock(&bdev->vm_lock);
 705}
 706
 707void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 708{
 709	struct ttm_buffer_object *bo = *p_bo;
 710	struct ttm_bo_device *bdev = bo->bdev;
 711
 712	*p_bo = NULL;
 713	write_lock(&bdev->vm_lock);
 714	kref_put(&bo->kref, ttm_bo_release);
 715	write_unlock(&bdev->vm_lock);
 716}
 717EXPORT_SYMBOL(ttm_bo_unref);
 718
 719int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 720{
 721	return cancel_delayed_work_sync(&bdev->wq);
 722}
 723EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 724
 725void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 726{
 727	if (resched)
 728		schedule_delayed_work(&bdev->wq,
 729				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 730}
 731EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 732
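/*
 * Editor's sketch (not part of the original file): drivers bracket GPU
 * reset or suspend paths with the two calls above so the delayed-destroy
 * worker cannot touch the hardware in between.
 */
static void __maybe_unused example_gpu_reset(struct ttm_bo_device *bdev)
{
	int resched = ttm_bo_lock_delayed_workqueue(bdev);

	/* ... perform the actual hardware reset here ... */

	ttm_bo_unlock_delayed_workqueue(bdev, resched);
}
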
 733static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 734			bool no_wait_reserve, bool no_wait_gpu)
 735{
 736	struct ttm_bo_device *bdev = bo->bdev;
 737	struct ttm_mem_reg evict_mem;
 738	struct ttm_placement placement;
 739	int ret = 0;
 740
 741	spin_lock(&bdev->fence_lock);
 742	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 743	spin_unlock(&bdev->fence_lock);
 744
 745	if (unlikely(ret != 0)) {
 746		if (ret != -ERESTARTSYS) {
 747			pr_err("Failed to expire sync object before buffer eviction\n");
 748		}
 749		goto out;
 750	}
 751
 752	BUG_ON(!atomic_read(&bo->reserved));
 753
 754	evict_mem = bo->mem;
 755	evict_mem.mm_node = NULL;
 756	evict_mem.bus.io_reserved_vm = false;
 757	evict_mem.bus.io_reserved_count = 0;
 758
 759	placement.fpfn = 0;
 760	placement.lpfn = 0;
 761	placement.num_placement = 0;
 762	placement.num_busy_placement = 0;
 763	bdev->driver->evict_flags(bo, &placement);
 764	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
 765				no_wait_reserve, no_wait_gpu);
 766	if (ret) {
 767		if (ret != -ERESTARTSYS) {
 768			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
 769			       bo);
 770			ttm_bo_mem_space_debug(bo, &placement);
 771		}
 772		goto out;
 773	}
 774
 775	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
 776				     no_wait_reserve, no_wait_gpu);
 777	if (ret) {
 778		if (ret != -ERESTARTSYS)
 779			pr_err("Buffer eviction failed\n");
 780		ttm_bo_mem_put(bo, &evict_mem);
 781		goto out;
 782	}
 783	bo->evicted = true;
 784out:
 785	return ret;
 786}
 787
 788static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 789				uint32_t mem_type,
 790				bool interruptible, bool no_wait_reserve,
 791				bool no_wait_gpu)
 792{
 793	struct ttm_bo_global *glob = bdev->glob;
 794	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 795	struct ttm_buffer_object *bo;
 796	int ret, put_count = 0;
 797
 798retry:
 799	spin_lock(&glob->lru_lock);
 800	if (list_empty(&man->lru)) {
 801		spin_unlock(&glob->lru_lock);
 802		return -EBUSY;
 803	}
 804
 805	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 806	kref_get(&bo->list_kref);
 807
 808	if (!list_empty(&bo->ddestroy)) {
 809		spin_unlock(&glob->lru_lock);
 810		ret = ttm_bo_cleanup_refs(bo, interruptible,
 811					  no_wait_reserve, no_wait_gpu);
 812		kref_put(&bo->list_kref, ttm_bo_release_list);
 813
 814		if (likely(ret == 0 || ret == -ERESTARTSYS))
 815			return ret;
 816
 817		goto retry;
 818	}
 819
 820	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 821
 822	if (unlikely(ret == -EBUSY)) {
 823		spin_unlock(&glob->lru_lock);
 824		if (likely(!no_wait_gpu))
 825			ret = ttm_bo_wait_unreserved(bo, interruptible);
 826
 827		kref_put(&bo->list_kref, ttm_bo_release_list);
 828
 829		/**
 830		 * We *need* to retry after releasing the lru lock.
 831		 */
 832
 833		if (unlikely(ret != 0))
 834			return ret;
 835		goto retry;
 836	}
 837
 838	put_count = ttm_bo_del_from_lru(bo);
 839	spin_unlock(&glob->lru_lock);
 840
 841	BUG_ON(ret != 0);
 842
 843	ttm_bo_list_ref_sub(bo, put_count, true);
 844
 845	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 846	ttm_bo_unreserve(bo);
 847
 848	kref_put(&bo->list_kref, ttm_bo_release_list);
 849	return ret;
 850}
 851
 852void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 853{
 854	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 855
 856	if (mem->mm_node)
 857		(*man->func->put_node)(man, mem);
 858}
 859EXPORT_SYMBOL(ttm_bo_mem_put);
 860
 861/**
 862 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 863 * space, or we've evicted everything and there isn't enough space.
 864 */
 865static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 866					uint32_t mem_type,
 867					struct ttm_placement *placement,
 868					struct ttm_mem_reg *mem,
 869					bool interruptible,
 870					bool no_wait_reserve,
 871					bool no_wait_gpu)
 872{
 873	struct ttm_bo_device *bdev = bo->bdev;
 874	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 875	int ret;
 876
 877	do {
 878		ret = (*man->func->get_node)(man, bo, placement, mem);
 879		if (unlikely(ret != 0))
 880			return ret;
 881		if (mem->mm_node)
 882			break;
 883		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
 884						no_wait_reserve, no_wait_gpu);
 885		if (unlikely(ret != 0))
 886			return ret;
 887	} while (1);
 888	if (mem->mm_node == NULL)
 889		return -ENOMEM;
 890	mem->mem_type = mem_type;
 891	return 0;
 892}
 893
 894static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 895				      uint32_t cur_placement,
 896				      uint32_t proposed_placement)
 897{
 898	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
 899	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 900
 901	/**
 902	 * Keep current caching if possible.
 903	 */
 904
 905	if ((cur_placement & caching) != 0)
 906		result |= (cur_placement & caching);
 907	else if ((man->default_caching & caching) != 0)
 908		result |= man->default_caching;
 909	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
 910		result |= TTM_PL_FLAG_CACHED;
 911	else if ((TTM_PL_FLAG_WC & caching) != 0)
 912		result |= TTM_PL_FLAG_WC;
 913	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
 914		result |= TTM_PL_FLAG_UNCACHED;
 915
 916	return result;
 917}
 918
 919static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 920				 uint32_t mem_type,
 921				 uint32_t proposed_placement,
 922				 uint32_t *masked_placement)
 923{
 924	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 925
 926	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
 927		return false;
 928
 929	if ((proposed_placement & man->available_caching) == 0)
 930		return false;
 931
 932	cur_flags |= (proposed_placement & man->available_caching);
 933
 934	*masked_placement = cur_flags;
 935	return true;
 936}
 937
 938/**
 939 * Creates space for memory region @mem according to its type.
 940 *
 941 * This function first searches for free space in compatible memory types in
 942 * the priority order defined by the driver.  If free space isn't found, then
 943 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 944 * space.
 945 */
 946int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 947			struct ttm_placement *placement,
 948			struct ttm_mem_reg *mem,
 949			bool interruptible, bool no_wait_reserve,
 950			bool no_wait_gpu)
 951{
 952	struct ttm_bo_device *bdev = bo->bdev;
 953	struct ttm_mem_type_manager *man;
 954	uint32_t mem_type = TTM_PL_SYSTEM;
 955	uint32_t cur_flags = 0;
 956	bool type_found = false;
 957	bool type_ok = false;
 958	bool has_erestartsys = false;
 959	int i, ret;
 960
 961	mem->mm_node = NULL;
 962	for (i = 0; i < placement->num_placement; ++i) {
 963		ret = ttm_mem_type_from_flags(placement->placement[i],
 964						&mem_type);
 965		if (ret)
 966			return ret;
 967		man = &bdev->man[mem_type];
 968
 969		type_ok = ttm_bo_mt_compatible(man,
 970						mem_type,
 971						placement->placement[i],
 972						&cur_flags);
 973
 974		if (!type_ok)
 975			continue;
 976
 977		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 978						  cur_flags);
 979		/*
  980		 * Copy the access and other non-mapping-related flag bits from
  981		 * the memory placement flags into the current flags
 982		 */
 983		ttm_flag_masked(&cur_flags, placement->placement[i],
 984				~TTM_PL_MASK_MEMTYPE);
 985
 986		if (mem_type == TTM_PL_SYSTEM)
 987			break;
 988
 989		if (man->has_type && man->use_type) {
 990			type_found = true;
 991			ret = (*man->func->get_node)(man, bo, placement, mem);
 992			if (unlikely(ret))
 993				return ret;
 994		}
 995		if (mem->mm_node)
 996			break;
 997	}
 998
 999	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
1000		mem->mem_type = mem_type;
1001		mem->placement = cur_flags;
1002		return 0;
1003	}
1004
1005	if (!type_found)
1006		return -EINVAL;
1007
1008	for (i = 0; i < placement->num_busy_placement; ++i) {
1009		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
1010						&mem_type);
1011		if (ret)
1012			return ret;
1013		man = &bdev->man[mem_type];
1014		if (!man->has_type)
1015			continue;
1016		if (!ttm_bo_mt_compatible(man,
1017						mem_type,
1018						placement->busy_placement[i],
1019						&cur_flags))
1020			continue;
1021
1022		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1023						  cur_flags);
1024		/*
 1025		 * Copy the access and other non-mapping-related flag bits from
 1026		 * the memory placement flags into the current flags
1027		 */
1028		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1029				~TTM_PL_MASK_MEMTYPE);
1030
1031
1032		if (mem_type == TTM_PL_SYSTEM) {
1033			mem->mem_type = mem_type;
1034			mem->placement = cur_flags;
1035			mem->mm_node = NULL;
1036			return 0;
1037		}
1038
1039		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1040						interruptible, no_wait_reserve, no_wait_gpu);
1041		if (ret == 0 && mem->mm_node) {
1042			mem->placement = cur_flags;
1043			return 0;
1044		}
1045		if (ret == -ERESTARTSYS)
1046			has_erestartsys = true;
1047	}
1048	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1049	return ret;
1050}
1051EXPORT_SYMBOL(ttm_bo_mem_space);
1052
1053int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1054{
1055	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1056		return -EBUSY;
1057
1058	return wait_event_interruptible(bo->event_queue,
1059					atomic_read(&bo->cpu_writers) == 0);
1060}
1061EXPORT_SYMBOL(ttm_bo_wait_cpu);
1062
1063int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1064			struct ttm_placement *placement,
1065			bool interruptible, bool no_wait_reserve,
1066			bool no_wait_gpu)
1067{
1068	int ret = 0;
1069	struct ttm_mem_reg mem;
1070	struct ttm_bo_device *bdev = bo->bdev;
1071
1072	BUG_ON(!atomic_read(&bo->reserved));
1073
1074	/*
1075	 * FIXME: It's possible to pipeline buffer moves.
1076	 * Have the driver move function wait for idle when necessary,
1077	 * instead of doing it here.
1078	 */
1079	spin_lock(&bdev->fence_lock);
1080	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1081	spin_unlock(&bdev->fence_lock);
1082	if (ret)
1083		return ret;
1084	mem.num_pages = bo->num_pages;
1085	mem.size = mem.num_pages << PAGE_SHIFT;
1086	mem.page_alignment = bo->mem.page_alignment;
1087	mem.bus.io_reserved_vm = false;
1088	mem.bus.io_reserved_count = 0;
1089	/*
1090	 * Determine where to move the buffer.
1091	 */
1092	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1093	if (ret)
1094		goto out_unlock;
1095	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1096out_unlock:
1097	if (ret && mem.mm_node)
1098		ttm_bo_mem_put(bo, &mem);
1099	return ret;
1100}
1101
1102static int ttm_bo_mem_compat(struct ttm_placement *placement,
1103			     struct ttm_mem_reg *mem)
1104{
1105	int i;
1106
1107	if (mem->mm_node && placement->lpfn != 0 &&
1108	    (mem->start < placement->fpfn ||
1109	     mem->start + mem->num_pages > placement->lpfn))
1110		return -1;
1111
1112	for (i = 0; i < placement->num_placement; i++) {
1113		if ((placement->placement[i] & mem->placement &
1114			TTM_PL_MASK_CACHING) &&
1115			(placement->placement[i] & mem->placement &
1116			TTM_PL_MASK_MEM))
1117			return i;
1118	}
1119	return -1;
1120}
1121
1122int ttm_bo_validate(struct ttm_buffer_object *bo,
1123			struct ttm_placement *placement,
1124			bool interruptible, bool no_wait_reserve,
1125			bool no_wait_gpu)
1126{
1127	int ret;
1128
1129	BUG_ON(!atomic_read(&bo->reserved));
1130	/* Check that range is valid */
1131	if (placement->lpfn || placement->fpfn)
1132		if (placement->fpfn > placement->lpfn ||
1133			(placement->lpfn - placement->fpfn) < bo->num_pages)
1134			return -EINVAL;
1135	/*
1136	 * Check whether we need to move buffer.
1137	 */
1138	ret = ttm_bo_mem_compat(placement, &bo->mem);
1139	if (ret < 0) {
1140		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1141		if (ret)
1142			return ret;
1143	} else {
1144		/*
 1145		 * Copy the access and other non-mapping-related flag bits from
 1146		 * the compatible memory placement flags into the active flags
1147		 */
1148		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1149				~TTM_PL_MASK_MEMTYPE);
1150	}
1151	/*
1152	 * We might need to add a TTM.
1153	 */
1154	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1155		ret = ttm_bo_add_ttm(bo, true);
1156		if (ret)
1157			return ret;
1158	}
1159	return 0;
1160}
1161EXPORT_SYMBOL(ttm_bo_validate);
1162
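/*
 * Editor's example (not part of the original file): a minimal sketch of
 * pinning a buffer into VRAM with ttm_bo_validate(). Assumes the caller
 * already holds the reservation; the flag combination (write-combined,
 * not evictable) is illustrative, not prescriptive.
 */
static int __maybe_unused example_pin_vram(struct ttm_buffer_object *bo)
{
	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
			 TTM_PL_FLAG_NO_EVICT;
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = 0,			/* 0 == no range restriction */
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};

	return ttm_bo_validate(bo, &placement, false, false, false);
}
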
1163int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1164				struct ttm_placement *placement)
1165{
1166	BUG_ON((placement->fpfn || placement->lpfn) &&
1167	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1168
1169	return 0;
1170}
1171
1172int ttm_bo_init(struct ttm_bo_device *bdev,
1173		struct ttm_buffer_object *bo,
1174		unsigned long size,
1175		enum ttm_bo_type type,
1176		struct ttm_placement *placement,
1177		uint32_t page_alignment,
1178		unsigned long buffer_start,
1179		bool interruptible,
1180		struct file *persistent_swap_storage,
1181		size_t acc_size,
1182		struct sg_table *sg,
1183		void (*destroy) (struct ttm_buffer_object *))
1184{
1185	int ret = 0;
1186	unsigned long num_pages;
1187	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1188
1189	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1190	if (ret) {
1191		pr_err("Out of kernel memory\n");
1192		if (destroy)
1193			(*destroy)(bo);
1194		else
1195			kfree(bo);
1196		return -ENOMEM;
1197	}
1198
1199	size += buffer_start & ~PAGE_MASK;
1200	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1201	if (num_pages == 0) {
1202		pr_err("Illegal buffer object size\n");
1203		if (destroy)
1204			(*destroy)(bo);
1205		else
1206			kfree(bo);
1207		ttm_mem_global_free(mem_glob, acc_size);
1208		return -EINVAL;
1209	}
1210	bo->destroy = destroy;
1211
1212	kref_init(&bo->kref);
1213	kref_init(&bo->list_kref);
1214	atomic_set(&bo->cpu_writers, 0);
1215	atomic_set(&bo->reserved, 1);
1216	init_waitqueue_head(&bo->event_queue);
1217	INIT_LIST_HEAD(&bo->lru);
1218	INIT_LIST_HEAD(&bo->ddestroy);
1219	INIT_LIST_HEAD(&bo->swap);
1220	INIT_LIST_HEAD(&bo->io_reserve_lru);
1221	bo->bdev = bdev;
1222	bo->glob = bdev->glob;
1223	bo->type = type;
1224	bo->num_pages = num_pages;
1225	bo->mem.size = num_pages << PAGE_SHIFT;
1226	bo->mem.mem_type = TTM_PL_SYSTEM;
1227	bo->mem.num_pages = bo->num_pages;
1228	bo->mem.mm_node = NULL;
1229	bo->mem.page_alignment = page_alignment;
1230	bo->mem.bus.io_reserved_vm = false;
1231	bo->mem.bus.io_reserved_count = 0;
1232	bo->buffer_start = buffer_start & PAGE_MASK;
1233	bo->priv_flags = 0;
1234	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1235	bo->seq_valid = false;
1236	bo->persistent_swap_storage = persistent_swap_storage;
1237	bo->acc_size = acc_size;
1238	bo->sg = sg;
1239	atomic_inc(&bo->glob->bo_count);
1240
1241	ret = ttm_bo_check_placement(bo, placement);
1242	if (unlikely(ret != 0))
1243		goto out_err;
1244
1245	/*
1246	 * For ttm_bo_type_device buffers, allocate
1247	 * address space from the device.
1248	 */
1249	if (bo->type == ttm_bo_type_device ||
1250	    bo->type == ttm_bo_type_sg) {
1251		ret = ttm_bo_setup_vm(bo);
1252		if (ret)
1253			goto out_err;
1254	}
1255
1256	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1257	if (ret)
1258		goto out_err;
1259
1260	ttm_bo_unreserve(bo);
1261	return 0;
1262
1263out_err:
1264	ttm_bo_unreserve(bo);
1265	ttm_bo_unref(&bo);
1266
1267	return ret;
1268}
1269EXPORT_SYMBOL(ttm_bo_init);
1270
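/*
 * Editor's example (not part of the original file): how a driver embeds
 * struct ttm_buffer_object in its own object and hands it to
 * ttm_bo_init(). "example_bo" and its helpers are hypothetical. Note
 * that on failure ttm_bo_init() has already invoked the destroy
 * callback, so the caller must not free the object again.
 */
struct example_bo {
	struct ttm_buffer_object tbo;
	/* ... driver private state ... */
};

static void example_bo_destroy(struct ttm_buffer_object *tbo)
{
	kfree(container_of(tbo, struct example_bo, tbo));
}

static int __maybe_unused example_bo_new(struct ttm_bo_device *bdev,
					 unsigned long size,
					 struct ttm_placement *placement,
					 struct example_bo **pbo)
{
	struct example_bo *ebo;
	size_t acc_size;
	int ret;

	ebo = kzalloc(sizeof(*ebo), GFP_KERNEL);
	if (unlikely(ebo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct example_bo));
	ret = ttm_bo_init(bdev, &ebo->tbo, size, ttm_bo_type_device,
			  placement, 0 /* page_alignment */,
			  0 /* buffer_start */, true /* interruptible */,
			  NULL, acc_size, NULL, &example_bo_destroy);
	if (likely(ret == 0))
		*pbo = ebo;
	return ret;
}
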
1271size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1272		       unsigned long bo_size,
1273		       unsigned struct_size)
1274{
1275	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1276	size_t size = 0;
1277
1278	size += ttm_round_pot(struct_size);
1279	size += PAGE_ALIGN(npages * sizeof(void *));
1280	size += ttm_round_pot(sizeof(struct ttm_tt));
1281	return size;
1282}
1283EXPORT_SYMBOL(ttm_bo_acc_size);
1284
1285size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1286			   unsigned long bo_size,
1287			   unsigned struct_size)
1288{
1289	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1290	size_t size = 0;
1291
1292	size += ttm_round_pot(struct_size);
1293	size += PAGE_ALIGN(npages * sizeof(void *));
1294	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1295	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1296	return size;
1297}
1298EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1299
1300int ttm_bo_create(struct ttm_bo_device *bdev,
1301			unsigned long size,
1302			enum ttm_bo_type type,
1303			struct ttm_placement *placement,
1304			uint32_t page_alignment,
1305			unsigned long buffer_start,
1306			bool interruptible,
1307			struct file *persistent_swap_storage,
1308			struct ttm_buffer_object **p_bo)
1309{
1310	struct ttm_buffer_object *bo;
1311	size_t acc_size;
1312	int ret;
1313
1314	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1315	if (unlikely(bo == NULL))
1316		return -ENOMEM;
1317
1318	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1319	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1320				buffer_start, interruptible,
1321			  persistent_swap_storage, acc_size, NULL, NULL);
1322	if (likely(ret == 0))
1323		*p_bo = bo;
1324
1325	return ret;
1326}
1327EXPORT_SYMBOL(ttm_bo_create);
1328
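/*
 * Editor's example (not part of the original file): ttm_bo_create() is
 * the convenience path when nothing needs to be embedded around the
 * buffer object. A sketch creating a 1 MiB cached system-memory bo.
 */
static int __maybe_unused example_create_system_bo(struct ttm_bo_device *bdev,
						   struct ttm_buffer_object **pbo)
{
	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};

	return ttm_bo_create(bdev, 1024 * 1024, ttm_bo_type_kernel,
			     &placement, 0, 0, false, NULL, pbo);
}
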
1329static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1330					unsigned mem_type, bool allow_errors)
1331{
1332	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1333	struct ttm_bo_global *glob = bdev->glob;
1334	int ret;
1335
1336	/*
1337	 * Can't use standard list traversal since we're unlocking.
1338	 */
1339
1340	spin_lock(&glob->lru_lock);
1341	while (!list_empty(&man->lru)) {
1342		spin_unlock(&glob->lru_lock);
1343		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1344		if (ret) {
1345			if (allow_errors) {
1346				return ret;
1347			} else {
1348				pr_err("Cleanup eviction failed\n");
1349			}
1350		}
1351		spin_lock(&glob->lru_lock);
1352	}
1353	spin_unlock(&glob->lru_lock);
1354	return 0;
1355}
1356
1357int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1358{
1359	struct ttm_mem_type_manager *man;
1360	int ret = -EINVAL;
1361
1362	if (mem_type >= TTM_NUM_MEM_TYPES) {
1363		pr_err("Illegal memory type %d\n", mem_type);
1364		return ret;
1365	}
1366	man = &bdev->man[mem_type];
1367
1368	if (!man->has_type) {
1369		pr_err("Trying to take down uninitialized memory manager type %u\n",
1370		       mem_type);
1371		return ret;
1372	}
1373
1374	man->use_type = false;
1375	man->has_type = false;
1376
1377	ret = 0;
1378	if (mem_type > 0) {
1379		ttm_bo_force_list_clean(bdev, mem_type, false);
1380
1381		ret = (*man->func->takedown)(man);
1382	}
1383
1384	return ret;
1385}
1386EXPORT_SYMBOL(ttm_bo_clean_mm);
1387
1388int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1389{
1390	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1391
1392	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1393		pr_err("Illegal memory manager memory type %u\n", mem_type);
1394		return -EINVAL;
1395	}
1396
1397	if (!man->has_type) {
1398		pr_err("Memory type %u has not been initialized\n", mem_type);
1399		return 0;
1400	}
1401
1402	return ttm_bo_force_list_clean(bdev, mem_type, true);
1403}
1404EXPORT_SYMBOL(ttm_bo_evict_mm);
1405
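/*
 * Editor's sketch (not part of the original file): ttm_bo_evict_mm() is
 * typically called on suspend to push every buffer out of a memory type
 * before its backing store (e.g. VRAM) loses power.
 */
static int __maybe_unused example_suspend_evict(struct ttm_bo_device *bdev)
{
	return ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
}
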
1406int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1407			unsigned long p_size)
1408{
1409	int ret = -EINVAL;
1410	struct ttm_mem_type_manager *man;
1411
1412	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1413	man = &bdev->man[type];
1414	BUG_ON(man->has_type);
1415	man->io_reserve_fastpath = true;
1416	man->use_io_reserve_lru = false;
1417	mutex_init(&man->io_reserve_mutex);
1418	INIT_LIST_HEAD(&man->io_reserve_lru);
1419
1420	ret = bdev->driver->init_mem_type(bdev, type, man);
1421	if (ret)
1422		return ret;
1423	man->bdev = bdev;
1424
1425	ret = 0;
1426	if (type != TTM_PL_SYSTEM) {
1427		ret = (*man->func->init)(man, p_size);
1428		if (ret)
1429			return ret;
1430	}
1431	man->has_type = true;
1432	man->use_type = true;
1433	man->size = p_size;
1434
1435	INIT_LIST_HEAD(&man->lru);
1436
1437	return 0;
1438}
1439EXPORT_SYMBOL(ttm_bo_init_mm);
1440
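/*
 * Editor's example (not part of the original file): drivers call
 * ttm_bo_init_mm() once per managed memory type at load time; p_size is
 * given in pages. "vram_size_bytes" is a hypothetical driver value.
 */
static int __maybe_unused example_init_vram(struct ttm_bo_device *bdev,
					    uint64_t vram_size_bytes)
{
	return ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			      vram_size_bytes >> PAGE_SHIFT);
}
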
1441static void ttm_bo_global_kobj_release(struct kobject *kobj)
1442{
1443	struct ttm_bo_global *glob =
1444		container_of(kobj, struct ttm_bo_global, kobj);
1445
1446	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1447	__free_page(glob->dummy_read_page);
1448	kfree(glob);
1449}
1450
1451void ttm_bo_global_release(struct drm_global_reference *ref)
1452{
1453	struct ttm_bo_global *glob = ref->object;
1454
1455	kobject_del(&glob->kobj);
1456	kobject_put(&glob->kobj);
1457}
1458EXPORT_SYMBOL(ttm_bo_global_release);
1459
1460int ttm_bo_global_init(struct drm_global_reference *ref)
1461{
1462	struct ttm_bo_global_ref *bo_ref =
1463		container_of(ref, struct ttm_bo_global_ref, ref);
1464	struct ttm_bo_global *glob = ref->object;
1465	int ret;
1466
1467	mutex_init(&glob->device_list_mutex);
1468	spin_lock_init(&glob->lru_lock);
1469	glob->mem_glob = bo_ref->mem_glob;
1470	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1471
1472	if (unlikely(glob->dummy_read_page == NULL)) {
1473		ret = -ENOMEM;
1474		goto out_no_drp;
1475	}
1476
1477	INIT_LIST_HEAD(&glob->swap_lru);
1478	INIT_LIST_HEAD(&glob->device_list);
1479
1480	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1481	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1482	if (unlikely(ret != 0)) {
1483		pr_err("Could not register buffer object swapout\n");
1484		goto out_no_shrink;
1485	}
1486
1487	atomic_set(&glob->bo_count, 0);
1488
1489	ret = kobject_init_and_add(
1490		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1491	if (unlikely(ret != 0))
1492		kobject_put(&glob->kobj);
1493	return ret;
1494out_no_shrink:
1495	__free_page(glob->dummy_read_page);
1496out_no_drp:
1497	kfree(glob);
1498	return ret;
1499}
1500EXPORT_SYMBOL(ttm_bo_global_init);
1501
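/*
 * Editor's sketch (not part of the original file): drivers wire
 * ttm_bo_global_init()/ttm_bo_global_release() up through a
 * struct drm_global_reference. Assumes the TTM memory global
 * (DRM_GLOBAL_TTM_MEM) has already been referenced via mem_ref.
 */
static int __maybe_unused example_bo_global_ref(struct ttm_bo_global_ref *bo_ref,
						struct drm_global_reference *mem_ref)
{
	struct drm_global_reference *ref = &bo_ref->ref;

	bo_ref->mem_glob = mem_ref->object;
	ref->global_type = DRM_GLOBAL_TTM_BO;
	ref->size = sizeof(struct ttm_bo_global);
	ref->object = NULL;
	ref->init = &ttm_bo_global_init;
	ref->release = &ttm_bo_global_release;
	return drm_global_item_ref(ref);
}
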
1502
1503int ttm_bo_device_release(struct ttm_bo_device *bdev)
1504{
1505	int ret = 0;
1506	unsigned i = TTM_NUM_MEM_TYPES;
1507	struct ttm_mem_type_manager *man;
1508	struct ttm_bo_global *glob = bdev->glob;
1509
1510	while (i--) {
1511		man = &bdev->man[i];
1512		if (man->has_type) {
1513			man->use_type = false;
1514			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1515				ret = -EBUSY;
1516				pr_err("DRM memory manager type %d is not clean\n",
1517				       i);
1518			}
1519			man->has_type = false;
1520		}
1521	}
1522
1523	mutex_lock(&glob->device_list_mutex);
1524	list_del(&bdev->device_list);
1525	mutex_unlock(&glob->device_list_mutex);
1526
1527	cancel_delayed_work_sync(&bdev->wq);
1528
1529	while (ttm_bo_delayed_delete(bdev, true))
1530		;
1531
1532	spin_lock(&glob->lru_lock);
1533	if (list_empty(&bdev->ddestroy))
1534		TTM_DEBUG("Delayed destroy list was clean\n");
1535
1536	if (list_empty(&bdev->man[0].lru))
1537		TTM_DEBUG("Swap list was clean\n");
1538	spin_unlock(&glob->lru_lock);
1539
1540	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1541	write_lock(&bdev->vm_lock);
1542	drm_mm_takedown(&bdev->addr_space_mm);
1543	write_unlock(&bdev->vm_lock);
1544
1545	return ret;
1546}
1547EXPORT_SYMBOL(ttm_bo_device_release);
1548
1549int ttm_bo_device_init(struct ttm_bo_device *bdev,
1550		       struct ttm_bo_global *glob,
1551		       struct ttm_bo_driver *driver,
1552		       uint64_t file_page_offset,
1553		       bool need_dma32)
1554{
1555	int ret = -EINVAL;
1556
1557	rwlock_init(&bdev->vm_lock);
1558	bdev->driver = driver;
1559
1560	memset(bdev->man, 0, sizeof(bdev->man));
1561
1562	/*
1563	 * Initialize the system memory buffer type.
1564	 * Other types need to be driver / IOCTL initialized.
1565	 */
1566	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1567	if (unlikely(ret != 0))
1568		goto out_no_sys;
1569
1570	bdev->addr_space_rb = RB_ROOT;
1571	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1572	if (unlikely(ret != 0))
1573		goto out_no_addr_mm;
1574
1575	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1576	bdev->nice_mode = true;
1577	INIT_LIST_HEAD(&bdev->ddestroy);
1578	bdev->dev_mapping = NULL;
1579	bdev->glob = glob;
1580	bdev->need_dma32 = need_dma32;
1581	bdev->val_seq = 0;
1582	spin_lock_init(&bdev->fence_lock);
1583	mutex_lock(&glob->device_list_mutex);
1584	list_add_tail(&bdev->device_list, &glob->device_list);
1585	mutex_unlock(&glob->device_list_mutex);
1586
1587	return 0;
1588out_no_addr_mm:
1589	ttm_bo_clean_mm(bdev, 0);
1590out_no_sys:
1591	return ret;
1592}
1593EXPORT_SYMBOL(ttm_bo_device_init);
1594
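/*
 * Editor's sketch (not part of the original file): typical bring-up
 * order. The mmap file offset (here 0x100000000ULL >> PAGE_SHIFT, the
 * value most drivers define as DRM_FILE_PAGE_OFFSET) and the 256 MiB TT
 * aperture size are illustrative assumptions.
 */
static int __maybe_unused example_device_init(struct ttm_bo_device *bdev,
					      struct ttm_bo_global *glob,
					      struct ttm_bo_driver *driver)
{
	int ret;

	ret = ttm_bo_device_init(bdev, glob, driver,
				 0x100000000ULL >> PAGE_SHIFT,
				 false /* need_dma32 */);
	if (unlikely(ret != 0))
		return ret;

	/* The system type is set up above; other types follow explicitly. */
	return ttm_bo_init_mm(bdev, TTM_PL_TT,
			      (256 * 1024 * 1024) >> PAGE_SHIFT);
}
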
1595/*
1596 * buffer object vm functions.
1597 */
1598
1599bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1600{
1601	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1602
1603	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1604		if (mem->mem_type == TTM_PL_SYSTEM)
1605			return false;
1606
1607		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1608			return false;
1609
1610		if (mem->placement & TTM_PL_FLAG_CACHED)
1611			return false;
1612	}
1613	return true;
1614}
1615
1616void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1617{
1618	struct ttm_bo_device *bdev = bo->bdev;
1619	loff_t offset = (loff_t) bo->addr_space_offset;
1620	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1621
1622	if (!bdev->dev_mapping)
1623		return;
1624	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1625	ttm_mem_io_free_vm(bo);
1626}
1627
1628void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1629{
1630	struct ttm_bo_device *bdev = bo->bdev;
1631	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1632
1633	ttm_mem_io_lock(man, false);
1634	ttm_bo_unmap_virtual_locked(bo);
1635	ttm_mem_io_unlock(man);
1636}
1637
1638
1639EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1640
1641static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1642{
1643	struct ttm_bo_device *bdev = bo->bdev;
1644	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1645	struct rb_node *parent = NULL;
1646	struct ttm_buffer_object *cur_bo;
1647	unsigned long offset = bo->vm_node->start;
1648	unsigned long cur_offset;
1649
1650	while (*cur) {
1651		parent = *cur;
1652		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1653		cur_offset = cur_bo->vm_node->start;
1654		if (offset < cur_offset)
1655			cur = &parent->rb_left;
1656		else if (offset > cur_offset)
1657			cur = &parent->rb_right;
1658		else
1659			BUG();
1660	}
1661
1662	rb_link_node(&bo->vm_rb, parent, cur);
1663	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1664}
1665
1666/**
1667 * ttm_bo_setup_vm:
1668 *
1669 * @bo: the buffer to allocate address space for
1670 *
1671 * Allocate address space in the drm device so that applications
1672 * can mmap the buffer and access the contents. This only
1673 * applies to ttm_bo_type_device objects as others are not
1674 * placed in the drm device address space.
1675 */
1676
1677static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1678{
1679	struct ttm_bo_device *bdev = bo->bdev;
1680	int ret;
1681
1682retry_pre_get:
1683	ret = drm_mm_pre_get(&bdev->addr_space_mm);
1684	if (unlikely(ret != 0))
1685		return ret;
1686
1687	write_lock(&bdev->vm_lock);
1688	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1689					 bo->mem.num_pages, 0, 0);
1690
1691	if (unlikely(bo->vm_node == NULL)) {
1692		ret = -ENOMEM;
1693		goto out_unlock;
1694	}
1695
1696	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1697					      bo->mem.num_pages, 0);
1698
1699	if (unlikely(bo->vm_node == NULL)) {
1700		write_unlock(&bdev->vm_lock);
1701		goto retry_pre_get;
1702	}
1703
1704	ttm_bo_vm_insert_rb(bo);
1705	write_unlock(&bdev->vm_lock);
1706	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1707
1708	return 0;
1709out_unlock:
1710	write_unlock(&bdev->vm_lock);
1711	return ret;
1712}
1713
1714int ttm_bo_wait(struct ttm_buffer_object *bo,
1715		bool lazy, bool interruptible, bool no_wait)
1716{
1717	struct ttm_bo_driver *driver = bo->bdev->driver;
1718	struct ttm_bo_device *bdev = bo->bdev;
1719	void *sync_obj;
1720	void *sync_obj_arg;
1721	int ret = 0;
1722
1723	if (likely(bo->sync_obj == NULL))
1724		return 0;
1725
1726	while (bo->sync_obj) {
1727
1728		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1729			void *tmp_obj = bo->sync_obj;
1730			bo->sync_obj = NULL;
1731			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1732			spin_unlock(&bdev->fence_lock);
1733			driver->sync_obj_unref(&tmp_obj);
1734			spin_lock(&bdev->fence_lock);
1735			continue;
1736		}
1737
1738		if (no_wait)
1739			return -EBUSY;
1740
1741		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1742		sync_obj_arg = bo->sync_obj_arg;
1743		spin_unlock(&bdev->fence_lock);
1744		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1745					    lazy, interruptible);
1746		if (unlikely(ret != 0)) {
1747			driver->sync_obj_unref(&sync_obj);
1748			spin_lock(&bdev->fence_lock);
1749			return ret;
1750		}
1751		spin_lock(&bdev->fence_lock);
1752		if (likely(bo->sync_obj == sync_obj &&
1753			   bo->sync_obj_arg == sync_obj_arg)) {
1754			void *tmp_obj = bo->sync_obj;
1755			bo->sync_obj = NULL;
1756			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1757				  &bo->priv_flags);
1758			spin_unlock(&bdev->fence_lock);
1759			driver->sync_obj_unref(&sync_obj);
1760			driver->sync_obj_unref(&tmp_obj);
1761			spin_lock(&bdev->fence_lock);
1762		} else {
1763			spin_unlock(&bdev->fence_lock);
1764			driver->sync_obj_unref(&sync_obj);
1765			spin_lock(&bdev->fence_lock);
1766		}
1767	}
1768	return 0;
1769}
1770EXPORT_SYMBOL(ttm_bo_wait);
1771
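/*
 * Editor's example (not part of the original file): ttm_bo_wait() must
 * be called with bdev->fence_lock held, mirroring the pattern used
 * throughout this file. A minimal idle-check sketch.
 */
static int __maybe_unused example_wait_idle(struct ttm_buffer_object *bo,
					    bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false /* lazy */, true /* interruptible */,
			  no_wait);
	spin_unlock(&bdev->fence_lock);

	return ret;
}
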
1772int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1773{
1774	struct ttm_bo_device *bdev = bo->bdev;
1775	int ret = 0;
1776
1777	/*
1778	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1779	 */
1780
1781	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1782	if (unlikely(ret != 0))
1783		return ret;
1784	spin_lock(&bdev->fence_lock);
1785	ret = ttm_bo_wait(bo, false, true, no_wait);
1786	spin_unlock(&bdev->fence_lock);
1787	if (likely(ret == 0))
1788		atomic_inc(&bo->cpu_writers);
1789	ttm_bo_unreserve(bo);
1790	return ret;
1791}
1792EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1793
1794void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1795{
1796	if (atomic_dec_and_test(&bo->cpu_writers))
1797		wake_up_all(&bo->event_queue);
1798}
1799EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1800
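/*
 * Editor's example (not part of the original file): CPU writes are
 * bracketed by the synccpu grab/release pair above; the actual write is
 * elided.
 */
static int __maybe_unused example_cpu_write(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_synccpu_write_grab(bo, false /* no_wait */);

	if (unlikely(ret != 0))
		return ret;

	/* ... CPU writes to the buffer are now safe against the GPU ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}
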
1801/**
1802 * A buffer object shrink method that tries to swap out the first
1803 * buffer object on the bo_global::swap_lru list.
1804 */
1805
1806static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1807{
1808	struct ttm_bo_global *glob =
1809	    container_of(shrink, struct ttm_bo_global, shrink);
1810	struct ttm_buffer_object *bo;
1811	int ret = -EBUSY;
1812	int put_count;
1813	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1814
1815	spin_lock(&glob->lru_lock);
1816	while (ret == -EBUSY) {
1817		if (unlikely(list_empty(&glob->swap_lru))) {
1818			spin_unlock(&glob->lru_lock);
1819			return -EBUSY;
1820		}
1821
1822		bo = list_first_entry(&glob->swap_lru,
1823				      struct ttm_buffer_object, swap);
1824		kref_get(&bo->list_kref);
1825
1826		if (!list_empty(&bo->ddestroy)) {
1827			spin_unlock(&glob->lru_lock);
1828			(void) ttm_bo_cleanup_refs(bo, false, false, false);
1829			kref_put(&bo->list_kref, ttm_bo_release_list);
1830			spin_lock(&glob->lru_lock);
1831			continue;
1832		}
1833
1834		/**
1835		 * Reserve buffer. Since we unlock while sleeping, we need
1836		 * to re-check that nobody removed us from the swap-list while
1837		 * we slept.
1838		 */
1839
1840		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1841		if (unlikely(ret == -EBUSY)) {
1842			spin_unlock(&glob->lru_lock);
1843			ttm_bo_wait_unreserved(bo, false);
1844			kref_put(&bo->list_kref, ttm_bo_release_list);
1845			spin_lock(&glob->lru_lock);
1846		}
1847	}
1848
1849	BUG_ON(ret != 0);
1850	put_count = ttm_bo_del_from_lru(bo);
1851	spin_unlock(&glob->lru_lock);
1852
1853	ttm_bo_list_ref_sub(bo, put_count, true);
1854
1855	/**
1856	 * Wait for GPU, then move to system cached.
1857	 */
1858
1859	spin_lock(&bo->bdev->fence_lock);
1860	ret = ttm_bo_wait(bo, false, false, false);
1861	spin_unlock(&bo->bdev->fence_lock);
1862
1863	if (unlikely(ret != 0))
1864		goto out;
1865
1866	if ((bo->mem.placement & swap_placement) != swap_placement) {
1867		struct ttm_mem_reg evict_mem;
1868
1869		evict_mem = bo->mem;
1870		evict_mem.mm_node = NULL;
1871		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1872		evict_mem.mem_type = TTM_PL_SYSTEM;
1873
1874		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1875					     false, false, false);
1876		if (unlikely(ret != 0))
1877			goto out;
1878	}
1879
1880	ttm_bo_unmap_virtual(bo);
1881
1882	/**
1883	 * Swap out. Buffer will be swapped in again as soon as
1884	 * anyone tries to access a ttm page.
1885	 */
1886
1887	if (bo->bdev->driver->swap_notify)
1888		bo->bdev->driver->swap_notify(bo);
1889
1890	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1891out:
1892
1893	/**
1894	 *
1895	 * Unreserve without putting on LRU to avoid swapping out an
1896	 * already swapped buffer.
1897	 */
1898
1899	atomic_set(&bo->reserved, 0);
1900	wake_up_all(&bo->event_queue);
1901	kref_put(&bo->list_kref, ttm_bo_release_list);
1902	return ret;
1903}
1904
1905void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1906{
1907	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1908		;
1909}
1910EXPORT_SYMBOL(ttm_bo_swapout_all);
v5.4
   1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28/*
  29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  30 */
  31
  32#define pr_fmt(fmt) "[TTM] " fmt
  33
  34#include <drm/ttm/ttm_module.h>
  35#include <drm/ttm/ttm_bo_driver.h>
  36#include <drm/ttm/ttm_placement.h>
  37#include <linux/jiffies.h>
  38#include <linux/slab.h>
  39#include <linux/sched.h>
  40#include <linux/mm.h>
  41#include <linux/file.h>
  42#include <linux/module.h>
  43#include <linux/atomic.h>
  44#include <linux/dma-resv.h>
  45
  46static void ttm_bo_global_kobj_release(struct kobject *kobj);
  47
  48/**
  49 * ttm_global_mutex - protecting the global BO state
  50 */
  51DEFINE_MUTEX(ttm_global_mutex);
  52unsigned ttm_bo_glob_use_count;
  53struct ttm_bo_global ttm_bo_glob;
  54
  55static struct attribute ttm_bo_count = {
  56	.name = "bo_count",
  57	.mode = S_IRUGO
  58};
  59
  60/* default destructor */
  61static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
  62{
  63	kfree(bo);
  64}
  65
  66static inline int ttm_mem_type_from_place(const struct ttm_place *place,
  67					  uint32_t *mem_type)
  68{
  69	int pos;
  70
  71	pos = ffs(place->flags & TTM_PL_MASK_MEM);
  72	if (unlikely(!pos))
  73		return -EINVAL;
  74
  75	*mem_type = pos - 1;
  76	return 0;
  77}
  78
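/*
 * Illustrative sketch (not part of this file): ttm_mem_type_from_place()
 * maps the lowest set memory-type bit of a place's flags to its index, so
 * with the standard placement flags:
 *
 *	struct ttm_place place = { .flags = TTM_PL_FLAG_VRAM |
 *					    TTM_PL_FLAG_WC };
 *	uint32_t mem_type;
 *
 *	ttm_mem_type_from_place(&place, &mem_type);
 *	// mem_type == TTM_PL_VRAM, i.e. ffs(1 << TTM_PL_VRAM) - 1
 *
 * Caching bits such as TTM_PL_FLAG_WC lie outside TTM_PL_MASK_MEM and are
 * ignored by the lookup.
 */
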
  79static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
  80			       int mem_type)
  81{
  82	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  83
  84	drm_printf(p, "    has_type: %d\n", man->has_type);
  85	drm_printf(p, "    use_type: %d\n", man->use_type);
  86	drm_printf(p, "    flags: 0x%08X\n", man->flags);
  87	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
  88	drm_printf(p, "    size: %llu\n", man->size);
  89	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
  90	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
  91	if (mem_type != TTM_PL_SYSTEM)
  92		(*man->func->debug)(man, p);
  93}
  94
  95static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  96					struct ttm_placement *placement)
  97{
  98	struct drm_printer p = drm_debug_printer(TTM_PFX);
  99	int i, ret, mem_type;
 100
 101	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
 102		   bo, bo->mem.num_pages, bo->mem.size >> 10,
 103		   bo->mem.size >> 20);
 104	for (i = 0; i < placement->num_placement; i++) {
 105		ret = ttm_mem_type_from_place(&placement->placement[i],
 106						&mem_type);
 107		if (ret)
 108			return;
 109		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
 110			   i, placement->placement[i].flags, mem_type);
 111		ttm_mem_type_debug(bo->bdev, &p, mem_type);
 112	}
 113}
 114
 115static ssize_t ttm_bo_global_show(struct kobject *kobj,
 116				  struct attribute *attr,
 117				  char *buffer)
 118{
 119	struct ttm_bo_global *glob =
 120		container_of(kobj, struct ttm_bo_global, kobj);
 121
 122	return snprintf(buffer, PAGE_SIZE, "%d\n",
 123				atomic_read(&glob->bo_count));
 124}
 125
 126static struct attribute *ttm_bo_global_attrs[] = {
 127	&ttm_bo_count,
 128	NULL
 129};
 130
 131static const struct sysfs_ops ttm_bo_global_ops = {
 132	.show = &ttm_bo_global_show
 133};
 134
 135static struct kobj_type ttm_bo_glob_kobj_type  = {
 136	.release = &ttm_bo_global_kobj_release,
 137	.sysfs_ops = &ttm_bo_global_ops,
 138	.default_attrs = ttm_bo_global_attrs
 139};
 140
 141
 142static inline uint32_t ttm_bo_type_flags(unsigned type)
 143{
 144	return 1 << (type);
 145}
 146
 147static void ttm_bo_release_list(struct kref *list_kref)
 148{
 149	struct ttm_buffer_object *bo =
 150	    container_of(list_kref, struct ttm_buffer_object, list_kref);
 151	struct ttm_bo_device *bdev = bo->bdev;
 152	size_t acc_size = bo->acc_size;
 153
 154	BUG_ON(kref_read(&bo->list_kref));
 155	BUG_ON(kref_read(&bo->kref));
 156	BUG_ON(atomic_read(&bo->cpu_writers));
 157	BUG_ON(bo->mem.mm_node != NULL);
 158	BUG_ON(!list_empty(&bo->lru));
 159	BUG_ON(!list_empty(&bo->ddestroy));
 160	ttm_tt_destroy(bo->ttm);
 161	atomic_dec(&bo->bdev->glob->bo_count);
 162	dma_fence_put(bo->moving);
 163	if (!ttm_bo_uses_embedded_gem_object(bo))
 164		dma_resv_fini(&bo->base._resv);
 165	mutex_destroy(&bo->wu_mutex);
 166	bo->destroy(bo);
 167	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 168}
 169
 170static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 171				  struct ttm_mem_reg *mem)
 172{
 173	struct ttm_bo_device *bdev = bo->bdev;
 174	struct ttm_mem_type_manager *man;
 175
 176	dma_resv_assert_held(bo->base.resv);
 177
 178	if (!list_empty(&bo->lru))
 179		return;
 180
 181	if (mem->placement & TTM_PL_FLAG_NO_EVICT)
 182		return;
 183
 184	man = &bdev->man[mem->mem_type];
 185	list_add_tail(&bo->lru, &man->lru[bo->priority]);
 186	kref_get(&bo->list_kref);
 187
 188	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
 189	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
 190				     TTM_PAGE_FLAG_SWAPPED))) {
 191		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
 192		kref_get(&bo->list_kref);
 193	}
 194}
 195
 196void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 197{
 198	ttm_bo_add_mem_to_lru(bo, &bo->mem);
 199}
 200EXPORT_SYMBOL(ttm_bo_add_to_lru);
 201
 202static void ttm_bo_ref_bug(struct kref *list_kref)
 203{
 204	BUG();
 205}
 206
 207void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 208{
 209	struct ttm_bo_device *bdev = bo->bdev;
 210	bool notify = false;
 211
 212	if (!list_empty(&bo->swap)) {
 213		list_del_init(&bo->swap);
 214		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 215		notify = true;
 216	}
 217	if (!list_empty(&bo->lru)) {
 218		list_del_init(&bo->lru);
 219		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 220		notify = true;
 221	}
 222
 223	if (notify && bdev->driver->del_from_lru_notify)
 224		bdev->driver->del_from_lru_notify(bo);
 225}
 226
 227void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 228{
 229	struct ttm_bo_global *glob = bo->bdev->glob;
 230
 231	spin_lock(&glob->lru_lock);
 232	ttm_bo_del_from_lru(bo);
 233	spin_unlock(&glob->lru_lock);
 234}
 235EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 236
 237static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 238				     struct ttm_buffer_object *bo)
 239{
 240	if (!pos->first)
 241		pos->first = bo;
 242	pos->last = bo;
 243}
 244
 245void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 246			     struct ttm_lru_bulk_move *bulk)
 247{
 248	dma_resv_assert_held(bo->base.resv);
 249
 250	ttm_bo_del_from_lru(bo);
 251	ttm_bo_add_to_lru(bo);
 252
 253	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 254		switch (bo->mem.mem_type) {
 255		case TTM_PL_TT:
 256			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 257			break;
 258
 259		case TTM_PL_VRAM:
 260			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
 261			break;
 262		}
 263		if (bo->ttm && !(bo->ttm->page_flags &
 264				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
 265			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
 266	}
 267}
 268EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 269
 270void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 271{
 272	unsigned i;
 273
 274	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 275		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
 276		struct ttm_mem_type_manager *man;
 277
 278		if (!pos->first)
 279			continue;
 280
 281		dma_resv_assert_held(pos->first->base.resv);
 282		dma_resv_assert_held(pos->last->base.resv);
 283
 284		man = &pos->first->bdev->man[TTM_PL_TT];
 285		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 286				    &pos->last->lru);
 287	}
 288
 289	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 290		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
 291		struct ttm_mem_type_manager *man;
 292
 293		if (!pos->first)
 294			continue;
 295
 296		dma_resv_assert_held(pos->first->base.resv);
 297		dma_resv_assert_held(pos->last->base.resv);
 298
 299		man = &pos->first->bdev->man[TTM_PL_VRAM];
 300		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 301				    &pos->last->lru);
 302	}
 303
 304	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 305		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
 306		struct list_head *lru;
 307
 308		if (!pos->first)
 309			continue;
 310
 311		dma_resv_assert_held(pos->first->base.resv);
 312		dma_resv_assert_held(pos->last->base.resv);
 313
 314		lru = &pos->first->bdev->glob->swap_lru[i];
 315		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
 316	}
 317}
 318EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
 319
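/*
 * Illustrative sketch (not part of this file): a driver that has many BOs
 * reserved can batch its LRU bumps with a ttm_lru_bulk_move instead of
 * splicing each buffer individually, roughly:
 *
 *	struct ttm_lru_bulk_move bulk;
 *	unsigned i;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&bdev->glob->lru_lock);
 *	for (i = 0; i < num_bos; ++i)		// all bos[i] reserved
 *		ttm_bo_move_to_lru_tail(bos[i], &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&bdev->glob->lru_lock);
 *
 * "bos"/"num_bos" are hypothetical; the reservation of each BO must be
 * held, as the dma_resv_assert_held() checks above enforce.
 */
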
 320static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 321				  struct ttm_mem_reg *mem, bool evict,
 322				  struct ttm_operation_ctx *ctx)
 323{
 324	struct ttm_bo_device *bdev = bo->bdev;
 325	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
 326	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
 327	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
 328	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
 329	int ret = 0;
 330
 331	if (old_is_pci || new_is_pci ||
 332	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
 333		ret = ttm_mem_io_lock(old_man, true);
 334		if (unlikely(ret != 0))
 335			goto out_err;
 336		ttm_bo_unmap_virtual_locked(bo);
 337		ttm_mem_io_unlock(old_man);
 338	}
 339
 340	/*
 341	 * Create and bind a ttm if required.
 342	 */
 343
 344	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 345		if (bo->ttm == NULL) {
 346			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
 347			ret = ttm_tt_create(bo, zero);
 348			if (ret)
 349				goto out_err;
 350		}
 351
 352		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 353		if (ret)
 354			goto out_err;
 355
 356		if (mem->mem_type != TTM_PL_SYSTEM) {
 357			ret = ttm_tt_bind(bo->ttm, mem, ctx);
 358			if (ret)
 359				goto out_err;
 360		}
 361
 362		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 363			if (bdev->driver->move_notify)
 364				bdev->driver->move_notify(bo, evict, mem);
 365			bo->mem = *mem;
 366			mem->mm_node = NULL;
 367			goto moved;
 368		}
 369	}
 370
 371	if (bdev->driver->move_notify)
 372		bdev->driver->move_notify(bo, evict, mem);
 373
 374	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 375	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 376		ret = ttm_bo_move_ttm(bo, ctx, mem);
 377	else if (bdev->driver->move)
 378		ret = bdev->driver->move(bo, evict, ctx, mem);
 379	else
 380		ret = ttm_bo_move_memcpy(bo, ctx, mem);
 381
 382	if (ret) {
 383		if (bdev->driver->move_notify) {
 384			swap(*mem, bo->mem);
 385			bdev->driver->move_notify(bo, false, mem);
 386			swap(*mem, bo->mem);
 387		}
 388
 389		goto out_err;
 390	}
 391
 392moved:
 393	if (bo->evicted) {
 394		if (bdev->driver->invalidate_caches) {
 395			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 396			if (ret)
 397				pr_err("Can not flush read caches\n");
 398		}
 399		bo->evicted = false;
 400	}
 401
 402	if (bo->mem.mm_node)
 403		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 404		    bdev->man[bo->mem.mem_type].gpu_offset;
 405	else
 406		bo->offset = 0;
 407
 408	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
 409	return 0;
 410
 411out_err:
 412	new_man = &bdev->man[bo->mem.mem_type];
 413	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 414		ttm_tt_destroy(bo->ttm);
 415		bo->ttm = NULL;
 416	}
 417
 418	return ret;
 419}
 420
 421/**
 422 * Must be called with bo::reserved held.
 423 * Will release GPU memory type usage on destruction.
 424 * This is the place to put in driver specific hooks to release
 425 * driver private resources.
 426 * Will release the bo::reserved lock.
 427 */
 428
 429static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 430{
 431	if (bo->bdev->driver->move_notify)
 432		bo->bdev->driver->move_notify(bo, false, NULL);
 433
 434	ttm_tt_destroy(bo->ttm);
 435	bo->ttm = NULL;
 436	ttm_bo_mem_put(bo, &bo->mem);
 437}
 438
 439static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 440{
 441	int r;
 442
 443	if (bo->base.resv == &bo->base._resv)
 444		return 0;
 445
 446	BUG_ON(!dma_resv_trylock(&bo->base._resv));
 447
 448	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
 449	if (r)
 450		dma_resv_unlock(&bo->base._resv);
 451
 452	return r;
 453}
 454
 455static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 456{
 457	struct dma_resv_list *fobj;
 458	struct dma_fence *fence;
 459	int i;
 460
 461	fobj = dma_resv_get_list(&bo->base._resv);
 462	fence = dma_resv_get_excl(&bo->base._resv);
 463	if (fence && !fence->ops->signaled)
 464		dma_fence_enable_sw_signaling(fence);
 465
 466	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 467		fence = rcu_dereference_protected(fobj->shared[i],
 468					dma_resv_held(bo->base.resv));
 469
 470		if (!fence->ops->signaled)
 471			dma_fence_enable_sw_signaling(fence);
 472	}
 473}
 474
 475static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 476{
 477	struct ttm_bo_device *bdev = bo->bdev;
 478	struct ttm_bo_global *glob = bdev->glob;
 479	int ret;
 480
 481	ret = ttm_bo_individualize_resv(bo);
 482	if (ret) {
 483		/* Last resort, if we fail to allocate memory for the
 484		 * fences, block for the BO to become idle
 485		 */
 486		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
 487						    30 * HZ);
 488		spin_lock(&glob->lru_lock);
 489		goto error;
 490	}
 491
 492	spin_lock(&glob->lru_lock);
 493	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
 494	if (!ret) {
 495		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
 496			ttm_bo_del_from_lru(bo);
 497			spin_unlock(&glob->lru_lock);
 498			if (bo->base.resv != &bo->base._resv)
 499				dma_resv_unlock(&bo->base._resv);
 500
 501			ttm_bo_cleanup_memtype_use(bo);
 502			dma_resv_unlock(bo->base.resv);
 503			return;
 504		}
 505
 506		ttm_bo_flush_all_fences(bo);
 507
 508		/*
 509		 * Make NO_EVICT bos immediately available to
 510		 * shrinkers, now that they are queued for
 511		 * destruction.
 512		 */
 513		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 514			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
 515			ttm_bo_add_to_lru(bo);
 516		}
 517
 518		dma_resv_unlock(bo->base.resv);
 519	}
 520	if (bo->base.resv != &bo->base._resv)
 521		dma_resv_unlock(&bo->base._resv);
 522
 523error:
 524	kref_get(&bo->list_kref);
 525	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 526	spin_unlock(&glob->lru_lock);
 527
 528	schedule_delayed_work(&bdev->wq,
 529			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 530}
 531
 532/**
 533 * ttm_bo_cleanup_refs
 534 * If bo idle, remove from delayed- and lru lists, and unref.
 535 * If not idle, do nothing.
 536 *
 537 * Must be called with lru_lock and reservation held, this function
 538 * will drop the lru lock and optionally the reservation lock before returning.
 539 *
 540 * @interruptible         Any sleeps should occur interruptibly.
 541 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 542 * @unlock_resv           Unlock the reservation lock as well.
 543 */
 544
 545static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 546			       bool interruptible, bool no_wait_gpu,
 547			       bool unlock_resv)
 548{
 549	struct ttm_bo_global *glob = bo->bdev->glob;
 550	struct dma_resv *resv;
 551	int ret;
 552
 553	if (unlikely(list_empty(&bo->ddestroy)))
 554		resv = bo->base.resv;
 555	else
 556		resv = &bo->base._resv;
 557
 558	if (dma_resv_test_signaled_rcu(resv, true))
 559		ret = 0;
 560	else
 561		ret = -EBUSY;
 562
 563	if (ret && !no_wait_gpu) {
 564		long lret;
 565
 566		if (unlock_resv)
 567			dma_resv_unlock(bo->base.resv);
 568		spin_unlock(&glob->lru_lock);
 569
 570		lret = dma_resv_wait_timeout_rcu(resv, true,
 571							   interruptible,
 572							   30 * HZ);
 573
 574		if (lret < 0)
 575			return lret;
 576		else if (lret == 0)
 577			return -EBUSY;
 578
 579		spin_lock(&glob->lru_lock);
 580		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 581			/*
 582			 * We raced, and lost, someone else holds the reservation now,
 583			 * and is probably busy in ttm_bo_cleanup_memtype_use.
 584			 *
 585			 * Even if it's not the case, because we finished waiting any
 586			 * delayed destruction would succeed, so just return success
 587			 * here.
 588			 */
 589			spin_unlock(&glob->lru_lock);
 590			return 0;
 591		}
 592		ret = 0;
 593	}
 594
 595	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 596		if (unlock_resv)
 597			dma_resv_unlock(bo->base.resv);
 598		spin_unlock(&glob->lru_lock);
 599		return ret;
 600	}
 601
 602	ttm_bo_del_from_lru(bo);
 603	list_del_init(&bo->ddestroy);
 604	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 605
 606	spin_unlock(&glob->lru_lock);
 607	ttm_bo_cleanup_memtype_use(bo);
 608
 609	if (unlock_resv)
 610		dma_resv_unlock(bo->base.resv);
 611
 612	return 0;
 613}
 614
 615/**
 616 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 617 * encountered buffers.
 618 */
 619static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 620{
 621	struct ttm_bo_global *glob = bdev->glob;
 622	struct list_head removed;
 623	bool empty;
 624
 625	INIT_LIST_HEAD(&removed);
 626
 627	spin_lock(&glob->lru_lock);
 628	while (!list_empty(&bdev->ddestroy)) {
 629		struct ttm_buffer_object *bo;
 630
 631		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 632				      ddestroy);
 633		kref_get(&bo->list_kref);
 634		list_move_tail(&bo->ddestroy, &removed);
 635
 636		if (remove_all || bo->base.resv != &bo->base._resv) {
 637			spin_unlock(&glob->lru_lock);
 638			dma_resv_lock(bo->base.resv, NULL);
 639
 640			spin_lock(&glob->lru_lock);
 641			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 642
 643		} else if (dma_resv_trylock(bo->base.resv)) {
 644			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 645		} else {
 646			spin_unlock(&glob->lru_lock);
 647		}
 648
 649		kref_put(&bo->list_kref, ttm_bo_release_list);
 650		spin_lock(&glob->lru_lock);
 651	}
 652	list_splice_tail(&removed, &bdev->ddestroy);
 653	empty = list_empty(&bdev->ddestroy);
 654	spin_unlock(&glob->lru_lock);
 655
 656	return empty;
 657}
 658
 659static void ttm_bo_delayed_workqueue(struct work_struct *work)
 660{
 661	struct ttm_bo_device *bdev =
 662	    container_of(work, struct ttm_bo_device, wq.work);
 663
 664	if (!ttm_bo_delayed_delete(bdev, false))
 665		schedule_delayed_work(&bdev->wq,
 666				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 667}
 668
 669static void ttm_bo_release(struct kref *kref)
 670{
 671	struct ttm_buffer_object *bo =
 672	    container_of(kref, struct ttm_buffer_object, kref);
 673	struct ttm_bo_device *bdev = bo->bdev;
 674	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 675
 676	if (bo->bdev->driver->release_notify)
 677		bo->bdev->driver->release_notify(bo);
 678
 679	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
 680	ttm_mem_io_lock(man, false);
 681	ttm_mem_io_free_vm(bo);
 682	ttm_mem_io_unlock(man);
 683	ttm_bo_cleanup_refs_or_queue(bo);
 684	kref_put(&bo->list_kref, ttm_bo_release_list);
 685}
 686
 687void ttm_bo_put(struct ttm_buffer_object *bo)
 688{
 689	kref_put(&bo->kref, ttm_bo_release);
 690}
 691EXPORT_SYMBOL(ttm_bo_put);
 692
 693int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 694{
 695	return cancel_delayed_work_sync(&bdev->wq);
 696}
 697EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 698
 699void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 700{
 701	if (resched)
 702		schedule_delayed_work(&bdev->wq,
 703				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 704}
 705EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 706
 707static int ttm_bo_evict(struct ttm_buffer_object *bo,
 708			struct ttm_operation_ctx *ctx)
 709{
 710	struct ttm_bo_device *bdev = bo->bdev;
 711	struct ttm_mem_reg evict_mem;
 712	struct ttm_placement placement;
 713	int ret = 0;
 714
 715	dma_resv_assert_held(bo->base.resv);
 716
 717	placement.num_placement = 0;
 718	placement.num_busy_placement = 0;
 719	bdev->driver->evict_flags(bo, &placement);
 720
 721	if (!placement.num_placement && !placement.num_busy_placement) {
 722		ret = ttm_bo_pipeline_gutting(bo);
 723		if (ret)
 724			return ret;
 725
 726		return ttm_tt_create(bo, false);
 727	}
 728
 729	evict_mem = bo->mem;
 730	evict_mem.mm_node = NULL;
 731	evict_mem.bus.io_reserved_vm = false;
 732	evict_mem.bus.io_reserved_count = 0;
 733
 734	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 735	if (ret) {
 736		if (ret != -ERESTARTSYS) {
 737			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
 738			       bo);
 739			ttm_bo_mem_space_debug(bo, &placement);
 740		}
 741		goto out;
 742	}
 743
 744	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
 745	if (unlikely(ret)) {
 746		if (ret != -ERESTARTSYS)
 747			pr_err("Buffer eviction failed\n");
 748		ttm_bo_mem_put(bo, &evict_mem);
 749		goto out;
 750	}
 751	bo->evicted = true;
 752out:
 753	return ret;
 754}
 755
 756bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 757			      const struct ttm_place *place)
 758{
 759	/* Don't evict this BO if it's outside of the
 760	 * requested placement range
 761	 */
 762	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
 763	    (place->lpfn && place->lpfn <= bo->mem.start))
 764		return false;
 765
 766	return true;
 767}
 768EXPORT_SYMBOL(ttm_bo_eviction_valuable);
 769
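/*
 * Illustrative sketch (not part of this file): drivers wire this up as the
 * default for their ttm_bo_driver::eviction_valuable hook, and can wrap it
 * to veto evictions; the "mydrv_" names are hypothetical.
 *
 *	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
 *					    const struct ttm_place *place)
 *	{
 *		if (mydrv_bo_is_pinned_for_scanout(bo))	// hypothetical check
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */
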
 770/**
 771 * Check whether the target bo may be evicted or swapped out, including:
 772 *
 773 * a. if the bo shares its reservation object with ctx->resv, that object is
 774 * assumed to be locked already, so it is not locked again, and true is
 775 * returned directly when either the operation allows reserved eviction or
 776 * the target bo is already on the delayed free list;
 777 *
 778 * b. Otherwise, trylock it.
 779 */
 780static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 781			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
 782{
 783	bool ret = false;
 784
 785	if (bo->base.resv == ctx->resv) {
 786		dma_resv_assert_held(bo->base.resv);
 787		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 788		    || !list_empty(&bo->ddestroy))
 789			ret = true;
 790		*locked = false;
 791		if (busy)
 792			*busy = false;
 793	} else {
 794		ret = dma_resv_trylock(bo->base.resv);
 795		*locked = ret;
 796		if (busy)
 797			*busy = !ret;
 798	}
 799
 800	return ret;
 801}
 802
 803/**
 804 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 805 *
 806 * @busy_bo: BO which couldn't be locked with trylock
 807 * @ctx: operation context
 808 * @ticket: acquire ticket
 809 *
 810 * Try to lock a busy buffer object to avoid failing eviction.
 811 */
 812static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 813				   struct ttm_operation_ctx *ctx,
 814				   struct ww_acquire_ctx *ticket)
 815{
 816	int r;
 817
 818	if (!busy_bo || !ticket)
 819		return -EBUSY;
 820
 821	if (ctx->interruptible)
 822		r = dma_resv_lock_interruptible(busy_bo->base.resv,
 823							  ticket);
 824	else
 825		r = dma_resv_lock(busy_bo->base.resv, ticket);
 826
 827	/*
 828	 * TODO: It would be better to keep the BO locked until allocation is at
 829	 * least tried one more time, but that would mean a much larger rework
 830	 * of TTM.
 831	 */
 832	if (!r)
 833		dma_resv_unlock(busy_bo->base.resv);
 834
 835	return r == -EDEADLK ? -EBUSY : r;
 836}
 837
 838static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 839			       uint32_t mem_type,
 840			       const struct ttm_place *place,
 841			       struct ttm_operation_ctx *ctx,
 842			       struct ww_acquire_ctx *ticket)
 843{
 844	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
 845	struct ttm_bo_global *glob = bdev->glob;
 846	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 847	bool locked = false;
 848	unsigned i;
 849	int ret;
 850
 851	spin_lock(&glob->lru_lock);
 852	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 853		list_for_each_entry(bo, &man->lru[i], lru) {
 854			bool busy;
 855
 856			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 857							    &busy)) {
 858				if (busy && !busy_bo && ticket !=
 859				    dma_resv_locking_ctx(bo->base.resv))
 860					busy_bo = bo;
 861				continue;
 862			}
 863
 864			if (place && !bdev->driver->eviction_valuable(bo,
 865								      place)) {
 866				if (locked)
 867					dma_resv_unlock(bo->base.resv);
 868				continue;
 869			}
 870			break;
 871		}
 872
 873		/* If the inner loop terminated early, we have our candidate */
 874		if (&bo->lru != &man->lru[i])
 875			break;
 876
 877		bo = NULL;
 878	}
 879
 880	if (!bo) {
 881		if (busy_bo)
 882			kref_get(&busy_bo->list_kref);
 883		spin_unlock(&glob->lru_lock);
 884		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 885		if (busy_bo)
 886			kref_put(&busy_bo->list_kref, ttm_bo_release_list);
 887		return ret;
 888	}
 889
 890	kref_get(&bo->list_kref);
 891
 892	if (!list_empty(&bo->ddestroy)) {
 893		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
 894					  ctx->no_wait_gpu, locked);
 895		kref_put(&bo->list_kref, ttm_bo_release_list);
 896		return ret;
 897	}
 898
 899	ttm_bo_del_from_lru(bo);
 900	spin_unlock(&glob->lru_lock);
 901
 902	ret = ttm_bo_evict(bo, ctx);
 903	if (locked) {
 904		ttm_bo_unreserve(bo);
 905	} else {
 906		spin_lock(&glob->lru_lock);
 907		ttm_bo_add_to_lru(bo);
 908		spin_unlock(&glob->lru_lock);
 909	}
 910
 911	kref_put(&bo->list_kref, ttm_bo_release_list);
 912	return ret;
 913}
 914
 915void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 916{
 917	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 918
 919	if (mem->mm_node)
 920		(*man->func->put_node)(man, mem);
 921}
 922EXPORT_SYMBOL(ttm_bo_mem_put);
 923
 924/**
 925 * Add the last move fence to the BO and reserve a new shared slot.
 926 */
 927static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 928				 struct ttm_mem_type_manager *man,
 929				 struct ttm_mem_reg *mem)
 930{
 931	struct dma_fence *fence;
 932	int ret;
 933
 934	spin_lock(&man->move_lock);
 935	fence = dma_fence_get(man->move);
 936	spin_unlock(&man->move_lock);
 937
 938	if (fence) {
 939		dma_resv_add_shared_fence(bo->base.resv, fence);
 940
 941		ret = dma_resv_reserve_shared(bo->base.resv, 1);
 942		if (unlikely(ret)) {
 943			dma_fence_put(fence);
 944			return ret;
 945		}
 946
 947		dma_fence_put(bo->moving);
 948		bo->moving = fence;
 949	}
 950
 951	return 0;
 952}
 953
 954/**
 955 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 956 * space, or we've evicted everything and there isn't enough space.
 957 */
 958static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 959				  const struct ttm_place *place,
 960				  struct ttm_mem_reg *mem,
 961				  struct ttm_operation_ctx *ctx)
 962{
 963	struct ttm_bo_device *bdev = bo->bdev;
 964	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 965	struct ww_acquire_ctx *ticket;
 966	int ret;
 967
 968	ticket = dma_resv_locking_ctx(bo->base.resv);
 969	do {
 970		ret = (*man->func->get_node)(man, bo, place, mem);
 971		if (unlikely(ret != 0))
 972			return ret;
 973		if (mem->mm_node)
 974			break;
 975		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 976					  ticket);
 977		if (unlikely(ret != 0))
 978			return ret;
 979	} while (1);
 980
 981	return ttm_bo_add_move_fence(bo, man, mem);
 982}
 983
 984static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 985				      uint32_t cur_placement,
 986				      uint32_t proposed_placement)
 987{
 988	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
 989	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 990
 991	/**
 992	 * Keep current caching if possible.
 993	 */
 994
 995	if ((cur_placement & caching) != 0)
 996		result |= (cur_placement & caching);
 997	else if ((man->default_caching & caching) != 0)
 998		result |= man->default_caching;
 999	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
1000		result |= TTM_PL_FLAG_CACHED;
1001	else if ((TTM_PL_FLAG_WC & caching) != 0)
1002		result |= TTM_PL_FLAG_WC;
1003	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
1004		result |= TTM_PL_FLAG_UNCACHED;
1005
1006	return result;
1007}
1008
1009static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
1010				 uint32_t mem_type,
1011				 const struct ttm_place *place,
1012				 uint32_t *masked_placement)
1013{
1014	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
1015
1016	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
1017		return false;
1018
1019	if ((place->flags & man->available_caching) == 0)
1020		return false;
1021
1022	cur_flags |= (place->flags & man->available_caching);
1023
1024	*masked_placement = cur_flags;
1025	return true;
1026}
1027
1028/**
1029 * ttm_bo_mem_placement - check if placement is compatible
1030 * @bo: BO to find memory for
1031 * @place: where to search
1032 * @mem: the memory object to fill in
1033 * @ctx: operation context
1034 *
1035 * Check if placement is compatible and fill in mem structure.
1036 * Returns -EBUSY if the placement won't work, another negative error
1037 * code on failure, or 0 when the placement can be used.
1038 */
1039static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
1040				const struct ttm_place *place,
1041				struct ttm_mem_reg *mem,
1042				struct ttm_operation_ctx *ctx)
1043{
1044	struct ttm_bo_device *bdev = bo->bdev;
1045	uint32_t mem_type = TTM_PL_SYSTEM;
1046	struct ttm_mem_type_manager *man;
1047	uint32_t cur_flags = 0;
1048	int ret;
1049
1050	ret = ttm_mem_type_from_place(place, &mem_type);
1051	if (ret)
1052		return ret;
1053
1054	man = &bdev->man[mem_type];
1055	if (!man->has_type || !man->use_type)
1056		return -EBUSY;
1057
1058	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
1059		return -EBUSY;
1060
1061	cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
1062	/*
1063	 * Copy the access and other non-mapping-related flag bits from
1064	 * the memory placement flags into the current flags.
1065	 */
1066	ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
1067
1068	mem->mem_type = mem_type;
1069	mem->placement = cur_flags;
1070
1071	if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
1072		spin_lock(&bo->bdev->glob->lru_lock);
1073		ttm_bo_del_from_lru(bo);
1074		ttm_bo_add_mem_to_lru(bo, mem);
1075		spin_unlock(&bo->bdev->glob->lru_lock);
1076	}
1077
1078	return 0;
1079}
1080
1081/**
1082 * Creates space for memory region @mem according to its type.
1083 *
1084 * This function first searches for free space in compatible memory types in
1085 * the priority order defined by the driver.  If free space isn't found, then
1086 * ttm_bo_mem_force_space is attempted in priority order to evict and find
1087 * space.
1088 */
1089int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1090			struct ttm_placement *placement,
1091			struct ttm_mem_reg *mem,
1092			struct ttm_operation_ctx *ctx)
1093{
1094	struct ttm_bo_device *bdev = bo->bdev;
1095	bool type_found = false;
1096	int i, ret;
1097
1098	ret = dma_resv_reserve_shared(bo->base.resv, 1);
1099	if (unlikely(ret))
1100		return ret;
1101
1102	mem->mm_node = NULL;
1103	for (i = 0; i < placement->num_placement; ++i) {
1104		const struct ttm_place *place = &placement->placement[i];
1105		struct ttm_mem_type_manager *man;
1106
1107		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1108		if (ret == -EBUSY)
1109			continue;
1110		if (ret)
1111			goto error;
1112
1113		type_found = true;
1114		mem->mm_node = NULL;
1115		if (mem->mem_type == TTM_PL_SYSTEM)
1116			return 0;
1117
1118		man = &bdev->man[mem->mem_type];
1119		ret = (*man->func->get_node)(man, bo, place, mem);
1120		if (unlikely(ret))
1121			goto error;
1122
1123		if (mem->mm_node) {
1124			ret = ttm_bo_add_move_fence(bo, man, mem);
1125			if (unlikely(ret)) {
1126				(*man->func->put_node)(man, mem);
1127				goto error;
1128			}
1129			return 0;
1130		}
1131	}
1132
1133	for (i = 0; i < placement->num_busy_placement; ++i) {
1134		const struct ttm_place *place = &placement->busy_placement[i];
1135
1136		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1137		if (ret == -EBUSY)
1138			continue;
1139		if (ret)
1140			goto error;
1141
1142		type_found = true;
1143		mem->mm_node = NULL;
1144		if (mem->mem_type == TTM_PL_SYSTEM)
1145			return 0;
1146
1147		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1148		if (ret == 0 && mem->mm_node)
1149			return 0;
1150
1151		if (ret && ret != -EBUSY)
1152			goto error;
1153	}
1154
1155	ret = -ENOMEM;
1156	if (!type_found) {
1157		pr_err(TTM_PFX "No compatible memory type found\n");
1158		ret = -EINVAL;
1159	}
1160
1161error:
1162	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1163		spin_lock(&bo->bdev->glob->lru_lock);
1164		ttm_bo_move_to_lru_tail(bo, NULL);
1165		spin_unlock(&bo->bdev->glob->lru_lock);
1166	}
1167
1168	return ret;
1169}
1170EXPORT_SYMBOL(ttm_bo_mem_space);
1171
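/*
 * Illustrative sketch (not part of this file): callers describe the
 * acceptable domains with a ttm_placement; the busy_placement array is the
 * fallback tried with eviction once the preferred placements are full.
 *
 *	static const struct ttm_place vram_place = {
 *		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 *	};
 *	static const struct ttm_place gtt_place = {
 *		.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &vram_place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &gtt_place,
 *	};
 */
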
1172static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1173			      struct ttm_placement *placement,
1174			      struct ttm_operation_ctx *ctx)
1175{
1176	int ret = 0;
1177	struct ttm_mem_reg mem;
1178
1179	dma_resv_assert_held(bo->base.resv);
1180
1181	mem.num_pages = bo->num_pages;
1182	mem.size = mem.num_pages << PAGE_SHIFT;
1183	mem.page_alignment = bo->mem.page_alignment;
1184	mem.bus.io_reserved_vm = false;
1185	mem.bus.io_reserved_count = 0;
1186	/*
1187	 * Determine where to move the buffer.
1188	 */
1189	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1190	if (ret)
1191		goto out_unlock;
1192	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1193out_unlock:
1194	if (ret && mem.mm_node)
1195		ttm_bo_mem_put(bo, &mem);
1196	return ret;
1197}
1198
1199static bool ttm_bo_places_compat(const struct ttm_place *places,
1200				 unsigned num_placement,
1201				 struct ttm_mem_reg *mem,
1202				 uint32_t *new_flags)
1203{
1204	unsigned i;
1205
1206	for (i = 0; i < num_placement; i++) {
1207		const struct ttm_place *heap = &places[i];
1208
1209		if (mem->mm_node && (mem->start < heap->fpfn ||
1210		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1211			continue;
1212
1213		*new_flags = heap->flags;
1214		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1215		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1216		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1217		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1218			return true;
1219	}
1220	return false;
1221}
1222
1223bool ttm_bo_mem_compat(struct ttm_placement *placement,
1224		       struct ttm_mem_reg *mem,
1225		       uint32_t *new_flags)
1226{
1227	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1228				 mem, new_flags))
1229		return true;
1230
1231	if ((placement->busy_placement != placement->placement ||
1232	     placement->num_busy_placement > placement->num_placement) &&
1233	    ttm_bo_places_compat(placement->busy_placement,
1234				 placement->num_busy_placement,
1235				 mem, new_flags))
1236		return true;
1237
1238	return false;
1239}
1240EXPORT_SYMBOL(ttm_bo_mem_compat);
1241
1242int ttm_bo_validate(struct ttm_buffer_object *bo,
1243		    struct ttm_placement *placement,
1244		    struct ttm_operation_ctx *ctx)
1245{
1246	int ret;
1247	uint32_t new_flags;
1248
1249	dma_resv_assert_held(bo->base.resv);
1250	/*
1251	 * Check whether we need to move buffer.
1252	 */
1253	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1254		ret = ttm_bo_move_buffer(bo, placement, ctx);
1255		if (ret)
1256			return ret;
1257	} else {
1258		/*
1259		 * Copy the access and other non-mapping-related flag bits from
1260		 * the compatible memory placement flags into the active flags.
1261		 */
1262		ttm_flag_masked(&bo->mem.placement, new_flags,
1263				~TTM_PL_MASK_MEMTYPE);
1264	}
1265	/*
1266	 * We might need to add a TTM.
1267	 */
1268	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1269		ret = ttm_tt_create(bo, true);
1270		if (ret)
1271			return ret;
1272	}
1273	return 0;
1274}
1275EXPORT_SYMBOL(ttm_bo_validate);
1276
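/*
 * Illustrative sketch (not part of this file): validation is always done
 * with the BO reserved; TTM migrates the buffer if the current placement
 * is no longer compatible.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, &placement, &ctx);
 *		ttm_bo_unreserve(bo);
 *	}
 */
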
1277int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1278			 struct ttm_buffer_object *bo,
1279			 unsigned long size,
1280			 enum ttm_bo_type type,
1281			 struct ttm_placement *placement,
1282			 uint32_t page_alignment,
1283			 struct ttm_operation_ctx *ctx,
1284			 size_t acc_size,
1285			 struct sg_table *sg,
1286			 struct dma_resv *resv,
1287			 void (*destroy) (struct ttm_buffer_object *))
1288{
1289	int ret = 0;
1290	unsigned long num_pages;
1291	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1292	bool locked;
1293
1294	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1295	if (ret) {
1296		pr_err("Out of kernel memory\n");
1297		if (destroy)
1298			(*destroy)(bo);
1299		else
1300			kfree(bo);
1301		return -ENOMEM;
1302	}
1303
1304	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1305	if (num_pages == 0) {
1306		pr_err("Illegal buffer object size\n");
1307		if (destroy)
1308			(*destroy)(bo);
1309		else
1310			kfree(bo);
1311		ttm_mem_global_free(mem_glob, acc_size);
1312		return -EINVAL;
1313	}
1314	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1315
1316	kref_init(&bo->kref);
1317	kref_init(&bo->list_kref);
1318	atomic_set(&bo->cpu_writers, 0);
1319	INIT_LIST_HEAD(&bo->lru);
1320	INIT_LIST_HEAD(&bo->ddestroy);
1321	INIT_LIST_HEAD(&bo->swap);
1322	INIT_LIST_HEAD(&bo->io_reserve_lru);
1323	mutex_init(&bo->wu_mutex);
1324	bo->bdev = bdev;
1325	bo->type = type;
1326	bo->num_pages = num_pages;
1327	bo->mem.size = num_pages << PAGE_SHIFT;
1328	bo->mem.mem_type = TTM_PL_SYSTEM;
1329	bo->mem.num_pages = bo->num_pages;
1330	bo->mem.mm_node = NULL;
1331	bo->mem.page_alignment = page_alignment;
1332	bo->mem.bus.io_reserved_vm = false;
1333	bo->mem.bus.io_reserved_count = 0;
1334	bo->moving = NULL;
1335	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1336	bo->acc_size = acc_size;
1337	bo->sg = sg;
1338	if (resv) {
1339		bo->base.resv = resv;
1340		dma_resv_assert_held(bo->base.resv);
1341	} else {
1342		bo->base.resv = &bo->base._resv;
1343	}
1344	if (!ttm_bo_uses_embedded_gem_object(bo)) {
1345		/*
1346		 * bo.gem is not initialized, so we have to setup the
1347		 * struct elements we want to use regardless.
1348		 */
1349		dma_resv_init(&bo->base._resv);
1350		drm_vma_node_reset(&bo->base.vma_node);
1351	}
1352	atomic_inc(&bo->bdev->glob->bo_count);
1353
1354	/*
1355	 * For ttm_bo_type_device buffers, allocate
1356	 * address space from the device.
1357	 */
1358	if (bo->type == ttm_bo_type_device ||
1359	    bo->type == ttm_bo_type_sg)
1360		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
1361					 bo->mem.num_pages);
1362
1363	/* passed reservation objects should already be locked,
1364	 * since otherwise lockdep will be angered in radeon.
1365	 */
1366	if (!resv) {
1367		locked = dma_resv_trylock(bo->base.resv);
1368		WARN_ON(!locked);
1369	}
1370
1371	if (likely(!ret))
1372		ret = ttm_bo_validate(bo, placement, ctx);
1373
1374	if (unlikely(ret)) {
1375		if (!resv)
1376			ttm_bo_unreserve(bo);
1377
1378		ttm_bo_put(bo);
1379		return ret;
1380	}
1381
1382	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1383		spin_lock(&bdev->glob->lru_lock);
1384		ttm_bo_add_to_lru(bo);
1385		spin_unlock(&bdev->glob->lru_lock);
1386	}
1387
1388	return ret;
1389}
1390EXPORT_SYMBOL(ttm_bo_init_reserved);
1391
1392int ttm_bo_init(struct ttm_bo_device *bdev,
1393		struct ttm_buffer_object *bo,
1394		unsigned long size,
1395		enum ttm_bo_type type,
1396		struct ttm_placement *placement,
1397		uint32_t page_alignment,
1398		bool interruptible,
1399		size_t acc_size,
1400		struct sg_table *sg,
1401		struct dma_resv *resv,
1402		void (*destroy) (struct ttm_buffer_object *))
1403{
1404	struct ttm_operation_ctx ctx = { interruptible, false };
1405	int ret;
1406
1407	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1408				   page_alignment, &ctx, acc_size,
1409				   sg, resv, destroy);
1410	if (ret)
1411		return ret;
1412
1413	if (!resv)
1414		ttm_bo_unreserve(bo);
1415
1416	return 0;
1417}
1418EXPORT_SYMBOL(ttm_bo_init);
1419
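/*
 * Illustrative sketch (not part of this file): the usual embedding pattern,
 * where "struct mydrv_bo" wrapping a ttm_buffer_object and its destroy
 * callback are hypothetical.
 *
 *	struct mydrv_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(*mbo));
 *	int ret;
 *
 *	if (!mbo)
 *		return -ENOMEM;
 *	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, true,	// page_alignment = 0
 *			  acc_size, NULL, NULL, mydrv_bo_destroy);
 */
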
1420size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1421		       unsigned long bo_size,
1422		       unsigned struct_size)
1423{
1424	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1425	size_t size = 0;
1426
1427	size += ttm_round_pot(struct_size);
1428	size += ttm_round_pot(npages * sizeof(void *));
1429	size += ttm_round_pot(sizeof(struct ttm_tt));
1430	return size;
1431}
1432EXPORT_SYMBOL(ttm_bo_acc_size);
1433
1434size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1435			   unsigned long bo_size,
1436			   unsigned struct_size)
1437{
1438	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1439	size_t size = 0;
1440
1441	size += ttm_round_pot(struct_size);
1442	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1443	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1444	return size;
1445}
1446EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1447
1448int ttm_bo_create(struct ttm_bo_device *bdev,
1449			unsigned long size,
1450			enum ttm_bo_type type,
1451			struct ttm_placement *placement,
1452			uint32_t page_alignment,
1453			bool interruptible,
1454			struct ttm_buffer_object **p_bo)
1455{
1456	struct ttm_buffer_object *bo;
1457	size_t acc_size;
1458	int ret;
1459
1460	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1461	if (unlikely(bo == NULL))
1462		return -ENOMEM;
1463
1464	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1465	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1466			  interruptible, acc_size,
1467			  NULL, NULL, NULL);
1468	if (likely(ret == 0))
1469		*p_bo = bo;
1470
1471	return ret;
1472}
1473EXPORT_SYMBOL(ttm_bo_create);
1474
1475static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1476				   unsigned mem_type)
1477{
1478	struct ttm_operation_ctx ctx = {
1479		.interruptible = false,
1480		.no_wait_gpu = false,
1481		.flags = TTM_OPT_FLAG_FORCE_ALLOC
1482	};
1483	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1484	struct ttm_bo_global *glob = bdev->glob;
1485	struct dma_fence *fence;
1486	int ret;
1487	unsigned i;
1488
1489	/*
1490	 * Can't use standard list traversal since we're unlocking.
1491	 */
1492
1493	spin_lock(&glob->lru_lock);
1494	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1495		while (!list_empty(&man->lru[i])) {
1496			spin_unlock(&glob->lru_lock);
1497			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
1498						  NULL);
1499			if (ret)
1500				return ret;
1501			spin_lock(&glob->lru_lock);
1502		}
1503	}
1504	spin_unlock(&glob->lru_lock);
1505
1506	spin_lock(&man->move_lock);
1507	fence = dma_fence_get(man->move);
1508	spin_unlock(&man->move_lock);
1509
1510	if (fence) {
1511		ret = dma_fence_wait(fence, false);
1512		dma_fence_put(fence);
1513		if (ret)
1514			return ret;
1515	}
1516
1517	return 0;
1518}
1519
1520int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1521{
1522	struct ttm_mem_type_manager *man;
1523	int ret = -EINVAL;
1524
1525	if (mem_type >= TTM_NUM_MEM_TYPES) {
1526		pr_err("Illegal memory type %d\n", mem_type);
1527		return ret;
1528	}
1529	man = &bdev->man[mem_type];
1530
1531	if (!man->has_type) {
1532		pr_err("Trying to take down uninitialized memory manager type %u\n",
1533		       mem_type);
1534		return ret;
1535	}
1536
1537	man->use_type = false;
1538	man->has_type = false;
1539
1540	ret = 0;
1541	if (mem_type > 0) {
1542		ret = ttm_bo_force_list_clean(bdev, mem_type);
1543		if (ret) {
1544			pr_err("Cleanup eviction failed\n");
1545			return ret;
1546		}
1547
1548		ret = (*man->func->takedown)(man);
1549	}
1550
1551	dma_fence_put(man->move);
1552	man->move = NULL;
1553
1554	return ret;
1555}
1556EXPORT_SYMBOL(ttm_bo_clean_mm);
1557
1558int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1559{
1560	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1561
1562	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1563		pr_err("Illegal memory manager memory type %u\n", mem_type);
1564		return -EINVAL;
1565	}
1566
1567	if (!man->has_type) {
1568		pr_err("Memory type %u has not been initialized\n", mem_type);
1569		return 0;
1570	}
1571
1572	return ttm_bo_force_list_clean(bdev, mem_type);
1573}
1574EXPORT_SYMBOL(ttm_bo_evict_mm);
1575
1576int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1577			unsigned long p_size)
1578{
1579	int ret;
1580	struct ttm_mem_type_manager *man;
1581	unsigned i;
1582
1583	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1584	man = &bdev->man[type];
1585	BUG_ON(man->has_type);
1586	man->io_reserve_fastpath = true;
1587	man->use_io_reserve_lru = false;
1588	mutex_init(&man->io_reserve_mutex);
1589	spin_lock_init(&man->move_lock);
1590	INIT_LIST_HEAD(&man->io_reserve_lru);
1591
1592	ret = bdev->driver->init_mem_type(bdev, type, man);
1593	if (ret)
1594		return ret;
1595	man->bdev = bdev;
1596
1597	if (type != TTM_PL_SYSTEM) {
1598		ret = (*man->func->init)(man, p_size);
1599		if (ret)
1600			return ret;
1601	}
1602	man->has_type = true;
1603	man->use_type = true;
1604	man->size = p_size;
1605
1606	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1607		INIT_LIST_HEAD(&man->lru[i]);
1608	man->move = NULL;
1609
1610	return 0;
1611}
1612EXPORT_SYMBOL(ttm_bo_init_mm);
1613
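/*
 * Illustrative sketch (not part of this file): after ttm_bo_device_init()
 * a driver brings up its discrete memory domains; p_size is in pages and
 * the "mydrv_*_size" values are hypothetical.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mydrv_vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, mydrv_gtt_size >> PAGE_SHIFT);
 *
 * TTM_PL_SYSTEM is already set up by ttm_bo_device_init() itself.
 */
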
1614static void ttm_bo_global_kobj_release(struct kobject *kobj)
1615{
1616	struct ttm_bo_global *glob =
1617		container_of(kobj, struct ttm_bo_global, kobj);
1618
1619	__free_page(glob->dummy_read_page);
1620}
1621
1622static void ttm_bo_global_release(void)
1623{
1624	struct ttm_bo_global *glob = &ttm_bo_glob;
1625
1626	mutex_lock(&ttm_global_mutex);
1627	if (--ttm_bo_glob_use_count > 0)
1628		goto out;
1629
1630	kobject_del(&glob->kobj);
1631	kobject_put(&glob->kobj);
1632	ttm_mem_global_release(&ttm_mem_glob);
1633	memset(glob, 0, sizeof(*glob));
1634out:
1635	mutex_unlock(&ttm_global_mutex);
1636}
1637
1638static int ttm_bo_global_init(void)
1639{
1640	struct ttm_bo_global *glob = &ttm_bo_glob;
1641	int ret = 0;
1642	unsigned i;
1643
1644	mutex_lock(&ttm_global_mutex);
1645	if (++ttm_bo_glob_use_count > 1)
1646		goto out;
1647
1648	ret = ttm_mem_global_init(&ttm_mem_glob);
1649	if (ret)
1650		goto out;
1651
1652	spin_lock_init(&glob->lru_lock);
1653	glob->mem_glob = &ttm_mem_glob;
1654	glob->mem_glob->bo_glob = glob;
1655	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1656
1657	if (unlikely(glob->dummy_read_page == NULL)) {
1658		ret = -ENOMEM;
1659		goto out;
1660	}
1661
1662	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1663		INIT_LIST_HEAD(&glob->swap_lru[i]);
1664	INIT_LIST_HEAD(&glob->device_list);
1665	atomic_set(&glob->bo_count, 0);
1666
1667	ret = kobject_init_and_add(
1668		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1669	if (unlikely(ret != 0))
1670		kobject_put(&glob->kobj);
1671out:
1672	mutex_unlock(&ttm_global_mutex);
1673	return ret;
1674}
1675
1676int ttm_bo_device_release(struct ttm_bo_device *bdev)
1677{
1678	int ret = 0;
1679	unsigned i = TTM_NUM_MEM_TYPES;
1680	struct ttm_mem_type_manager *man;
1681	struct ttm_bo_global *glob = bdev->glob;
1682
1683	while (i--) {
1684		man = &bdev->man[i];
1685		if (man->has_type) {
1686			man->use_type = false;
1687			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1688				ret = -EBUSY;
1689				pr_err("DRM memory manager type %d is not clean\n",
1690				       i);
1691			}
1692			man->has_type = false;
1693		}
1694	}
1695
1696	mutex_lock(&ttm_global_mutex);
1697	list_del(&bdev->device_list);
1698	mutex_unlock(&ttm_global_mutex);
1699
1700	cancel_delayed_work_sync(&bdev->wq);
1701
1702	if (ttm_bo_delayed_delete(bdev, true))
1703		pr_debug("Delayed destroy list was clean\n");
1704
1705	spin_lock(&glob->lru_lock);
1706	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1707		if (list_empty(&glob->swap_lru[i]))
1708			pr_debug("Swap list %d was clean\n", i);
1709	spin_unlock(&glob->lru_lock);
1710
1711	drm_vma_offset_manager_destroy(&bdev->vma_manager);
1712
1713	if (!ret)
1714		ttm_bo_global_release();
1715
1716	return ret;
1717}
1718EXPORT_SYMBOL(ttm_bo_device_release);
1719
1720int ttm_bo_device_init(struct ttm_bo_device *bdev,
1721		       struct ttm_bo_driver *driver,
1722		       struct address_space *mapping,
1723		       bool need_dma32)
1724{
1725	struct ttm_bo_global *glob = &ttm_bo_glob;
1726	int ret;
1727
1728	ret = ttm_bo_global_init();
1729	if (ret)
1730		return ret;
1731
1732	bdev->driver = driver;
1733
1734	memset(bdev->man, 0, sizeof(bdev->man));
1735
1736	/*
1737	 * Initialize the system memory buffer type.
1738	 * Other types need to be driver / IOCTL initialized.
1739	 */
1740	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1741	if (unlikely(ret != 0))
1742		goto out_no_sys;
1743
1744	drm_vma_offset_manager_init(&bdev->vma_manager,
1745				    DRM_FILE_PAGE_OFFSET_START,
1746				    DRM_FILE_PAGE_OFFSET_SIZE);
1747	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1748	INIT_LIST_HEAD(&bdev->ddestroy);
1749	bdev->dev_mapping = mapping;
1750	bdev->glob = glob;
1751	bdev->need_dma32 = need_dma32;
1752	mutex_lock(&ttm_global_mutex);
1753	list_add_tail(&bdev->device_list, &glob->device_list);
1754	mutex_unlock(&ttm_global_mutex);
1755
1756	return 0;
1757out_no_sys:
1758	ttm_bo_global_release();
1759	return ret;
1760}
1761EXPORT_SYMBOL(ttm_bo_device_init);
1762
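/*
 * Illustrative sketch (not part of this file): called once at driver load,
 * typically with the DRM device's anonymous inode mapping; the "mydrv_"
 * names are hypothetical.
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, &mydrv_bo_driver,
 *				 ddev->anon_inode->i_mapping,
 *				 mydrv_need_dma32);
 *	if (ret)
 *		dev_err(dev, "failed to init TTM (%d)\n", ret);
 */
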
1763/*
1764 * buffer object vm functions.
1765 */
1766
1767bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1768{
1769	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1770
1771	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1772		if (mem->mem_type == TTM_PL_SYSTEM)
1773			return false;
1774
1775		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1776			return false;
1777
1778		if (mem->placement & TTM_PL_FLAG_CACHED)
1779			return false;
1780	}
1781	return true;
1782}
1783
1784void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1785{
1786	struct ttm_bo_device *bdev = bo->bdev;
1787
1788	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1789	ttm_mem_io_free_vm(bo);
1790}
1791
1792void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1793{
1794	struct ttm_bo_device *bdev = bo->bdev;
1795	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1796
1797	ttm_mem_io_lock(man, false);
1798	ttm_bo_unmap_virtual_locked(bo);
1799	ttm_mem_io_unlock(man);
1800}
1801
1802
1803EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1804
1805int ttm_bo_wait(struct ttm_buffer_object *bo,
1806		bool interruptible, bool no_wait)
1807{
1808	long timeout = 15 * HZ;
1809
1810	if (no_wait) {
1811		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1812			return 0;
1813		else
1814			return -EBUSY;
1815	}
1816
1817	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1818						      interruptible, timeout);
1819	if (timeout < 0)
1820		return timeout;
1821
1822	if (timeout == 0)
1823		return -EBUSY;
1824
1825	dma_resv_add_excl_fence(bo->base.resv, NULL);
1826	return 0;
1827}
1828EXPORT_SYMBOL(ttm_bo_wait);
1829
1830int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1831{
1832	int ret = 0;
1833
1834	/*
1835	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1836	 */
1837
1838	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1839	if (unlikely(ret != 0))
1840		return ret;
1841	ret = ttm_bo_wait(bo, true, no_wait);
1842	if (likely(ret == 0))
1843		atomic_inc(&bo->cpu_writers);
1844	ttm_bo_unreserve(bo);
1845	return ret;
1846}
1847EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1848
1849void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1850{
1851	atomic_dec(&bo->cpu_writers);
1852}
1853EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1854
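/*
 * Illustrative sketch (not part of this file): grab/release bracket a CPU
 * write so that cpu_writers stays elevated for its duration.
 *
 *	if (!ttm_bo_synccpu_write_grab(bo, false)) {
 *		mydrv_cpu_fill(bo);		// hypothetical CPU access
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */
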
1855/**
1856 * A buffer object shrink method that tries to swap out the first
1857 * buffer object on the bo_global::swap_lru list.
1858 */
1859int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1860{
1861	struct ttm_buffer_object *bo;
1862	int ret = -EBUSY;
1863	bool locked;
1864	unsigned i;
1865
1866	spin_lock(&glob->lru_lock);
1867	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1868		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1869			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1870							   NULL)) {
1871				ret = 0;
1872				break;
1873			}
1874		}
1875		if (!ret)
1876			break;
1877	}
1878
1879	if (ret) {
1880		spin_unlock(&glob->lru_lock);
1881		return ret;
1882	}
1883
1884	kref_get(&bo->list_kref);
1885
1886	if (!list_empty(&bo->ddestroy)) {
1887		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1888		kref_put(&bo->list_kref, ttm_bo_release_list);
1889		return ret;
1890	}
1891
1892	ttm_bo_del_from_lru(bo);
1893	spin_unlock(&glob->lru_lock);
1894
1895	/**
1896	 * Move to system cached
1897	 */
1898
1899	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1900	    bo->ttm->caching_state != tt_cached) {
1901		struct ttm_operation_ctx ctx = { false, false };
1902		struct ttm_mem_reg evict_mem;
1903
1904		evict_mem = bo->mem;
1905		evict_mem.mm_node = NULL;
1906		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1907		evict_mem.mem_type = TTM_PL_SYSTEM;
1908
1909		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1910		if (unlikely(ret != 0))
1911			goto out;
1912	}
1913
1914	/**
1915	 * Make sure BO is idle.
1916	 */
1917
1918	ret = ttm_bo_wait(bo, false, false);
1919	if (unlikely(ret != 0))
1920		goto out;
1921
1922	ttm_bo_unmap_virtual(bo);
1923
1924	/**
1925	 * Swap out. Buffer will be swapped in again as soon as
1926	 * anyone tries to access a ttm page.
1927	 */
1928
1929	if (bo->bdev->driver->swap_notify)
1930		bo->bdev->driver->swap_notify(bo);
1931
1932	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1933out:
1934
1935	/**
1936	 *
1937	 * Unreserve without putting on LRU to avoid swapping out an
1938	 * already swapped buffer.
1939	 */
1940	if (locked)
1941		dma_resv_unlock(bo->base.resv);
1942	kref_put(&bo->list_kref, ttm_bo_release_list);
1943	return ret;
1944}
1945EXPORT_SYMBOL(ttm_bo_swapout);
1946
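/*
 * Illustrative sketch (not part of this file): the memory accounting code
 * frees one buffer per call, so a pressure handler loops roughly like this
 * ("mydrv_under_pressure" is a hypothetical predicate):
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false,
 *					 .no_wait_gpu = false };
 *
 *	while (mydrv_under_pressure())
 *		if (ttm_bo_swapout(&ttm_bo_glob, &ctx))
 *			break;			// nothing left to swap
 */
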
1947void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1948{
1949	struct ttm_operation_ctx ctx = {
1950		.interruptible = false,
1951		.no_wait_gpu = false
1952	};
1953
1954	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
1955		;
1956}
1957EXPORT_SYMBOL(ttm_bo_swapout_all);
1958
1959/**
1960 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1961 * unreserved
1962 *
1963 * @bo: Pointer to buffer
1964 */
1965int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1966{
1967	int ret;
1968
1969	/*
1970	 * In the absence of a wait_unlocked API,
1971	 * use the bo::wu_mutex to avoid triggering livelocks due to
1972	 * concurrent use of this function. Note that this use of
1973	 * bo::wu_mutex can go away if we change locking order to
1974	 * mmap_sem -> bo::reserve.
1975	 */
1976	ret = mutex_lock_interruptible(&bo->wu_mutex);
1977	if (unlikely(ret != 0))
1978		return -ERESTARTSYS;
1979	if (!dma_resv_is_locked(bo->base.resv))
1980		goto out_unlock;
1981	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
1982	if (ret == -EINTR)
1983		ret = -ERESTARTSYS;
1984	if (unlikely(ret != 0))
1985		goto out_unlock;
1986	dma_resv_unlock(bo->base.resv);
1987
1988out_unlock:
1989	mutex_unlock(&bo->wu_mutex);
1990	return ret;
1991}
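
/*
 * Illustrative sketch (not part of this file): the CPU fault path uses
 * ttm_bo_wait_unreserved() to wait for a contended reservation without
 * holding mmap_sem, roughly as ttm_bo_vm_fault() does:
 *
 *	if (!dma_resv_trylock(bo->base.resv)) {
 *		ttm_bo_get(bo);
 *		up_read(&vmf->vma->vm_mm->mmap_sem);
 *		(void) ttm_bo_wait_unreserved(bo);
 *		ttm_bo_put(bo);
 *		return VM_FAULT_RETRY;
 *	}
 */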