v4.17
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/* Decide whether a BO created with AMDGPU_GEM_CREATE_SHADOW really needs a
 * shadow copy: only when GPU recovery is possible and enabled.
 */
static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
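
/*
 * Illustrative sketch (not part of the original file): for a request like
 *
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					      AMDGPU_GEM_DOMAIN_GTT);
 *
 * on a BO created with AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, the function
 * above fills two placements, roughly:
 *
 *	places[0]: VRAM, WC|UNCACHED, lpfn capped at the CPU-visible size
 *	places[1]: GTT, CACHED (no USWC flag set), no range restriction
 *
 * so TTM tries CPU-visible VRAM first and falls back to cacheable GTT.
 */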

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr is NULL.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	bool free = false;
	int r;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, size, align, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     ttm_bo_type_kernel, NULL, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
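
/*
 * Usage sketch (illustrative only; "adev" and the one-page size are
 * placeholders, not part of the original file): allocate kernel-owned
 * VRAM, fill it while the BO is still reserved, then publish it.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	memset(cpu_addr, 0, PAGE_SIZE);
 *	amdgpu_bo_unreserve(bo);
 */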

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr is NULL.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
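
/*
 * Usage sketch (illustrative; the names are placeholders): the typical
 * pairing is amdgpu_bo_create_kernel() at init time and, at teardown,
 * amdgpu_bo_free_kernel(), which unmaps, unpins and drops the reference:
 *
 *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */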

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: optional GPU address, set to 0 when the BO is freed
 * @cpu_addr: optional CPU address, set to NULL when the BO is freed
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the total memory of the
 * target domain(s).
 */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
			       int byte_align, u32 domain,
			       u64 flags, enum ttm_bo_type type,
			       struct reservation_object *resv,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->preferred_domains;
	if (type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_ttm_placement_from_domain(bo, domain);

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	int r;

	if (bo->shadow)
		return 0;

	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				AMDGPU_GEM_CREATE_SHADOW,
				ttm_bo_type_kernel,
				bo->tbo.resv, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
		     int byte_align, u32 domain,
		     u64 flags, enum ttm_bo_type type,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
	int r;

	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
				parent_flags, type, resv, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
		if (!resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

		if (!resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
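
/*
 * Usage sketch (illustrative; "adev" and "size" are placeholders): create
 * an unpinned, write-combined GTT BO of the kind a userspace allocation
 * would use. Adding AMDGPU_GEM_CREATE_SHADOW to the flags would also
 * allocate a GTT shadow via amdgpu_bo_create_shadow() above, provided
 * amdgpu_need_backup() says GPU recovery is enabled.
 *
 *	struct amdgpu_bo *bo;
 *	int r = amdgpu_bo_create(adev, size, PAGE_SIZE,
 *				 AMDGPU_GEM_DOMAIN_GTT,
 *				 AMDGPU_GEM_CREATE_CPU_GTT_USWC,
 *				 ttm_bo_type_device, NULL, &bo);
 */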

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
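
/*
 * Usage sketch (illustrative; "bo", "data" and "size" are placeholders):
 * kmap with the BO reserved; the mapping stays valid until
 * amdgpu_bo_kunmap() or until the BO is moved or destroyed.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r)
 *		memcpy(ptr, data, size);
 *	amdgpu_bo_kunmap(bo);
 *	amdgpu_bo_unreserve(bo);
 */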

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}
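
/*
 * Usage sketch (illustrative; "bo" and "gpu_addr" are placeholders):
 * pinning nests via pin_count, so every successful amdgpu_bo_pin() must
 * eventually be balanced by amdgpu_bo_unpin(), both with the BO reserved.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */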

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					      adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: the object should be either pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}

v4.6
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
						struct ttm_mem_reg *mem)
{
	u64 ret = 0;

	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			   adev->mc.visible_vram_size ?
			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			   mem->size;
	}
	return ret;
}

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
		       struct ttm_mem_reg *old_mem,
		       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
			!placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		adev->mc.mc_vram_size >> 20,
		(unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}