/* Linux v6.9.4: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c */
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30 *    Dave Airlie
  31 */
  32#include <linux/list.h>
  33#include <linux/slab.h>
  34#include <linux/dma-buf.h>
  35
  36#include <drm/drm_drv.h>
  37#include <drm/amdgpu_drm.h>
  38#include <drm/drm_cache.h>
  39#include "amdgpu.h"
  40#include "amdgpu_trace.h"
  41#include "amdgpu_amdkfd.h"
  42
  43/**
  44 * DOC: amdgpu_object
  45 *
  46 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
  47 * represents memory used by the driver (VRAM, system memory, etc.). The driver
  48 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
  49 * to create/destroy/set buffer objects, which are then managed by the kernel TTM
  50 * memory manager.
  51 * The interfaces are also used internally by kernel clients, including gfx,
  52 * uvd, etc. for kernel managed allocations used by the GPU.
  53 *
  54 */
  55
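
/*
 * Editorial usage sketch (not part of the driver): the typical lifecycle of a
 * kernel-owned BO built from the helpers below. Error handling is trimmed and
 * "adev" is assumed to be a valid amdgpu device.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (!r) {
 *		memset(cpu_ptr, 0, PAGE_SIZE);
 *		...
 *		amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 *	}
 */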
  56static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
  57{
  58	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  59
  60	amdgpu_bo_kunmap(bo);
  61
  62	if (bo->tbo.base.import_attach)
  63		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
  64	drm_gem_object_release(&bo->tbo.base);
  65	amdgpu_bo_unref(&bo->parent);
  66	kvfree(bo);
  67}
  68
  69static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
  70{
  71	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  72	struct amdgpu_bo_user *ubo;
  73
  74	ubo = to_amdgpu_bo_user(bo);
  75	kfree(ubo->metadata);
  76	amdgpu_bo_destroy(tbo);
  77}
  78
  79static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
  80{
  81	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
  82	struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
  83	struct amdgpu_bo_vm *vmbo;
  84
  85	bo = shadow_bo->parent;
  86	vmbo = to_amdgpu_bo_vm(bo);
  87	/* in case amdgpu_device_recover_vram() got a NULL bo->parent */
  88	if (!list_empty(&vmbo->shadow_list)) {
  89		mutex_lock(&adev->shadow_list_lock);
  90		list_del_init(&vmbo->shadow_list);
  91		mutex_unlock(&adev->shadow_list_lock);
  92	}
  93
  94	amdgpu_bo_destroy(tbo);
  95}
  96
  97/**
  98 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
  99 * @bo: buffer object to be checked
 100 *
 101 * Uses the destroy function associated with the object to determine if this is
 102 * an &amdgpu_bo.
 103 *
 104 * Returns:
 105 * true if the object belongs to &amdgpu_bo, false if not.
 106 */
 107bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 108{
 109	if (bo->destroy == &amdgpu_bo_destroy ||
 110	    bo->destroy == &amdgpu_bo_user_destroy ||
 111	    bo->destroy == &amdgpu_bo_vm_destroy)
 112		return true;
 113
 114	return false;
 115}
 116
 117/**
 118 * amdgpu_bo_placement_from_domain - set buffer's placement
 119 * @abo: &amdgpu_bo buffer object whose placement is to be set
 120 * @domain: requested domain
 121 *
 122 * Sets buffer's placement according to requested domain and the buffer's
 123 * flags.
 124 */
 125void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 126{
 127	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 128	struct ttm_placement *placement = &abo->placement;
 129	struct ttm_place *places = abo->placements;
 130	u64 flags = abo->flags;
 131	u32 c = 0;
 132
 133	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 134		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 135		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
 136
 137		if (adev->gmc.mem_partitions && mem_id >= 0) {
 138			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
 139			/*
 140			 * memory partition range lpfn is inclusive start + size - 1
 141			 * TTM place lpfn is exclusive start + size
 142			 */
 143			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
 144		} else {
 145			places[c].fpfn = 0;
 146			places[c].lpfn = 0;
 147		}
 148		places[c].mem_type = TTM_PL_VRAM;
 149		places[c].flags = 0;
 150
 151		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 152			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
 153		else
 154			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 155
 156		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
 157			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
 158		c++;
 159	}
 160
 161	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
 162		places[c].fpfn = 0;
 163		places[c].lpfn = 0;
 164		places[c].mem_type = AMDGPU_PL_DOORBELL;
 165		places[c].flags = 0;
 166		c++;
 167	}
 168
 169	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 170		places[c].fpfn = 0;
 171		places[c].lpfn = 0;
 172		places[c].mem_type =
 173			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
 174			AMDGPU_PL_PREEMPT : TTM_PL_TT;
 175		places[c].flags = 0;
 176		c++;
 177	}
 178
 179	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
 180		places[c].fpfn = 0;
 181		places[c].lpfn = 0;
 182		places[c].mem_type = TTM_PL_SYSTEM;
 183		places[c].flags = 0;
 184		c++;
 185	}
 186
 187	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
 188		places[c].fpfn = 0;
 189		places[c].lpfn = 0;
 190		places[c].mem_type = AMDGPU_PL_GDS;
 191		places[c].flags = 0;
 192		c++;
 193	}
 194
 195	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
 196		places[c].fpfn = 0;
 197		places[c].lpfn = 0;
 198		places[c].mem_type = AMDGPU_PL_GWS;
 199		places[c].flags = 0;
 200		c++;
 201	}
 202
 203	if (domain & AMDGPU_GEM_DOMAIN_OA) {
 204		places[c].fpfn = 0;
 205		places[c].lpfn = 0;
 206		places[c].mem_type = AMDGPU_PL_OA;
 207		places[c].flags = 0;
 208		c++;
 209	}
 210
 211	if (!c) {
 212		places[c].fpfn = 0;
 213		places[c].lpfn = 0;
 214		places[c].mem_type = TTM_PL_SYSTEM;
 215		places[c].flags = 0;
 216		c++;
 217	}
 218
 219	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
 220
 221	placement->num_placement = c;
 222	placement->placement = places;
 223}
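
/*
 * Editorial sketch: how callers in this file use the helper above (see
 * amdgpu_bo_pin_restricted() below); build a placement for a domain, then let
 * TTM validate/move the buffer into it.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */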
 224
 225/**
 226 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 227 *
 228 * @adev: amdgpu device object
 229 * @size: size for the new BO
 230 * @align: alignment for the new BO
 231 * @domain: where to place it
 232 * @bo_ptr: used to initialize BOs in structures
 233 * @gpu_addr: GPU addr of the pinned BO
 234 * @cpu_addr: optional CPU address mapping
 235 *
 236 * Allocates and pins a BO for kernel internal use, and returns it still
 237 * reserved.
 238 *
 239 * Note: a new BO is created only if *bo_ptr is NULL; otherwise the BO it points to is reused.
 240 *
 241 * Returns:
 242 * 0 on success, negative error code otherwise.
 243 */
 244int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 245			      unsigned long size, int align,
 246			      u32 domain, struct amdgpu_bo **bo_ptr,
 247			      u64 *gpu_addr, void **cpu_addr)
 248{
 249	struct amdgpu_bo_param bp;
 250	bool free = false;
 251	int r;
 252
 253	if (!size) {
 254		amdgpu_bo_unref(bo_ptr);
 255		return 0;
 256	}
 257
 258	memset(&bp, 0, sizeof(bp));
 259	bp.size = size;
 260	bp.byte_align = align;
 261	bp.domain = domain;
 262	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
 263		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 264	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 265	bp.type = ttm_bo_type_kernel;
 266	bp.resv = NULL;
 267	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 268
 269	if (!*bo_ptr) {
 270		r = amdgpu_bo_create(adev, &bp, bo_ptr);
 271		if (r) {
 272			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 273				r);
 274			return r;
 275		}
 276		free = true;
 277	}
 278
 279	r = amdgpu_bo_reserve(*bo_ptr, false);
 280	if (r) {
 281		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
 282		goto error_free;
 283	}
 284
 285	r = amdgpu_bo_pin(*bo_ptr, domain);
 286	if (r) {
 287		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
 288		goto error_unreserve;
 289	}
 290
 291	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
 292	if (r) {
 293		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
 294		goto error_unpin;
 295	}
 296
 297	if (gpu_addr)
 298		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
 299
 300	if (cpu_addr) {
 301		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 302		if (r) {
 303			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
 304			goto error_unpin;
 305		}
 306	}
 307
 308	return 0;
 309
 310error_unpin:
 311	amdgpu_bo_unpin(*bo_ptr);
 312error_unreserve:
 313	amdgpu_bo_unreserve(*bo_ptr);
 314
 315error_free:
 316	if (free)
 317		amdgpu_bo_unref(bo_ptr);
 318
 319	return r;
 320}
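
/*
 * Editorial usage sketch: the BO comes back reserved, so callers that do not
 * need to keep the reservation must drop it themselves, exactly as
 * amdgpu_bo_create_kernel() below does.
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain,
 *				      &bo, &gpu_addr, &cpu_ptr);
 *	if (!r)
 *		amdgpu_bo_unreserve(bo);
 */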
 321
 322/**
 323 * amdgpu_bo_create_kernel - create BO for kernel use
 324 *
 325 * @adev: amdgpu device object
 326 * @size: size for the new BO
 327 * @align: alignment for the new BO
 328 * @domain: where to place it
 329 * @bo_ptr:  used to initialize BOs in structures
 330 * @gpu_addr: GPU addr of the pinned BO
 331 * @cpu_addr: optional CPU address mapping
 332 *
 333 * Allocates and pins a BO for kernel internal use.
 334 *
 335 * Note: a new BO is created only if *bo_ptr is NULL; otherwise the BO it points to is reused.
 336 *
 337 * Returns:
 338 * 0 on success, negative error code otherwise.
 339 */
 340int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 341			    unsigned long size, int align,
 342			    u32 domain, struct amdgpu_bo **bo_ptr,
 343			    u64 *gpu_addr, void **cpu_addr)
 344{
 345	int r;
 346
 347	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
 348				      gpu_addr, cpu_addr);
 349
 350	if (r)
 351		return r;
 352
 353	if (*bo_ptr)
 354		amdgpu_bo_unreserve(*bo_ptr);
 355
 356	return 0;
 357}
 358
 359/**
 360 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 361 *
 362 * @adev: amdgpu device object
 363 * @offset: offset of the BO
 364 * @size: size of the BO
 365 * @bo_ptr:  used to initialize BOs in structures
 366 * @cpu_addr: optional CPU address mapping
 367 *
 368 * Creates a kernel BO at a specific offset in VRAM.
 369 *
 370 * Returns:
 371 * 0 on success, negative error code otherwise.
 372 */
 373int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 374			       uint64_t offset, uint64_t size,
 375			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
 376{
 377	struct ttm_operation_ctx ctx = { false, false };
 378	unsigned int i;
 379	int r;
 380
 381	offset &= PAGE_MASK;
 382	size = ALIGN(size, PAGE_SIZE);
 383
 384	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 385				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
 386				      cpu_addr);
 387	if (r)
 388		return r;
 389
 390	if ((*bo_ptr) == NULL)
 391		return 0;
 392
 393	/*
 394	 * Remove the original mem node and create a new one at the request
 395	 * position.
 396	 */
 397	if (cpu_addr)
 398		amdgpu_bo_kunmap(*bo_ptr);
 399
 400	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 401
 402	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 403		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 404		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 405	}
 406	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
 407			     &(*bo_ptr)->tbo.resource, &ctx);
 408	if (r)
 409		goto error;
 410
 411	if (cpu_addr) {
 412		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 413		if (r)
 414			goto error;
 415	}
 416
 417	amdgpu_bo_unreserve(*bo_ptr);
 418	return 0;
 419
 420error:
 421	amdgpu_bo_unreserve(*bo_ptr);
 422	amdgpu_bo_unref(bo_ptr);
 423	return r;
 424}
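
/*
 * Editorial usage sketch (illustrative values only): reserve a fixed range at
 * the start of VRAM, e.g. one that firmware or a pre-OS console may still be
 * using, so TTM never hands it out to anyone else.
 *
 *	struct amdgpu_bo *reserved_bo = NULL;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0, 8 << 20, &reserved_bo, NULL);
 */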
 425
 426/**
 427 * amdgpu_bo_free_kernel - free BO for kernel use
 428 *
 429 * @bo: amdgpu BO to free
 430 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 431 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 432 *
 433 * Unmaps and unpins a BO for kernel internal use.
 434 */
 435void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 436			   void **cpu_addr)
 437{
 438	if (*bo == NULL)
 439		return;
 440
 441	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
 442
 443	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
 444		if (cpu_addr)
 445			amdgpu_bo_kunmap(*bo);
 446
 447		amdgpu_bo_unpin(*bo);
 448		amdgpu_bo_unreserve(*bo);
 449	}
 450	amdgpu_bo_unref(bo);
 451
 452	if (gpu_addr)
 453		*gpu_addr = 0;
 454
 455	if (cpu_addr)
 456		*cpu_addr = NULL;
 457}
 458
 459/* Validate that the BO size fits within the total memory of the requested domain */
 460static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 461					  unsigned long size, u32 domain)
 462{
 463	struct ttm_resource_manager *man = NULL;
 464
 465	/*
 466	 * If GTT is part of requested domains the check must succeed to
 467	 * allow fall back to GTT.
 468	 */
 469	if (domain & AMDGPU_GEM_DOMAIN_GTT)
 470		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 471	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
 472		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 473	else
 474		return true;
 475
 476	if (!man) {
 477		if (domain & AMDGPU_GEM_DOMAIN_GTT)
 478			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
 479		return false;
 480	}
 481
 482	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
 483	if (size < man->size)
 484		return true;
 485
 486	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
 487	return false;
 488}
 489
 490bool amdgpu_bo_support_uswc(u64 bo_flags)
 491{
 492
 493#ifdef CONFIG_X86_32
 494	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 495	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 496	 */
 497	return false;
 498#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 499	/* Don't try to enable write-combining when it can't work, or things
 500	 * may be slow
 501	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 502	 */
 503
 504#ifndef CONFIG_COMPILE_TEST
 505#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 506	 thanks to write-combining
 507#endif
 508
 509	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 510		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 511			      "better performance thanks to write-combining\n");
 512	return false;
 513#else
 514	/* For architectures that don't support WC memory,
 515	 * mask out the WC flag from the BO
 516	 */
 517	if (!drm_arch_can_wc_memory())
 518		return false;
 519
 520	return true;
 521#endif
 522}
 523
 524/**
 525 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 526 * @adev: amdgpu device object
 527 * @bp: parameters to be used for the buffer object
 528 * @bo_ptr: pointer to the buffer object pointer
 529 *
 530 * Creates an &amdgpu_bo buffer object.
 531 *
 532 * Returns:
 533 * 0 for success or a negative error code on failure.
 534 */
 535int amdgpu_bo_create(struct amdgpu_device *adev,
 536			       struct amdgpu_bo_param *bp,
 537			       struct amdgpu_bo **bo_ptr)
 538{
 539	struct ttm_operation_ctx ctx = {
 540		.interruptible = (bp->type != ttm_bo_type_kernel),
 541		.no_wait_gpu = bp->no_wait_gpu,
 542		/* We opt to avoid OOM on system pages allocations */
 543		.gfp_retry_mayfail = true,
 544		.allow_res_evict = bp->type != ttm_bo_type_kernel,
 545		.resv = bp->resv
 546	};
 547	struct amdgpu_bo *bo;
 548	unsigned long page_align, size = bp->size;
 549	int r;
 550
 551	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
 552	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 553		/* GWS and OA don't need any alignment. */
 554		page_align = bp->byte_align;
 555		size <<= PAGE_SHIFT;
 556
 557	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 558		/* Both size and alignment must be a multiple of 4. */
 559		page_align = ALIGN(bp->byte_align, 4);
 560		size = ALIGN(size, 4) << PAGE_SHIFT;
 561	} else {
 562		/* Memory should be aligned at least to a page size. */
 563		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 564		size = ALIGN(size, PAGE_SIZE);
 565	}
 566
 567	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 568		return -ENOMEM;
 569
 570	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 571
 572	*bo_ptr = NULL;
 573	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
 574	if (bo == NULL)
 575		return -ENOMEM;
 576	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
 577	bo->vm_bo = NULL;
 578	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 579		bp->domain;
 580	bo->allowed_domains = bo->preferred_domains;
 581	if (bp->type != ttm_bo_type_kernel &&
 582	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
 583	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 584		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 585
 586	bo->flags = bp->flags;
 587
 588	if (adev->gmc.mem_partitions)
 589		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
 590		bo->xcp_id = bp->xcp_id_plus1 - 1;
 591	else
 592		/* For GPUs without spatial partitioning */
 593		bo->xcp_id = 0;
 594
 595	if (!amdgpu_bo_support_uswc(bo->flags))
 596		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 597
 598	if (adev->ras_enabled)
 599		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 600
 601	bo->tbo.bdev = &adev->mman.bdev;
 602	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
 603			  AMDGPU_GEM_DOMAIN_GDS))
 604		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 605	else
 606		amdgpu_bo_placement_from_domain(bo, bp->domain);
 607	if (bp->type == ttm_bo_type_kernel)
 608		bo->tbo.priority = 2;
 609	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
 610		bo->tbo.priority = 1;
 611
 612	if (!bp->destroy)
 613		bp->destroy = &amdgpu_bo_destroy;
 614
 615	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
 616				 &bo->placement, page_align, &ctx,  NULL,
 617				 bp->resv, bp->destroy);
 618	if (unlikely(r != 0))
 619		return r;
 620
 621	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 622	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 623		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 624					     ctx.bytes_moved);
 625	else
 626		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 627
 628	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 629	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 630		struct dma_fence *fence;
 631
 632		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
 633		if (unlikely(r))
 634			goto fail_unreserve;
 635
 636		dma_resv_add_fence(bo->tbo.base.resv, fence,
 637				   DMA_RESV_USAGE_KERNEL);
 638		dma_fence_put(fence);
 639	}
 640	if (!bp->resv)
 641		amdgpu_bo_unreserve(bo);
 642	*bo_ptr = bo;
 643
 644	trace_amdgpu_bo_create(bo);
 645
 646	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
 647	if (bp->type == ttm_bo_type_device)
 648		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 649
 650	return 0;
 651
 652fail_unreserve:
 653	if (!bp->resv)
 654		dma_resv_unlock(bo->tbo.base.resv);
 655	amdgpu_bo_unref(&bo);
 656	return r;
 657}
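
/*
 * Editorial usage sketch: filling a struct amdgpu_bo_param by hand, the same
 * way amdgpu_bo_create_reserved() does above, to get an unpinned kernel BO in
 * VRAM. The field values are illustrative.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 2 * PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */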
 658
 659/**
 660 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 661 * @adev: amdgpu device object
 662 * @bp: parameters to be used for the buffer object
 663 * @ubo_ptr: pointer to the buffer object pointer
 664 *
 665 * Creates a BO to be used by a userspace application.
 666 *
 667 * Returns:
 668 * 0 for success or a negative error code on failure.
 669 */
 670
 671int amdgpu_bo_create_user(struct amdgpu_device *adev,
 672			  struct amdgpu_bo_param *bp,
 673			  struct amdgpu_bo_user **ubo_ptr)
 674{
 675	struct amdgpu_bo *bo_ptr;
 676	int r;
 677
 678	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
 679	bp->destroy = &amdgpu_bo_user_destroy;
 680	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 681	if (r)
 682		return r;
 683
 684	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
 685	return r;
 686}
 687
 688/**
 689 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 690 * @adev: amdgpu device object
 691 * @bp: parameters to be used for the buffer object
 692 * @vmbo_ptr: pointer to the buffer object pointer
 693 *
 694 * Creates a BO to be used by GPUVM.
 695 *
 696 * Returns:
 697 * 0 for success or a negative error code on failure.
 698 */
 699
 700int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 701			struct amdgpu_bo_param *bp,
 702			struct amdgpu_bo_vm **vmbo_ptr)
 703{
 704	struct amdgpu_bo *bo_ptr;
 705	int r;
 706
 707	/* bo_ptr_size will be determined by the caller and it depends on
 708	 * the number of amdgpu_vm_pt entries.
 709	 */
 710	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
 711	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 712	if (r)
 713		return r;
 714
 715	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
 716	return r;
 717}
 718
 719/**
 720 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 721 *
 722 * @vmbo: BO that will be inserted into the shadow list
 723 *
 724 * Inserts a BO into the shadow list.
 725 */
 726void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 727{
 728	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
 729
 730	mutex_lock(&adev->shadow_list_lock);
 731	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
 732	vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
 733	vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
 734	mutex_unlock(&adev->shadow_list_lock);
 735}
 736
 737/**
 738 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 739 *
 740 * @shadow: &amdgpu_bo shadow to be restored
 741 * @fence: dma_fence associated with the operation
 742 *
 743 * Copies a buffer object's shadow content back to the object.
 744 * This is used for recovering a buffer from its shadow in case of a gpu
 745 * reset where vram context may be lost.
 746 *
 747 * Returns:
 748 * 0 for success or a negative error code on failure.
 749 */
 750int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 751
 752{
 753	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
 754	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 755	uint64_t shadow_addr, parent_addr;
 756
 757	shadow_addr = amdgpu_bo_gpu_offset(shadow);
 758	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
 759
 760	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
 761				  amdgpu_bo_size(shadow), NULL, fence,
 762				  true, false, false);
 763}
 764
 765/**
 766 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 767 * @bo: &amdgpu_bo buffer object to be mapped
 768 * @ptr: kernel virtual address to be returned
 769 *
 770 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 771 * amdgpu_bo_kptr() to get the kernel virtual address.
 772 *
 773 * Returns:
 774 * 0 for success or a negative error code on failure.
 775 */
 776int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 777{
 778	void *kptr;
 779	long r;
 780
 781	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 782		return -EPERM;
 783
 784	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
 785				  false, MAX_SCHEDULE_TIMEOUT);
 786	if (r < 0)
 787		return r;
 788
 789	kptr = amdgpu_bo_kptr(bo);
 790	if (kptr) {
 791		if (ptr)
 792			*ptr = kptr;
 793		return 0;
 794	}
 795
 796	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 797	if (r)
 798		return r;
 799
 800	if (ptr)
 801		*ptr = amdgpu_bo_kptr(bo);
 802
 803	return 0;
 804}
 805
 806/**
 807 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 808 * @bo: &amdgpu_bo buffer object
 809 *
 810 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 811 *
 812 * Returns:
 813 * the virtual address of a buffer object area.
 814 */
 815void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
 816{
 817	bool is_iomem;
 818
 819	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 820}
 821
 822/**
 823 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 824 * @bo: &amdgpu_bo buffer object to be unmapped
 825 *
 826 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 827 */
 828void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 829{
 830	if (bo->kmap.bo)
 831		ttm_bo_kunmap(&bo->kmap);
 832}
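
/*
 * Editorial usage sketch: CPU access to a BO through the kmap helpers. The BO
 * is assumed to be reserved by the caller and created without
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS; "data" and "len" stand in for whatever the
 * caller wants to write.
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */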
 833
 834/**
 835 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 836 * @bo: &amdgpu_bo buffer object
 837 *
 838 * References the contained &ttm_buffer_object.
 839 *
 840 * Returns:
 841 * a refcounted pointer to the &amdgpu_bo buffer object.
 842 */
 843struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 844{
 845	if (bo == NULL)
 846		return NULL;
 847
 848	ttm_bo_get(&bo->tbo);
 849	return bo;
 850}
 851
 852/**
 853 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 854 * @bo: &amdgpu_bo buffer object
 855 *
 856 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 857 */
 858void amdgpu_bo_unref(struct amdgpu_bo **bo)
 859{
 860	struct ttm_buffer_object *tbo;
 861
 862	if ((*bo) == NULL)
 863		return;
 864
 865	tbo = &((*bo)->tbo);
 866	ttm_bo_put(tbo);
 867	*bo = NULL;
 868}
 869
 870/**
 871 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 872 * @bo: &amdgpu_bo buffer object to be pinned
 873 * @domain: domain to be pinned to
 874 * @min_offset: the start of requested address range
 875 * @max_offset: the end of requested address range
 876 *
 877 * Pins the buffer object according to requested domain and address range. If
 878 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 879 * pin_count and pin_size accordingly.
 880 *
 881 * Pinning means to lock pages in memory along with keeping them at a fixed
 882 * offset. It is required when a buffer can not be moved, for example, when
 883 * a display buffer is being scanned out.
 884 *
 885 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 886 * where to pin a buffer if there are specific restrictions on where a buffer
 887 * must be located.
 888 *
 889 * Returns:
 890 * 0 for success or a negative error code on failure.
 891 */
 892int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 893			     u64 min_offset, u64 max_offset)
 894{
 895	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 896	struct ttm_operation_ctx ctx = { false, false };
 897	int r, i;
 898
 899	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 900		return -EPERM;
 901
 902	if (WARN_ON_ONCE(min_offset > max_offset))
 903		return -EINVAL;
 904
 905	/* Check domain to be pinned to against preferred domains */
 906	if (bo->preferred_domains & domain)
 907		domain = bo->preferred_domains & domain;
 908
 909	/* A shared bo cannot be migrated to VRAM */
 910	if (bo->tbo.base.import_attach) {
 911		if (domain & AMDGPU_GEM_DOMAIN_GTT)
 912			domain = AMDGPU_GEM_DOMAIN_GTT;
 913		else
 914			return -EINVAL;
 915	}
 916
 917	if (bo->tbo.pin_count) {
 918		uint32_t mem_type = bo->tbo.resource->mem_type;
 919		uint32_t mem_flags = bo->tbo.resource->placement;
 920
 921		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 922			return -EINVAL;
 923
 924		if ((mem_type == TTM_PL_VRAM) &&
 925		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
 926		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
 927			return -EINVAL;
 928
 929		ttm_bo_pin(&bo->tbo);
 930
 931		if (max_offset != 0) {
 932			u64 domain_start = amdgpu_ttm_domain_start(adev,
 933								   mem_type);
 934			WARN_ON_ONCE(max_offset <
 935				     (amdgpu_bo_gpu_offset(bo) - domain_start));
 936		}
 937
 938		return 0;
 939	}
 940
 941	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
 942	 * See function amdgpu_display_supported_domains()
 943	 */
 944	domain = amdgpu_bo_get_preferred_domain(adev, domain);
 945
 946	if (bo->tbo.base.import_attach)
 947		dma_buf_pin(bo->tbo.base.import_attach);
 948
 949	/* force to pin into visible video ram */
 950	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 951		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 952	amdgpu_bo_placement_from_domain(bo, domain);
 953	for (i = 0; i < bo->placement.num_placement; i++) {
 954		unsigned int fpfn, lpfn;
 955
 956		fpfn = min_offset >> PAGE_SHIFT;
 957		lpfn = max_offset >> PAGE_SHIFT;
 958
 959		if (fpfn > bo->placements[i].fpfn)
 960			bo->placements[i].fpfn = fpfn;
 961		if (!bo->placements[i].lpfn ||
 962		    (lpfn && lpfn < bo->placements[i].lpfn))
 963			bo->placements[i].lpfn = lpfn;
 964	}
 965
 966	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 967	if (unlikely(r)) {
 968		dev_err(adev->dev, "%p pin failed\n", bo);
 969		goto error;
 970	}
 971
 972	ttm_bo_pin(&bo->tbo);
 973
 974	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 975	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 976		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 977		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
 978			     &adev->visible_pin_size);
 979	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 980		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
 981	}
 982
 983error:
 984	return r;
 985}
 986
 987/**
 988 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 989 * @bo: &amdgpu_bo buffer object to be pinned
 990 * @domain: domain to be pinned to
 991 *
 992 * A simple wrapper to amdgpu_bo_pin_restricted().
 993 * Provides a simpler API for buffers that do not have any strict restrictions
 994 * on where a buffer must be located.
 995 *
 996 * Returns:
 997 * 0 for success or a negative error code on failure.
 998 */
 999int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
1000{
1001	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1002	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1003}
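
/*
 * Editorial usage sketch: pinning for scanout or any other case where the BO
 * must not move, following the reserve/pin/offset pattern used by
 * amdgpu_bo_create_reserved() above.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *		if (!r)
 *			gpu_addr = amdgpu_bo_gpu_offset(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 *
 * The matching amdgpu_bo_unpin() call must again be made with the BO reserved.
 */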
1004
1005/**
1006 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
1007 * @bo: &amdgpu_bo buffer object to be unpinned
1008 *
1009 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
1010 * Changes placement and pin size accordingly.
1011 *
1012 * Note: unlike amdgpu_bo_pin(), this function returns void; there is no
1013 * error code to check.
1014 */
1015void amdgpu_bo_unpin(struct amdgpu_bo *bo)
1016{
1017	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1018
1019	ttm_bo_unpin(&bo->tbo);
1020	if (bo->tbo.pin_count)
1021		return;
1022
1023	if (bo->tbo.base.import_attach)
1024		dma_buf_unpin(bo->tbo.base.import_attach);
1025
1026	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
1027		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
1028		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
1029			     &adev->visible_pin_size);
1030	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1031		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
1032	}
1033
1034}
1035
1036static const char * const amdgpu_vram_names[] = {
1037	"UNKNOWN",
1038	"GDDR1",
1039	"DDR2",
1040	"GDDR3",
1041	"GDDR4",
1042	"GDDR5",
1043	"HBM",
1044	"DDR3",
1045	"DDR4",
1046	"GDDR6",
1047	"DDR5",
1048	"LPDDR4",
1049	"LPDDR5"
1050};
1051
1052/**
1053 * amdgpu_bo_init - initialize memory manager
1054 * @adev: amdgpu device object
1055 *
1056 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1057 *
1058 * Returns:
1059 * 0 for success or a negative error code on failure.
1060 */
1061int amdgpu_bo_init(struct amdgpu_device *adev)
1062{
1063	/* On A+A platform, VRAM can be mapped as WB */
1064	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
1065		/* reserve PAT memory space to WC for VRAM */
1066		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
1067				adev->gmc.aper_size);
1068
1069		if (r) {
1070			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
1071			return r;
1072		}
1073
1074		/* Add an MTRR for the VRAM */
1075		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
1076				adev->gmc.aper_size);
1077	}
1078
1079	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
1080		 adev->gmc.mc_vram_size >> 20,
1081		 (unsigned long long)adev->gmc.aper_size >> 20);
1082	DRM_INFO("RAM width %dbits %s\n",
1083		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1084	return amdgpu_ttm_init(adev);
1085}
1086
1087/**
1088 * amdgpu_bo_fini - tear down memory manager
1089 * @adev: amdgpu device object
1090 *
1091 * Reverses amdgpu_bo_init() to tear down memory manager.
1092 */
1093void amdgpu_bo_fini(struct amdgpu_device *adev)
1094{
1095	int idx;
1096
1097	amdgpu_ttm_fini(adev);
1098
1099	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1100		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
1101			arch_phys_wc_del(adev->gmc.vram_mtrr);
1102			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1103		}
1104		drm_dev_exit(idx);
1105	}
1106}
1107
1108/**
1109 * amdgpu_bo_set_tiling_flags - set tiling flags
1110 * @bo: &amdgpu_bo buffer object
1111 * @tiling_flags: new flags
1112 *
1113 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
1114 * kernel driver to set the tiling flags on a buffer.
1115 *
1116 * Returns:
1117 * 0 for success or a negative error code on failure.
1118 */
1119int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1120{
1121	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1122	struct amdgpu_bo_user *ubo;
1123
1124	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1125	if (adev->family <= AMDGPU_FAMILY_CZ &&
1126	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1127		return -EINVAL;
1128
1129	ubo = to_amdgpu_bo_user(bo);
1130	ubo->tiling_flags = tiling_flags;
1131	return 0;
1132}
1133
1134/**
1135 * amdgpu_bo_get_tiling_flags - get tiling flags
1136 * @bo: &amdgpu_bo buffer object
1137 * @tiling_flags: returned flags
1138 *
1139 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
1140 * query the tiling flags on a buffer.
1141 */
1142void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1143{
1144	struct amdgpu_bo_user *ubo;
1145
1146	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1147	dma_resv_assert_held(bo->tbo.base.resv);
1148	ubo = to_amdgpu_bo_user(bo);
1149
1150	if (tiling_flags)
1151		*tiling_flags = ubo->tiling_flags;
1152}
1153
1154/**
1155 * amdgpu_bo_set_metadata - set metadata
1156 * @bo: &amdgpu_bo buffer object
1157 * @metadata: new metadata
1158 * @metadata_size: size of the new metadata
1159 * @flags: flags of the new metadata
1160 *
1161 * Sets buffer object's metadata, its size and flags.
1162 * Used via GEM ioctl.
1163 *
1164 * Returns:
1165 * 0 for success or a negative error code on failure.
1166 */
1167int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1168			   u32 metadata_size, uint64_t flags)
1169{
1170	struct amdgpu_bo_user *ubo;
1171	void *buffer;
1172
1173	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1174	ubo = to_amdgpu_bo_user(bo);
1175	if (!metadata_size) {
1176		if (ubo->metadata_size) {
1177			kfree(ubo->metadata);
1178			ubo->metadata = NULL;
1179			ubo->metadata_size = 0;
1180		}
1181		return 0;
1182	}
1183
1184	if (metadata == NULL)
1185		return -EINVAL;
1186
1187	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1188	if (buffer == NULL)
1189		return -ENOMEM;
1190
1191	kfree(ubo->metadata);
1192	ubo->metadata_flags = flags;
1193	ubo->metadata = buffer;
1194	ubo->metadata_size = metadata_size;
1195
1196	return 0;
1197}
1198
1199/**
1200 * amdgpu_bo_get_metadata - get metadata
1201 * @bo: &amdgpu_bo buffer object
1202 * @buffer: returned metadata
1203 * @buffer_size: size of the buffer
1204 * @metadata_size: size of the returned metadata
1205 * @flags: flags of the returned metadata
1206 *
1207 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1208 * less than metadata_size.
1209 * Used via GEM ioctl.
1210 *
1211 * Returns:
1212 * 0 for success or a negative error code on failure.
1213 */
1214int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1215			   size_t buffer_size, uint32_t *metadata_size,
1216			   uint64_t *flags)
1217{
1218	struct amdgpu_bo_user *ubo;
1219
1220	if (!buffer && !metadata_size)
1221		return -EINVAL;
1222
1223	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1224	ubo = to_amdgpu_bo_user(bo);
1225	if (metadata_size)
1226		*metadata_size = ubo->metadata_size;
1227
1228	if (buffer) {
1229		if (buffer_size < ubo->metadata_size)
1230			return -EINVAL;
1231
1232		if (ubo->metadata_size)
1233			memcpy(buffer, ubo->metadata, ubo->metadata_size);
1234	}
1235
1235
1236	if (flags)
1237		*flags = ubo->metadata_flags;
1238
1239	return 0;
1240}
1241
1242/**
1243 * amdgpu_bo_move_notify - notification about a memory move
1244 * @bo: pointer to a buffer object
1245 * @evict: if this move is evicting the buffer from the graphics address space
1246 * @new_mem: new resource for backing the BO
1247 *
1248 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
1249 * bookkeeping.
1250 * TTM driver callback which is called when ttm moves a buffer.
1251 */
1252void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1253			   bool evict,
1254			   struct ttm_resource *new_mem)
1255{
1256	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1257	struct ttm_resource *old_mem = bo->resource;
1258	struct amdgpu_bo *abo;
1259
1260	if (!amdgpu_bo_is_amdgpu_bo(bo))
1261		return;
1262
1263	abo = ttm_to_amdgpu_bo(bo);
1264	amdgpu_vm_bo_invalidate(adev, abo, evict);
1265
1266	amdgpu_bo_kunmap(abo);
1267
1268	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
1269	    old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
1270		dma_buf_move_notify(abo->tbo.base.dma_buf);
1271
1272	/* move_notify is called before move happens */
1273	trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
1274			     old_mem ? old_mem->mem_type : -1);
1275}
1276
1277void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
1278			  struct amdgpu_mem_stats *stats)
1279{
1280	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1281	struct ttm_resource *res = bo->tbo.resource;
1282	uint64_t size = amdgpu_bo_size(bo);
1283	struct drm_gem_object *obj;
1284	unsigned int domain;
1285	bool shared;
1286
1287	/* Abort if the BO doesn't currently have a backing store */
1288	if (!res)
1289		return;
1290
1291	obj = &bo->tbo.base;
1292	shared = drm_gem_object_is_shared_for_memory_stats(obj);
1293
1294	domain = amdgpu_mem_type_to_domain(res->mem_type);
1295	switch (domain) {
1296	case AMDGPU_GEM_DOMAIN_VRAM:
1297		stats->vram += size;
1298		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
1299			stats->visible_vram += size;
1300		if (shared)
1301			stats->vram_shared += size;
1302		break;
1303	case AMDGPU_GEM_DOMAIN_GTT:
1304		stats->gtt += size;
1305		if (shared)
1306			stats->gtt_shared += size;
1307		break;
1308	case AMDGPU_GEM_DOMAIN_CPU:
1309	default:
1310		stats->cpu += size;
1311		if (shared)
1312			stats->cpu_shared += size;
1313		break;
1314	}
1315
1316	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
1317		stats->requested_vram += size;
1318		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
1319			stats->requested_visible_vram += size;
1320
1321		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
1322			stats->evicted_vram += size;
1323			if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
1324				stats->evicted_visible_vram += size;
1325		}
1326	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
1327		stats->requested_gtt += size;
1328	}
1329}
1330
1331/**
1332 * amdgpu_bo_release_notify - notification about a BO being released
1333 * @bo: pointer to a buffer object
1334 *
1335 * Wipes VRAM buffers whose contents should not be leaked before the
1336 * memory is released.
1337 */
1338void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1339{
1340	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1341	struct dma_fence *fence = NULL;
1342	struct amdgpu_bo *abo;
1343	int r;
1344
1345	if (!amdgpu_bo_is_amdgpu_bo(bo))
1346		return;
1347
1348	abo = ttm_to_amdgpu_bo(bo);
1349
1350	WARN_ON(abo->vm_bo);
1351
1352	if (abo->kfd_bo)
1353		amdgpu_amdkfd_release_notify(abo);
1354
1355	/* We only remove the fence if the resv has individualized. */
1356	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
1357			&& bo->base.resv != &bo->base._resv);
1358	if (bo->base.resv == &bo->base._resv)
1359		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
1360
1361	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
1362	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
1363	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
1364		return;
1365
1366	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
1367		return;
1368
1369	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
1370	if (!WARN_ON(r)) {
1371		amdgpu_bo_fence(abo, fence, false);
1372		dma_fence_put(fence);
1373	}
1374
1375	dma_resv_unlock(bo->base.resv);
1376}
1377
1378/**
1379 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1380 * @bo: pointer to a buffer object
1381 *
1382 * Notifies the driver we are taking a fault on this BO and have reserved it,
1383 * also performs bookkeeping.
1384 * TTM driver callback for dealing with vm faults.
1385 *
1386 * Returns:
1387 * 0 for success or a negative error code on failure.
1388 */
1389vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1390{
1391	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1392	struct ttm_operation_ctx ctx = { false, false };
1393	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1394	int r;
1395
1396	/* Remember that this BO was accessed by the CPU */
1397	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1398
1399	if (amdgpu_res_cpu_visible(adev, bo->resource))
1400		return 0;
1401
1402	/* Can't move a pinned BO to visible VRAM */
1403	if (abo->tbo.pin_count > 0)
1404		return VM_FAULT_SIGBUS;
1405
1406	/* hurrah the memory is not visible ! */
1407	atomic64_inc(&adev->num_vram_cpu_page_faults);
1408	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
1409					AMDGPU_GEM_DOMAIN_GTT);
1410
1411	/* Avoid costly evictions; only set GTT as a busy placement */
1412	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
1413
1414	r = ttm_bo_validate(bo, &abo->placement, &ctx);
1415	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
1416		return VM_FAULT_NOPAGE;
1417	else if (unlikely(r))
1418		return VM_FAULT_SIGBUS;
1419
1420	/* this should never happen */
1421	if (bo->resource->mem_type == TTM_PL_VRAM &&
1422	    !amdgpu_res_cpu_visible(adev, bo->resource))
1423		return VM_FAULT_SIGBUS;
1424
1425	ttm_bo_move_to_lru_tail_unlocked(bo);
1426	return 0;
1427}
1428
1429/**
1430 * amdgpu_bo_fence - add fence to buffer object
1431 *
1432 * @bo: buffer object in question
1433 * @fence: fence to add
1434 * @shared: true if fence should be added shared
1435 *
1436 */
1437void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1438		     bool shared)
1439{
1440	struct dma_resv *resv = bo->tbo.base.resv;
1441	int r;
1442
1443	r = dma_resv_reserve_fences(resv, 1);
1444	if (r) {
1445		/* As last resort on OOM we block for the fence */
1446		dma_fence_wait(fence, false);
1447		return;
1448	}
1449
1450	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
1451			   DMA_RESV_USAGE_WRITE);
1452}
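
/*
 * Editorial usage sketch: attach the fence of an asynchronous operation (an
 * SDMA fill or copy, for instance) to the BO so later users wait for it, as
 * amdgpu_bo_release_notify() above does. "fence" is assumed to come from the
 * job that touched the buffer.
 *
 *	amdgpu_bo_fence(abo, fence, false);
 *	dma_fence_put(fence);
 */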
1453
1454/**
1455 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
1456 *
1457 * @adev: amdgpu device pointer
1458 * @resv: reservation object to sync to
1459 * @sync_mode: synchronization mode
1460 * @owner: fence owner
1461 * @intr: Whether the wait is interruptible
1462 *
1463 * Extract the fences from the reservation object and waits for them to finish.
1464 *
1465 * Returns:
1466 * 0 on success, errno otherwise.
1467 */
1468int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
1469			     enum amdgpu_sync_mode sync_mode, void *owner,
1470			     bool intr)
1471{
1472	struct amdgpu_sync sync;
1473	int r;
1474
1475	amdgpu_sync_create(&sync);
1476	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
1477	r = amdgpu_sync_wait(&sync, intr);
1478	amdgpu_sync_free(&sync);
1479	return r;
1480}
1481
1482/**
1483 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
1484 * @bo: buffer object to wait for
1485 * @owner: fence owner
1486 * @intr: Whether the wait is interruptible
1487 *
1488 * Wrapper to wait for fences in a BO.
1489 * Returns:
1490 * 0 on success, errno otherwise.
1491 */
1492int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1493{
1494	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1495
1496	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
1497					AMDGPU_SYNC_NE_OWNER, owner, intr);
1498}
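
/*
 * Editorial usage sketch: wait for all fences on a BO that were added by other
 * owners before touching its contents from the CPU. AMDGPU_FENCE_OWNER_UNDEFINED
 * is the owner token from amdgpu.h (assumed here, it is not defined in this file).
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 */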
1499
1500/**
1501 * amdgpu_bo_gpu_offset - return GPU offset of bo
1502 * @bo:	amdgpu object for which we query the offset
1503 *
1504 * Note: object should either be pinned or reserved when calling this
1505 * function; it might be useful to add a check for this for debugging.
1506 *
1507 * Returns:
1508 * current GPU offset of the object.
1509 */
1510u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1511{
1512	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
1513	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1514		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1515	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1516	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
1517		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1518
1519	return amdgpu_bo_gpu_offset_no_check(bo);
1520}
1521
1522/**
1523 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
1524 * @bo:	amdgpu object for which we query the offset
1525 *
1526 * Returns:
1527 * current GPU offset of the object without raising warnings.
1528 */
1529u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
1530{
1531	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1532	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
1533
1534	if (bo->tbo.resource->mem_type == TTM_PL_TT)
1535		offset = amdgpu_gmc_agp_addr(&bo->tbo);
1536
1537	if (offset == AMDGPU_BO_INVALID_OFFSET)
1538		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1539			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
1540
1541	return amdgpu_gmc_sign_extend(offset);
1542}
1543
1544/**
1545 * amdgpu_bo_get_preferred_domain - get preferred domain
1546 * @adev: amdgpu device object
1547 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1548 *
1549 * Returns:
1550 * Which of the allowed domains is preferred for allocating the BO.
1551 */
1552uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
1553					    uint32_t domain)
1554{
1555	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
1556	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
1557		domain = AMDGPU_GEM_DOMAIN_VRAM;
1558		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1559			domain = AMDGPU_GEM_DOMAIN_GTT;
1560	}
1561	return domain;
1562}
1563
1564#if defined(CONFIG_DEBUG_FS)
1565#define amdgpu_bo_print_flag(m, bo, flag)		        \
1566	do {							\
1567		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
1568			seq_printf((m), " " #flag);		\
1569		}						\
1570	} while (0)
1571
1572/**
1573 * amdgpu_bo_print_info - print BO info in debugfs file
1574 *
1575 * @id: Index or Id of the BO
1576 * @bo: Requested BO for printing info
1577 * @m: debugfs file
1578 *
1579 * Print BO information in debugfs file
1580 *
1581 * Returns:
1582 * Size of the BO in bytes.
1583 */
1584u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
1585{
1586	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1587	struct dma_buf_attachment *attachment;
1588	struct dma_buf *dma_buf;
1589	const char *placement;
1590	unsigned int pin_count;
1591	u64 size;
1592
1593	if (dma_resv_trylock(bo->tbo.base.resv)) {
1594		unsigned int domain;
1595
1596		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
1597		switch (domain) {
1598		case AMDGPU_GEM_DOMAIN_VRAM:
1599			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
1600				placement = "VRAM VISIBLE";
1601			else
1602				placement = "VRAM";
1603			break;
1604		case AMDGPU_GEM_DOMAIN_GTT:
1605			placement = "GTT";
1606			break;
1607		case AMDGPU_GEM_DOMAIN_CPU:
1608		default:
1609			placement = "CPU";
1610			break;
1611		}
1612		dma_resv_unlock(bo->tbo.base.resv);
1613	} else {
1614		placement = "UNKNOWN";
1615	}
1616
1617	size = amdgpu_bo_size(bo);
1618	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
1619			id, size, placement);
1620
1621	pin_count = READ_ONCE(bo->tbo.pin_count);
1622	if (pin_count)
1623		seq_printf(m, " pin count %d", pin_count);
1624
1625	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1626	attachment = READ_ONCE(bo->tbo.base.import_attach);
1627
1628	if (attachment)
1629		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
1630	else if (dma_buf)
1631		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
1632
1633	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
1634	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
1635	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
1636	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
1637	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
1638	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
1639	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
1640
1641	seq_puts(m, "\n");
1642
1643	return size;
1644}
1645#endif
v4.10.11
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 30 *    Dave Airlie
 31 */
 32#include <linux/list.h>
 33#include <linux/slab.h>
 34#include <drm/drmP.h>
 
 
 35#include <drm/amdgpu_drm.h>
 36#include <drm/drm_cache.h>
 37#include "amdgpu.h"
 38#include "amdgpu_trace.h"
 
 39
 
 
 
 
 
 
 
 
 
 
 
 
 40
 
 
 
 41
 42static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
 43						struct ttm_mem_reg *mem)
 44{
 45	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
 46		return 0;
 47
 48	return ((mem->start << PAGE_SHIFT) + mem->size) >
 49		adev->mc.visible_vram_size ?
 50		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
 51		mem->size;
 
 52}
 53
 54static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 55		       struct ttm_mem_reg *old_mem,
 56		       struct ttm_mem_reg *new_mem)
 57{
 58	u64 vis_size;
 59	if (!adev)
 60		return;
 61
 62	if (new_mem) {
 63		switch (new_mem->mem_type) {
 64		case TTM_PL_TT:
 65			atomic64_add(new_mem->size, &adev->gtt_usage);
 66			break;
 67		case TTM_PL_VRAM:
 68			atomic64_add(new_mem->size, &adev->vram_usage);
 69			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
 70			atomic64_add(vis_size, &adev->vram_vis_usage);
 71			break;
 72		}
 73	}
 74
 75	if (old_mem) {
 76		switch (old_mem->mem_type) {
 77		case TTM_PL_TT:
 78			atomic64_sub(old_mem->size, &adev->gtt_usage);
 79			break;
 80		case TTM_PL_VRAM:
 81			atomic64_sub(old_mem->size, &adev->vram_usage);
 82			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
 83			atomic64_sub(vis_size, &adev->vram_vis_usage);
 84			break;
 85		}
 86	}
 87}
 88
 89static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 90{
 91	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 92	struct amdgpu_bo *bo;
 
 93
 94	bo = container_of(tbo, struct amdgpu_bo, tbo);
 95
 96	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 97
 98	drm_gem_object_release(&bo->gem_base);
 99	amdgpu_bo_unref(&bo->parent);
100	if (!list_empty(&bo->shadow_list)) {
101		mutex_lock(&adev->shadow_list_lock);
102		list_del_init(&bo->shadow_list);
103		mutex_unlock(&adev->shadow_list_lock);
104	}
105	kfree(bo->metadata);
106	kfree(bo);
107}
108
109bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 
 
 
 
 
 
 
 
 
 
110{
111	if (bo->destroy == &amdgpu_ttm_bo_destroy)
 
 
112		return true;
 
113	return false;
114}
115
116static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
117				      struct ttm_placement *placement,
118				      struct ttm_place *places,
119				      u32 domain, u64 flags)
 
 
 
 
 
120{
 
 
 
 
121	u32 c = 0;
122
123	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
124		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
125		unsigned lpfn = 0;
126
127		/* This forces a reallocation if the flag wasn't set before */
128		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
129			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
 
 
 
 
 
 
 
 
 
130
131		places[c].fpfn = 0;
132		places[c].lpfn = lpfn;
133		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
134			TTM_PL_FLAG_VRAM;
135		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
136			places[c].lpfn = visible_pfn;
137		else
138			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 
 
 
 
 
 
 
 
 
 
 
139		c++;
140	}
141
142	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
143		places[c].fpfn = 0;
144		places[c].lpfn = 0;
145		places[c].flags = TTM_PL_FLAG_TT;
146		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
147			places[c].flags |= TTM_PL_FLAG_WC |
148				TTM_PL_FLAG_UNCACHED;
149		else
150			places[c].flags |= TTM_PL_FLAG_CACHED;
151		c++;
152	}
153
154	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
155		places[c].fpfn = 0;
156		places[c].lpfn = 0;
157		places[c].flags = TTM_PL_FLAG_SYSTEM;
158		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
159			places[c].flags |= TTM_PL_FLAG_WC |
160				TTM_PL_FLAG_UNCACHED;
161		else
162			places[c].flags |= TTM_PL_FLAG_CACHED;
163		c++;
164	}
165
166	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
167		places[c].fpfn = 0;
168		places[c].lpfn = 0;
169		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
 
170		c++;
171	}
172
173	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
174		places[c].fpfn = 0;
175		places[c].lpfn = 0;
176		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
 
177		c++;
178	}
179
180	if (domain & AMDGPU_GEM_DOMAIN_OA) {
181		places[c].fpfn = 0;
182		places[c].lpfn = 0;
183		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
 
184		c++;
185	}
186
187	if (!c) {
188		places[c].fpfn = 0;
189		places[c].lpfn = 0;
190		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 
191		c++;
192	}
193
 
 
194	placement->num_placement = c;
195	placement->placement = places;
196
197	placement->num_busy_placement = c;
198	placement->busy_placement = places;
199}
200
201void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
202{
203	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
204
205	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
206				  domain, abo->flags);
207}
208
209static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
210					struct ttm_placement *placement)
211{
212	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
213
214	memcpy(bo->placements, placement->placement,
215	       placement->num_placement * sizeof(struct ttm_place));
216	bo->placement.num_placement = placement->num_placement;
217	bo->placement.num_busy_placement = placement->num_busy_placement;
218	bo->placement.placement = bo->placements;
219	bo->placement.busy_placement = bo->placements;
220}
221
222/**
223 * amdgpu_bo_create_kernel - create BO for kernel use
224 *
225 * @adev: amdgpu device object
226 * @size: size for the new BO
227 * @align: alignment for the new BO
228 * @domain: where to place it
229 * @bo_ptr: resulting BO
230 * @gpu_addr: GPU addr of the pinned BO
231 * @cpu_addr: optional CPU address mapping
232 *
233 * Allocates and pins a BO for kernel internal use.
234 *
235 * Returns 0 on success, negative error code otherwise.
 
236 */
237int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
238			    unsigned long size, int align,
239			    u32 domain, struct amdgpu_bo **bo_ptr,
240			    u64 *gpu_addr, void **cpu_addr)
241{
 
 
242	int r;
243
244	r = amdgpu_bo_create(adev, size, align, true, domain,
245			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
246			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
247			     NULL, NULL, bo_ptr);
248	if (r) {
249		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
250		return r;
251	}
252
253	r = amdgpu_bo_reserve(*bo_ptr, false);
254	if (r) {
255		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
256		goto error_free;
257	}
258
259	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
260	if (r) {
261		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
262		goto error_unreserve;
263	}
264
265	if (cpu_addr) {
266		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
267		if (r) {
268			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
269			goto error_unreserve;
270		}
271	}
272
273	amdgpu_bo_unreserve(*bo_ptr);
274
275	return 0;
276
277error_unreserve:
278	amdgpu_bo_unreserve(*bo_ptr);
 
279
280error_free:
 
281	amdgpu_bo_unref(bo_ptr);
282
283	return r;
284}
285
286/**
287 * amdgpu_bo_free_kernel - free BO for kernel use
288 *
289 * @bo: amdgpu BO to free
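 * @gpu_addr: pointer where the BO's GPU address was stored; reset to 0, may be NULL
 * @cpu_addr: pointer where the BO's CPU mapping was stored; reset to NULL, may be NULL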
290 *
 291 * Unmaps, unpins and drops the reference to a BO created for kernel internal use.
292 */
293void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
294			   void **cpu_addr)
295{
296	if (*bo == NULL)
297		return;
298
299	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
 
 
300		if (cpu_addr)
301			amdgpu_bo_kunmap(*bo);
302
303		amdgpu_bo_unpin(*bo);
304		amdgpu_bo_unreserve(*bo);
305	}
306	amdgpu_bo_unref(bo);
307
308	if (gpu_addr)
309		*gpu_addr = 0;
310
311	if (cpu_addr)
312		*cpu_addr = NULL;
313}
314
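/**
 * amdgpu_bo_create_restricted - create a BO with a caller-provided placement
 *
 * @adev: amdgpu device object
 * @size: size of the BO in bytes
 * @byte_align: alignment in bytes
 * @kernel: true for kernel internal allocations
 * @domain: allowed domains (mask of AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @sg: optional scatter/gather table for imported buffers
 * @placement: TTM placement restricting where the BO may be located
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: resulting BO
 *
 * Allocates and initializes an &amdgpu_bo and validates it into the given
 * placement. Used by amdgpu_bo_create() and by callers that need a
 * non-standard placement.
 *
 * Returns 0 on success, negative error code otherwise.
 */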
315int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
316				unsigned long size, int byte_align,
317				bool kernel, u32 domain, u64 flags,
318				struct sg_table *sg,
319				struct ttm_placement *placement,
320				struct reservation_object *resv,
321				struct amdgpu_bo **bo_ptr)
322{
323	struct amdgpu_bo *bo;
324	enum ttm_bo_type type;
325	unsigned long page_align;
326	size_t acc_size;
327	int r;
328
329	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
330	size = ALIGN(size, PAGE_SIZE);
331
332	if (kernel) {
333		type = ttm_bo_type_kernel;
334	} else if (sg) {
335		type = ttm_bo_type_sg;
336	} else {
337		type = ttm_bo_type_device;
 
 
338	}
339	*bo_ptr = NULL;
340
341	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
342				       sizeof(struct amdgpu_bo));
 
 
343
344	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 
345	if (bo == NULL)
346		return -ENOMEM;
347	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
348	if (unlikely(r)) {
349		kfree(bo);
350		return r;
351	}
352	INIT_LIST_HEAD(&bo->shadow_list);
353	INIT_LIST_HEAD(&bo->va);
354	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
355					 AMDGPU_GEM_DOMAIN_GTT |
356					 AMDGPU_GEM_DOMAIN_CPU |
357					 AMDGPU_GEM_DOMAIN_GDS |
358					 AMDGPU_GEM_DOMAIN_GWS |
359					 AMDGPU_GEM_DOMAIN_OA);
360	bo->allowed_domains = bo->prefered_domains;
361	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
362		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
363
364	bo->flags = flags;
365
366	/* For architectures that don't support WC memory,
367	 * mask out the WC flag from the BO
368	 */
369	if (!drm_arch_can_wc_memory())
370		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
371
372	amdgpu_fill_placement_to_bo(bo, placement);
 373	/* Kernel allocations are uninterruptible */
374
375	if (!resv) {
376		bool locked;
377
378		reservation_object_init(&bo->tbo.ttm_resv);
379		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
380		WARN_ON(!locked);
381	}
382	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
383			&bo->placement, page_align, !kernel, NULL,
384			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
385			&amdgpu_ttm_bo_destroy);
386	if (unlikely(r != 0))
387		return r;
388
389	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
390	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
391		struct dma_fence *fence;
392
393		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
394		if (unlikely(r))
395			goto fail_unreserve;
396
397		amdgpu_bo_fence(bo, fence, false);
398		dma_fence_put(bo->tbo.moving);
399		bo->tbo.moving = dma_fence_get(fence);
400		dma_fence_put(fence);
401	}
402	if (!resv)
403		ww_mutex_unlock(&bo->tbo.resv->lock);
404	*bo_ptr = bo;
405
406	trace_amdgpu_bo_create(bo);
407
408	return 0;
409
410fail_unreserve:
411	ww_mutex_unlock(&bo->tbo.resv->lock);
 
412	amdgpu_bo_unref(&bo);
413	return r;
414}
415
416static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
417				   unsigned long size, int byte_align,
418				   struct amdgpu_bo *bo)
419{
420	struct ttm_placement placement = {0};
421	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
422	int r;
423
424	if (bo->shadow)
425		return 0;
426
427	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
428	memset(&placements, 0,
429	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
430
431	amdgpu_ttm_placement_init(adev, &placement,
432				  placements, AMDGPU_GEM_DOMAIN_GTT,
433				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);
434
435	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
436					AMDGPU_GEM_DOMAIN_GTT,
437					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
438					NULL, &placement,
439					bo->tbo.resv,
440					&bo->shadow);
441	if (!r) {
442		bo->shadow->parent = amdgpu_bo_ref(bo);
443		mutex_lock(&adev->shadow_list_lock);
444		list_add_tail(&bo->shadow_list, &adev->shadow_list);
445		mutex_unlock(&adev->shadow_list_lock);
446	}
447
448	return r;
449}
450
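/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 *
 * @adev: amdgpu device object
 * @size: size of the BO in bytes
 * @byte_align: alignment in bytes
 * @kernel: true for kernel internal allocations
 * @domain: allowed domains (mask of AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: resulting BO
 *
 * Builds the default placement for @domain and creates the BO. When
 * AMDGPU_GEM_CREATE_SHADOW is requested and a VRAM backup is needed, a
 * shadow BO in GTT is created as well.
 *
 * Returns 0 on success, negative error code otherwise.
 */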
451int amdgpu_bo_create(struct amdgpu_device *adev,
452		     unsigned long size, int byte_align,
453		     bool kernel, u32 domain, u64 flags,
454		     struct sg_table *sg,
455		     struct reservation_object *resv,
456		     struct amdgpu_bo **bo_ptr)
457{
458	struct ttm_placement placement = {0};
459	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
460	int r;
461
462	memset(&placements, 0,
463	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
464
465	amdgpu_ttm_placement_init(adev, &placement,
466				  placements, domain, flags);
467
468	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
469					domain, flags, sg, &placement,
470					resv, bo_ptr);
471	if (r)
472		return r;
473
474	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
475		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
476		if (r)
477			amdgpu_bo_unref(bo_ptr);
478	}
479
480	return r;
481}
482
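/**
 * amdgpu_bo_backup_to_shadow - copy a BO's contents to its GTT shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to back up, must be reserved and have a shadow
 * @resv: reservation object to synchronize the copy with
 * @fence: returned fence signaling completion of the copy
 * @direct: submit the copy directly instead of through the scheduler
 *
 * Returns 0 on success, negative error code otherwise.
 */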
483int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
484			       struct amdgpu_ring *ring,
485			       struct amdgpu_bo *bo,
486			       struct reservation_object *resv,
487			       struct dma_fence **fence,
488			       bool direct)
489
490{
491	struct amdgpu_bo *shadow = bo->shadow;
492	uint64_t bo_addr, shadow_addr;
493	int r;
494
495	if (!shadow)
496		return -EINVAL;
497
498	bo_addr = amdgpu_bo_gpu_offset(bo);
499	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
500
501	r = reservation_object_reserve_shared(bo->tbo.resv);
502	if (r)
503		goto err;
504
505	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
506			       amdgpu_bo_size(bo), resv, fence,
507			       direct);
508	if (!r)
509		amdgpu_bo_fence(bo, *fence, true);
510
511err:
512	return r;
513}
514
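/**
 * amdgpu_bo_restore_from_shadow - restore a BO's contents from its GTT shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to restore, must be reserved and have a shadow
 * @resv: reservation object to synchronize the copy with
 * @fence: returned fence signaling completion of the copy
 * @direct: submit the copy directly instead of through the scheduler
 *
 * Returns 0 on success, negative error code otherwise.
 */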
515int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
516				  struct amdgpu_ring *ring,
517				  struct amdgpu_bo *bo,
518				  struct reservation_object *resv,
519				  struct dma_fence **fence,
520				  bool direct)
521
 
522{
523	struct amdgpu_bo *shadow = bo->shadow;
524	uint64_t bo_addr, shadow_addr;
525	int r;
526
527	if (!shadow)
528		return -EINVAL;
529
530	bo_addr = amdgpu_bo_gpu_offset(bo);
531	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
532
533	r = reservation_object_reserve_shared(bo->tbo.resv);
534	if (r)
535		goto err;
 
536
537	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
538			       amdgpu_bo_size(bo), resv, fence,
539			       direct);
540	if (!r)
541		amdgpu_bo_fence(bo, *fence, true);
542
543err:
544	return r;
 
545}
546
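/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map, must be reserved
 * @ptr: optional location where the kernel virtual address is stored
 *
 * Waits for any pending exclusive fence and maps the whole BO with
 * ttm_bo_kmap(). The mapping is cached in bo->kptr and reused by later
 * calls. BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot be mapped.
 *
 * Returns 0 on success, negative error code otherwise.
 */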
547int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
548{
549	bool is_iomem;
550	long r;
551
552	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
553		return -EPERM;
554
555	if (bo->kptr) {
556		if (ptr) {
557			*ptr = bo->kptr;
558		}
559		return 0;
560	}
561
562	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
563						MAX_SCHEDULE_TIMEOUT);
564	if (r < 0)
565		return r;
566
567	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
568	if (r)
569		return r;
570
571	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
572	if (ptr)
573		*ptr = bo->kptr;
574
575	return 0;
576}
577
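/**
 * amdgpu_bo_kunmap - unmap a BO from kernel address space
 *
 * @bo: BO to unmap
 *
 * Drops the kernel mapping created by amdgpu_bo_kmap(), if any.
 */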
578void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
579{
580	if (bo->kptr == NULL)
581		return;
582	bo->kptr = NULL;
583	ttm_bo_kunmap(&bo->kmap);
584}
585
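/**
 * amdgpu_bo_ref - acquire a reference to a BO
 *
 * @bo: BO to reference, may be NULL
 *
 * Returns @bo with an additional TTM reference held, or NULL.
 */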
586struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
587{
588	if (bo == NULL)
589		return NULL;
590
591	ttm_bo_reference(&bo->tbo);
592	return bo;
593}
594
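/**
 * amdgpu_bo_unref - drop a reference to a BO
 *
 * @bo: pointer to the BO, set to NULL once the reference has been dropped
 */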
595void amdgpu_bo_unref(struct amdgpu_bo **bo)
596{
597	struct ttm_buffer_object *tbo;
598
599	if ((*bo) == NULL)
600		return;
601
602	tbo = &((*bo)->tbo);
603	ttm_bo_unref(&tbo);
604	if (tbo == NULL)
605		*bo = NULL;
606}
607
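/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin, must be reserved
 * @domain: domain to pin the BO into (AMDGPU_GEM_DOMAIN_*)
 * @min_offset: lowest acceptable offset within the domain
 * @max_offset: highest acceptable offset within the domain, 0 for no limit
 * @gpu_addr: optional location where the pinned GPU address is stored
 *
 * Validates the BO into @domain with TTM_PL_FLAG_NO_EVICT so that it cannot
 * be moved or evicted. Pinning an already pinned BO only increases the pin
 * count, provided the requested domain matches the current placement.
 *
 * Returns 0 on success, negative error code otherwise.
 */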
608int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
609			     u64 min_offset, u64 max_offset,
610			     u64 *gpu_addr)
611{
612	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
613	int r, i;
614	unsigned fpfn, lpfn;
615
616	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
617		return -EPERM;
618
619	if (WARN_ON_ONCE(min_offset > max_offset))
620		return -EINVAL;
621
622	if (bo->pin_count) {
623		uint32_t mem_type = bo->tbo.mem.mem_type;
624
625		if (domain != amdgpu_mem_type_to_domain(mem_type))
 
 
626			return -EINVAL;
627
628		bo->pin_count++;
629		if (gpu_addr)
630			*gpu_addr = amdgpu_bo_gpu_offset(bo);
631
632		if (max_offset != 0) {
633			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
 
634			WARN_ON_ONCE(max_offset <
635				     (amdgpu_bo_gpu_offset(bo) - domain_start));
636		}
637
638		return 0;
639	}
640
641	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
642	amdgpu_ttm_placement_from_domain(bo, domain);
643	for (i = 0; i < bo->placement.num_placement; i++) {
 644		/* force the pin into CPU-visible VRAM */
645		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
646		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
647		    (!max_offset || max_offset >
648		     adev->mc.visible_vram_size)) {
649			if (WARN_ON_ONCE(min_offset >
650					 adev->mc.visible_vram_size))
651				return -EINVAL;
652			fpfn = min_offset >> PAGE_SHIFT;
653			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
654		} else {
655			fpfn = min_offset >> PAGE_SHIFT;
656			lpfn = max_offset >> PAGE_SHIFT;
657		}
658		if (fpfn > bo->placements[i].fpfn)
659			bo->placements[i].fpfn = fpfn;
660		if (!bo->placements[i].lpfn ||
661		    (lpfn && lpfn < bo->placements[i].lpfn))
662			bo->placements[i].lpfn = lpfn;
663		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
664	}
665
666	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
667	if (unlikely(r)) {
668		dev_err(adev->dev, "%p pin failed\n", bo);
669		goto error;
670	}
671	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
672	if (unlikely(r)) {
673		dev_err(adev->dev, "%p bind failed\n", bo);
674		goto error;
675	}
676
677	bo->pin_count = 1;
678	if (gpu_addr != NULL)
679		*gpu_addr = amdgpu_bo_gpu_offset(bo);
680	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
681		adev->vram_pin_size += amdgpu_bo_size(bo);
682		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
683			adev->invisible_pin_size += amdgpu_bo_size(bo);
684	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
685		adev->gart_pin_size += amdgpu_bo_size(bo);
686	}
687
688error:
689	return r;
690}
691
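/**
 * amdgpu_bo_pin - pin a BO into a domain without offset restrictions
 *
 * @bo: BO to pin, must be reserved
 * @domain: domain to pin the BO into (AMDGPU_GEM_DOMAIN_*)
 * @gpu_addr: optional location where the pinned GPU address is stored
 *
 * Returns 0 on success, negative error code otherwise.
 */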
692int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
693{
694	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
 
695}
696
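/**
 * amdgpu_bo_unpin - unpin a BO
 *
 * @bo: BO to unpin, must be reserved
 *
 * Decreases the pin count; once it reaches zero the BO is made evictable
 * again and the pinned size accounting is updated.
 *
 * Returns 0 on success, negative error code otherwise.
 */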
697int amdgpu_bo_unpin(struct amdgpu_bo *bo)
698{
699	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
700	int r, i;
701
702	if (!bo->pin_count) {
703		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
704		return 0;
705	}
706	bo->pin_count--;
707	if (bo->pin_count)
708		return 0;
709	for (i = 0; i < bo->placement.num_placement; i++) {
710		bo->placements[i].lpfn = 0;
711		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
712	}
713	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
714	if (unlikely(r)) {
715		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
716		goto error;
717	}
718
719	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
720		adev->vram_pin_size -= amdgpu_bo_size(bo);
721		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
722			adev->invisible_pin_size -= amdgpu_bo_size(bo);
723	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
724		adev->gart_pin_size -= amdgpu_bo_size(bo);
725	}
726
727error:
728	return r;
729}
730
731int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
732{
 733	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
734	if (0 && (adev->flags & AMD_IS_APU)) {
735		/* Useless to evict on IGP chips */
736		return 0;
737	}
738	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
739}
740
741static const char *amdgpu_vram_names[] = {
742	"UNKNOWN",
743	"GDDR1",
744	"DDR2",
745	"GDDR3",
746	"GDDR4",
747	"GDDR5",
748	"HBM",
749	"DDR3"
750};
751
752int amdgpu_bo_init(struct amdgpu_device *adev)
753{
754	/* reserve PAT memory space to WC for VRAM */
755	arch_io_reserve_memtype_wc(adev->mc.aper_base,
756				   adev->mc.aper_size);
757
758	/* Add an MTRR for the VRAM */
759	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
760					      adev->mc.aper_size);
761	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
762		adev->mc.mc_vram_size >> 20,
763		(unsigned long long)adev->mc.aper_size >> 20);
764	DRM_INFO("RAM width %dbits %s\n",
765		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
766	return amdgpu_ttm_init(adev);
767}
768
769void amdgpu_bo_fini(struct amdgpu_device *adev)
770{
 
 
771	amdgpu_ttm_fini(adev);
772	arch_phys_wc_del(adev->mc.vram_mtrr);
773	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
774}
775
776int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
777			     struct vm_area_struct *vma)
778{
779	return ttm_fbdev_mmap(vma, &bo->tbo);
780}
781
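/**
 * amdgpu_bo_set_tiling_flags - set tiling flags of a BO
 *
 * @bo: BO to modify
 * @tiling_flags: new tiling flags (AMDGPU_TILING_*)
 *
 * Returns 0 on success, -EINVAL if the TILE_SPLIT value is out of range.
 */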
782int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
783{
784	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
785		return -EINVAL;
786
787	bo->tiling_flags = tiling_flags;
 
788	return 0;
789}
790
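/**
 * amdgpu_bo_get_tiling_flags - get tiling flags of a BO
 *
 * @bo: BO to query, must be reserved
 * @tiling_flags: optional location where the tiling flags are stored
 */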
791void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
792{
793	lockdep_assert_held(&bo->tbo.resv->lock.base);
794
795	if (tiling_flags)
796		*tiling_flags = bo->tiling_flags;
797}
798
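/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a BO
 *
 * @bo: BO to modify
 * @metadata: metadata to copy, may be NULL when @metadata_size is 0
 * @metadata_size: size of the metadata in bytes, 0 clears existing metadata
 * @flags: metadata flags stored alongside the data
 *
 * Returns 0 on success, negative error code otherwise.
 */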
799int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
800			    uint32_t metadata_size, uint64_t flags)
801{
 
802	void *buffer;
803
 
 
804	if (!metadata_size) {
805		if (bo->metadata_size) {
806			kfree(bo->metadata);
807			bo->metadata = NULL;
808			bo->metadata_size = 0;
809		}
810		return 0;
811	}
812
813	if (metadata == NULL)
814		return -EINVAL;
815
816	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
817	if (buffer == NULL)
818		return -ENOMEM;
819
820	kfree(bo->metadata);
821	bo->metadata_flags = flags;
822	bo->metadata = buffer;
823	bo->metadata_size = metadata_size;
824
825	return 0;
826}
827
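/**
 * amdgpu_bo_get_metadata - retrieve the metadata attached to a BO
 *
 * @bo: BO to query
 * @buffer: optional buffer receiving a copy of the metadata
 * @buffer_size: size of @buffer in bytes
 * @metadata_size: optional location where the metadata size is stored
 * @flags: optional location where the metadata flags are stored
 *
 * Returns 0 on success, -EINVAL if @buffer is too small or neither @buffer
 * nor @metadata_size was provided.
 */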
828int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
829			   size_t buffer_size, uint32_t *metadata_size,
830			   uint64_t *flags)
831{
 
 
832	if (!buffer && !metadata_size)
833		return -EINVAL;
834
835	if (buffer) {
836		if (buffer_size < bo->metadata_size)
837			return -EINVAL;
838
839		if (bo->metadata_size)
840			memcpy(buffer, bo->metadata, bo->metadata_size);
841	}
842
843	if (metadata_size)
844		*metadata_size = bo->metadata_size;
845	if (flags)
846		*flags = bo->metadata_flags;
847
848	return 0;
849}
850
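/**
 * amdgpu_bo_move_notify - notification that a BO is about to move
 *
 * @bo: buffer object that is being moved
 * @new_mem: new placement, or NULL when the BO is being destroyed
 *
 * Invalidates the VM mappings of the BO and updates the VRAM/GTT usage
 * statistics before TTM performs the actual move.
 */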
851void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
852			   struct ttm_mem_reg *new_mem)
 
853{
854	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
855	struct amdgpu_bo *abo;
856	struct ttm_mem_reg *old_mem = &bo->mem;
857
858	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
859		return;
860
861	abo = container_of(bo, struct amdgpu_bo, tbo);
862	amdgpu_vm_bo_invalidate(adev, abo);
 
 
863
864	/* update statistics */
865	if (!new_mem)
866		return;
867
868	/* move_notify is called before move happens */
869	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
870
871	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
872}
873
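/**
 * amdgpu_bo_fault_reserve_notify - notification of a CPU fault on a BO
 *
 * @bo: buffer object that is being faulted
 *
 * If the BO is backed by VRAM that is not CPU-accessible, try to move it
 * into the CPU-visible part of VRAM, falling back to GTT if that fails.
 *
 * Returns 0 on success, negative error code otherwise.
 */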
874int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
875{
876	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
877	struct amdgpu_bo *abo;
878	unsigned long offset, size, lpfn;
879	int i, r;
880
881	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
882		return 0;
883
884	abo = container_of(bo, struct amdgpu_bo, tbo);
885	if (bo->mem.mem_type != TTM_PL_VRAM)
886		return 0;
887
888	size = bo->mem.num_pages << PAGE_SHIFT;
889	offset = bo->mem.start << PAGE_SHIFT;
890	/* TODO: figure out how to map scattered VRAM to the CPU */
891	if ((offset + size) <= adev->mc.visible_vram_size &&
892	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
893		return 0;
894
895	/* Can't move a pinned BO to visible VRAM */
896	if (abo->pin_count > 0)
897		return -EINVAL;
898
 899	/* the memory is not CPU-visible; try to move it into the visible window */
900	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
901	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
902	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
903	for (i = 0; i < abo->placement.num_placement; i++) {
904		/* Force into visible VRAM */
905		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
906		    (!abo->placements[i].lpfn ||
907		     abo->placements[i].lpfn > lpfn))
908			abo->placements[i].lpfn = lpfn;
909	}
910	r = ttm_bo_validate(bo, &abo->placement, false, false);
911	if (unlikely(r == -ENOMEM)) {
912		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
913		return ttm_bo_validate(bo, &abo->placement, false, false);
914	} else if (unlikely(r != 0)) {
915		return r;
916	}
917
918	offset = bo->mem.start << PAGE_SHIFT;
 919	/* this should never happen after validating into visible VRAM */
920	if ((offset + size) > adev->mc.visible_vram_size)
921		return -EINVAL;
 
922
 
923	return 0;
924}
925
926/**
927 * amdgpu_bo_fence - add fence to buffer object
928 *
929 * @bo: buffer object in question
930 * @fence: fence to add
931 * @shared: true if fence should be added shared
932 *
933 */
934void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
935		     bool shared)
936{
937	struct reservation_object *resv = bo->tbo.resv;
 
938
939	if (shared)
940		reservation_object_add_shared_fence(resv, fence);
941	else
942		reservation_object_add_excl_fence(resv, fence);
943}
944
945/**
946 * amdgpu_bo_gpu_offset - return GPU offset of bo
947 * @bo:	amdgpu object for which we query the offset
948 *
949 * Returns current GPU offset of the object.
950 *
 951 * Note: the object should either be pinned or reserved when calling this
 952 * function; it might be useful to add a check for this for debugging.
953 */
954u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
955{
956	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
957	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
958		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
959	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
960		     !bo->pin_count);
961	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
962	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
963		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
964
965	return bo->tbo.offset;
966}