/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. The DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects, which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc., for kernel-managed allocations used by the GPU.
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (abo->tbo.type == ttm_bo_type_kernel &&
		    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;

		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		/*
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    !(adev->flags & AMD_IS_APU))
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
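
/*
 * Illustrative usage sketch (not part of this file): because
 * amdgpu_bo_create_reserved() returns with the BO still reserved, the caller
 * must drop the reservation itself once setup is done. The "my_" names below
 * are hypothetical.
 *
 *	struct amdgpu_bo *my_bo = NULL;
 *	u64 my_gpu_addr;
 *	void *my_cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &my_bo,
 *				      &my_gpu_addr, &my_cpu_addr);
 *	if (r)
 *		return r;
 *	memset(my_cpu_addr, 0, PAGE_SIZE);	(init while still reserved)
 *	amdgpu_bo_unreserve(my_bo);
 */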

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the request
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
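
/*
 * Illustrative lifecycle sketch (not part of this file): a typical IP block
 * allocates a pinned, mapped scratch BO at init time with
 * amdgpu_bo_create_kernel() and releases it with amdgpu_bo_free_kernel() at
 * fini time. The "my_" names are hypothetical.
 *
 *	r = amdgpu_bo_create_kernel(adev, my_buf_size, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &my_bo,
 *				    &my_gpu_addr, &my_cpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&my_bo, &my_gpu_addr, &my_cpu_addr);
 */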

/* Validate that the BO size fits in one of the memory managers backing the
 * requested domains.
 */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	else
		return true;

	if (!man) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		return false;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
	if (size < man->size)
		return true;

	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 2;
	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
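
/*
 * Illustrative sketch (not part of this file): internal callers fill in an
 * &amdgpu_bo_param and hand it to amdgpu_bo_create(), along the lines of the
 * pattern used by amdgpu_bo_create_reserved() above. Roughly:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = 0;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */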

/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
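
/*
 * Illustrative sketch (not part of this file): CPU access through the kernel
 * mapping is typically bracketed by amdgpu_bo_kmap()/amdgpu_bo_kunmap(),
 * with the BO reserved around the access:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */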

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * Pins the buffer object according to requested domain. If the memory is
 * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
 * pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);
		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
		    bo->placements[i].mem_type == TTM_PL_VRAM)
			bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}
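
/*
 * Illustrative sketch (not part of this file): a scanout buffer is typically
 * reserved, pinned to VRAM for the lifetime of the scanout, and unpinned
 * afterwards. Both pin and unpin expect the BO to be reserved:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 *	...
 *	amdgpu_bo_reserve(bo, true);
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */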

static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
				adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   u32 metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
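
/*
 * Illustrative sketch (not part of this file): the GEM metadata ioctl path
 * stores and retrieves opaque UMD data roughly like this, where "blob" is a
 * hypothetical caller-owned buffer at least metadata_size bytes long:
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, blob_size, 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, blob, blob_size,
 *				   &metadata_size, &flags);
 */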

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new resource for backing the BO
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_resource *old_mem = bo->resource;
	struct amdgpu_bo *abo;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
			     old_mem ? old_mem->mem_type : -1);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats,
			  unsigned int sz)
{
	const unsigned int domain_to_pl[] = {
		[ilog2(AMDGPU_GEM_DOMAIN_CPU)]	    = TTM_PL_SYSTEM,
		[ilog2(AMDGPU_GEM_DOMAIN_GTT)]	    = TTM_PL_TT,
		[ilog2(AMDGPU_GEM_DOMAIN_VRAM)]	    = TTM_PL_VRAM,
		[ilog2(AMDGPU_GEM_DOMAIN_GDS)]	    = AMDGPU_PL_GDS,
		[ilog2(AMDGPU_GEM_DOMAIN_GWS)]	    = AMDGPU_PL_GWS,
		[ilog2(AMDGPU_GEM_DOMAIN_OA)]	    = AMDGPU_PL_OA,
		[ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
	};
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	struct drm_gem_object *obj = &bo->tbo.base;
	uint64_t size = amdgpu_bo_size(bo);
	unsigned int type;

	if (!res) {
		/*
		 * If no backing store use one of the preferred domain for basic
		 * stats. We take the MSB since that should give a reasonable
		 * view.
		 */
		BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
			     TTM_PL_VRAM < TTM_PL_SYSTEM);
		type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
		if (!type)
			return;
		type--;
		if (drm_WARN_ON_ONCE(&adev->ddev,
				     type >= ARRAY_SIZE(domain_to_pl)))
			return;
		type = domain_to_pl[type];
	} else {
		type = res->mem_type;
	}

	if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
		return;

	/* DRM stats common fields: */

	if (drm_gem_object_is_shared_for_memory_stats(obj))
		stats[type].drm.shared += size;
	else
		stats[type].drm.private += size;

	if (res) {
		stats[type].drm.resident += size;

		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
			stats[type].drm.active += size;
		else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
			stats[type].drm.purgeable += size;
	}

	/* amdgpu specific stats: */

	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
		stats[TTM_PL_VRAM].requested += size;
		if (type != TTM_PL_VRAM)
			stats[TTM_PL_VRAM].evicted += size;
	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
		stats[TTM_PL_TT].requested += size;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	WARN_ON(abo->vm_bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
	if (!WARN_ON(r)) {
		amdgpu_vram_mgr_set_cleared(bo->resource);
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (amdgpu_res_cpu_visible(adev, bo->resource))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    !amdgpu_res_cpu_visible(adev, bo->resource))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}
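
/*
 * Illustrative sketch (not part of this file): after submitting a GPU
 * operation on a buffer, the resulting fence is attached to the BO's
 * reservation object so that later users wait for it. Per the code above,
 * shared=true maps to DMA_RESV_USAGE_READ, shared=false to
 * DMA_RESV_USAGE_WRITE:
 *
 *	amdgpu_bo_fence(bo, fence, false);	(exclusive / write access)
 *	amdgpu_bo_fence(bo, fence, true);	(shared / read access)
 */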

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;

	if (bo->tbo.resource->mem_type == TTM_PL_TT)
		offset = amdgpu_gmc_agp_addr(&bo->tbo);

	if (offset == AMDGPU_BO_INVALID_OFFSET)
		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	if (dma_resv_trylock(bo->tbo.base.resv)) {
		if (!bo->tbo.resource) {
			placement = "NONE";
		} else {
			switch (bo->tbo.resource->mem_type) {
			case TTM_PL_VRAM:
				if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
					placement = "VRAM VISIBLE";
				else
					placement = "VRAM";
				break;
			case TTM_PL_TT:
				placement = "GTT";
				break;
			case AMDGPU_PL_GDS:
				placement = "GDS";
				break;
			case AMDGPU_PL_GWS:
				placement = "GWS";
				break;
			case AMDGPU_PL_OA:
				placement = "OA";
				break;
			case AMDGPU_PL_PREEMPT:
				placement = "PREEMPTIBLE";
				break;
			case AMDGPU_PL_DOORBELL:
				placement = "DOORBELL";
				break;
			case TTM_PL_SYSTEM:
			default:
				placement = "CPU";
				break;
			}
		}
		dma_resv_unlock(bo->tbo.base.resv);
	} else {
		placement = "UNKNOWN";
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
			id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
	else if (dma_buf)
		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif
v5.14.15
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30 *    Dave Airlie
  31 */
  32#include <linux/list.h>
  33#include <linux/slab.h>
  34#include <linux/dma-buf.h>
  35
 
  36#include <drm/amdgpu_drm.h>
  37#include <drm/drm_cache.h>
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_amdkfd.h"
 
 
  41
  42/**
  43 * DOC: amdgpu_object
  44 *
  45 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
  46 * represents memory used by driver (VRAM, system memory, etc.). The driver
  47 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
  48 * to create/destroy/set buffer object which are then managed by the kernel TTM
  49 * memory manager.
  50 * The interfaces are also used internally by kernel clients, including gfx,
  51 * uvd, etc. for kernel managed allocations used by the GPU.
  52 *
  53 */
  54
  55static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
  56{
  57	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  58
  59	amdgpu_bo_kunmap(bo);
  60
  61	if (bo->tbo.base.import_attach)
  62		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
  63	drm_gem_object_release(&bo->tbo.base);
  64	amdgpu_bo_unref(&bo->parent);
  65	kvfree(bo);
  66}
  67
  68static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
  69{
  70	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  71	struct amdgpu_bo_user *ubo;
  72
  73	ubo = to_amdgpu_bo_user(bo);
  74	kfree(ubo->metadata);
  75	amdgpu_bo_destroy(tbo);
  76}
  77
  78static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
  79{
  80	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
  81	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  82	struct amdgpu_bo_vm *vmbo;
  83
  84	vmbo = to_amdgpu_bo_vm(bo);
  85	/* in case amdgpu_device_recover_vram got NULL of bo->parent */
  86	if (!list_empty(&vmbo->shadow_list)) {
  87		mutex_lock(&adev->shadow_list_lock);
  88		list_del_init(&vmbo->shadow_list);
  89		mutex_unlock(&adev->shadow_list_lock);
  90	}
  91
  92	amdgpu_bo_destroy(tbo);
  93}
  94
  95/**
  96 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
  97 * @bo: buffer object to be checked
  98 *
  99 * Uses destroy function associated with the object to determine if this is
 100 * an &amdgpu_bo.
 101 *
 102 * Returns:
 103 * true if the object belongs to &amdgpu_bo, false if not.
 104 */
 105bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 106{
 107	if (bo->destroy == &amdgpu_bo_destroy ||
 108	    bo->destroy == &amdgpu_bo_user_destroy ||
 109	    bo->destroy == &amdgpu_bo_vm_destroy)
 110		return true;
 111
 112	return false;
 113}
 114
 115/**
 116 * amdgpu_bo_placement_from_domain - set buffer's placement
 117 * @abo: &amdgpu_bo buffer object whose placement is to be set
 118 * @domain: requested domain
 119 *
 120 * Sets buffer's placement according to requested domain and the buffer's
 121 * flags.
 122 */
 123void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 124{
 125	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 126	struct ttm_placement *placement = &abo->placement;
 127	struct ttm_place *places = abo->placements;
 128	u64 flags = abo->flags;
 129	u32 c = 0;
 130
 131	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 132		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 
 133
 134		places[c].fpfn = 0;
 135		places[c].lpfn = 0;
 
 
 
 
 
 
 
 
 
 136		places[c].mem_type = TTM_PL_VRAM;
 137		places[c].flags = 0;
 138
 139		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 140			places[c].lpfn = visible_pfn;
 141		else
 142			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 143
 144		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
 
 145			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
 
 
 
 
 
 
 
 
 
 146		c++;
 147	}
 148
 149	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 150		places[c].fpfn = 0;
 151		places[c].lpfn = 0;
 152		places[c].mem_type =
 153			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
 154			AMDGPU_PL_PREEMPT : TTM_PL_TT;
 155		places[c].flags = 0;
 
 
 
 
 
 
 
 156		c++;
 157	}
 158
 159	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
 160		places[c].fpfn = 0;
 161		places[c].lpfn = 0;
 162		places[c].mem_type = TTM_PL_SYSTEM;
 163		places[c].flags = 0;
 164		c++;
 165	}
 166
 167	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
 168		places[c].fpfn = 0;
 169		places[c].lpfn = 0;
 170		places[c].mem_type = AMDGPU_PL_GDS;
 171		places[c].flags = 0;
 172		c++;
 173	}
 174
 175	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
 176		places[c].fpfn = 0;
 177		places[c].lpfn = 0;
 178		places[c].mem_type = AMDGPU_PL_GWS;
 179		places[c].flags = 0;
 180		c++;
 181	}
 182
 183	if (domain & AMDGPU_GEM_DOMAIN_OA) {
 184		places[c].fpfn = 0;
 185		places[c].lpfn = 0;
 186		places[c].mem_type = AMDGPU_PL_OA;
 187		places[c].flags = 0;
 188		c++;
 189	}
 190
 191	if (!c) {
 192		places[c].fpfn = 0;
 193		places[c].lpfn = 0;
 194		places[c].mem_type = TTM_PL_SYSTEM;
 195		places[c].flags = 0;
 196		c++;
 197	}
 198
 199	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
 200
 201	placement->num_placement = c;
 202	placement->placement = places;
 203
 204	placement->num_busy_placement = c;
 205	placement->busy_placement = places;
 206}
 207
 208/**
 209 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 210 *
 211 * @adev: amdgpu device object
 212 * @size: size for the new BO
 213 * @align: alignment for the new BO
 214 * @domain: where to place it
 215 * @bo_ptr: used to initialize BOs in structures
 216 * @gpu_addr: GPU addr of the pinned BO
 217 * @cpu_addr: optional CPU address mapping
 218 *
 219 * Allocates and pins a BO for kernel internal use, and returns it still
 220 * reserved.
 221 *
 222 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 223 *
 224 * Returns:
 225 * 0 on success, negative error code otherwise.
 226 */
 227int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 228			      unsigned long size, int align,
 229			      u32 domain, struct amdgpu_bo **bo_ptr,
 230			      u64 *gpu_addr, void **cpu_addr)
 231{
 232	struct amdgpu_bo_param bp;
 233	bool free = false;
 234	int r;
 235
 236	if (!size) {
 237		amdgpu_bo_unref(bo_ptr);
 238		return 0;
 239	}
 240
 241	memset(&bp, 0, sizeof(bp));
 242	bp.size = size;
 243	bp.byte_align = align;
 244	bp.domain = domain;
 245	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
 246		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 247	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 248	bp.type = ttm_bo_type_kernel;
 249	bp.resv = NULL;
 250	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 251
 252	if (!*bo_ptr) {
 253		r = amdgpu_bo_create(adev, &bp, bo_ptr);
 254		if (r) {
 255			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 256				r);
 257			return r;
 258		}
 259		free = true;
 260	}
 261
 262	r = amdgpu_bo_reserve(*bo_ptr, false);
 263	if (r) {
 264		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
 265		goto error_free;
 266	}
 267
 268	r = amdgpu_bo_pin(*bo_ptr, domain);
 269	if (r) {
 270		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
 271		goto error_unreserve;
 272	}
 273
 274	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
 275	if (r) {
 276		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
 277		goto error_unpin;
 278	}
 279
 280	if (gpu_addr)
 281		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
 282
 283	if (cpu_addr) {
 284		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 285		if (r) {
 286			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
 287			goto error_unpin;
 288		}
 289	}
 290
 291	return 0;
 292
 293error_unpin:
 294	amdgpu_bo_unpin(*bo_ptr);
 295error_unreserve:
 296	amdgpu_bo_unreserve(*bo_ptr);
 297
 298error_free:
 299	if (free)
 300		amdgpu_bo_unref(bo_ptr);
 301
 302	return r;
 303}
 304
 305/**
 306 * amdgpu_bo_create_kernel - create BO for kernel use
 307 *
 308 * @adev: amdgpu device object
 309 * @size: size for the new BO
 310 * @align: alignment for the new BO
 311 * @domain: where to place it
 312 * @bo_ptr: used to initialize BOs in structures
 313 * @gpu_addr: GPU addr of the pinned BO
 314 * @cpu_addr: optional CPU address mapping
 315 *
 316 * Allocates and pins a BO for kernel internal use.
 317 *
 318 * Note: a new BO is only created if *bo_ptr points to NULL.
 319 *
 320 * Returns:
 321 * 0 on success, negative error code otherwise.
 322 */
 323int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 324			    unsigned long size, int align,
 325			    u32 domain, struct amdgpu_bo **bo_ptr,
 326			    u64 *gpu_addr, void **cpu_addr)
 327{
 328	int r;
 329
 330	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
 331				      gpu_addr, cpu_addr);
 332
 333	if (r)
 334		return r;
 335
 336	if (*bo_ptr)
 337		amdgpu_bo_unreserve(*bo_ptr);
 338
 339	return 0;
 340}
 341
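/*
 * Illustrative sketch, not part of the driver: a typical caller pairs
 * amdgpu_bo_create_kernel() with amdgpu_bo_free_kernel(); a new BO is
 * allocated because *bo_ptr starts out NULL, and the returned
 * addresses stay valid while the BO remains pinned ("adev" is
 * placeholder context):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	memset(cpu_addr, 0, PAGE_SIZE);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */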
 342/**
 343 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 344 *
 345 * @adev: amdgpu device object
 346 * @offset: offset of the BO
 347 * @size: size of the BO
 348 * @domain: where to place it
 349 * @bo_ptr: used to initialize BOs in structures
 350 * @cpu_addr: optional CPU address mapping
 351 *
 352 * Creates a kernel BO at a specific offset in the address space of the domain.
 353 *
 354 * Returns:
 355 * 0 on success, negative error code otherwise.
 356 */
 357int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 358			       uint64_t offset, uint64_t size, uint32_t domain,
 359			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
 360{
 361	struct ttm_operation_ctx ctx = { false, false };
 362	unsigned int i;
 363	int r;
 364
 365	offset &= PAGE_MASK;
 366	size = ALIGN(size, PAGE_SIZE);
 367
 368	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
 369				      NULL, cpu_addr);
 370	if (r)
 371		return r;
 372
 373	if ((*bo_ptr) == NULL)
 374		return 0;
 375
 376	/*
 377	 * Remove the original mem node and create a new one at the
 378	 * requested position.
 379	 */
 380	if (cpu_addr)
 381		amdgpu_bo_kunmap(*bo_ptr);
 382
 383	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 384
 385	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 386		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 387		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 388	}
 389	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
 390			     &(*bo_ptr)->tbo.resource, &ctx);
 391	if (r)
 392		goto error;
 393
 394	if (cpu_addr) {
 395		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 396		if (r)
 397			goto error;
 398	}
 399
 400	amdgpu_bo_unreserve(*bo_ptr);
 401	return 0;
 402
 403error:
 404	amdgpu_bo_unreserve(*bo_ptr);
 405	amdgpu_bo_unref(bo_ptr);
 406	return r;
 407}
 408
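/*
 * Illustrative sketch, not part of the driver: carving out a fixed
 * range, e.g. memory pre-reserved by firmware ("offset" and "size"
 * are placeholders):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, offset, size,
 *				       AMDGPU_GEM_DOMAIN_VRAM,
 *				       &bo, NULL);
 */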
 409/**
 410 * amdgpu_bo_free_kernel - free BO for kernel use
 411 *
 412 * @bo: amdgpu BO to free
 413 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 414 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 415 *
 416 * Unmaps, unpins and unreferences a BO for kernel internal use.
 417 */
 418void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 419			   void **cpu_addr)
 420{
 421	if (*bo == NULL)
 422		return;
 423
 424	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
 425		if (cpu_addr)
 426			amdgpu_bo_kunmap(*bo);
 427
 428		amdgpu_bo_unpin(*bo);
 429		amdgpu_bo_unreserve(*bo);
 430	}
 431	amdgpu_bo_unref(bo);
 432
 433	if (gpu_addr)
 434		*gpu_addr = 0;
 435
 436	if (cpu_addr)
 437		*cpu_addr = NULL;
 438}
 439
 440/* Validate that the BO size fits in the requested domain */
 441static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 442					  unsigned long size, u32 domain)
 443{
 444	struct ttm_resource_manager *man = NULL;
 445
 446	/*
 447	 * If GTT is part of requested domains the check must succeed to
 448	 * allow fall back to GTT
 449	 */
 450	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 451		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 452
 453		if (size < (man->size << PAGE_SHIFT))
 454			return true;
 455		else
 456			goto fail;
 457	}
 458
 459	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 460		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 461
 462		if (size < (man->size << PAGE_SHIFT))
 463			return true;
 464		else
 465			goto fail;
 466	}
 467
 469	/* TODO add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
 470	return true;
 471
 472fail:
 473	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
 474		  man->size << PAGE_SHIFT);
 475	return false;
 476}
 477
 478bool amdgpu_bo_support_uswc(u64 bo_flags)
 479{
 481#ifdef CONFIG_X86_32
 482	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 483	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 484	 */
 485	return false;
 486#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 487	/* Don't try to enable write-combining when it can't work, or things
 488	 * may be slow
 489	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 490	 */
 491
 492#ifndef CONFIG_COMPILE_TEST
 493#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 494	 thanks to write-combining
 495#endif
 496
 497	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 498		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 499			      "better performance thanks to write-combining\n");
 500	return false;
 501#else
 502	/* For architectures that don't support WC memory,
 503	 * mask out the WC flag from the BO
 504	 */
 505	if (!drm_arch_can_wc_memory())
 506		return false;
 507
 508	return true;
 509#endif
 510}
 511
 512/**
 513 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 514 * @adev: amdgpu device object
 515 * @bp: parameters to be used for the buffer object
 516 * @bo_ptr: pointer to the buffer object pointer
 517 *
 518 * Creates an &amdgpu_bo buffer object.
 519 *
 520 * Returns:
 521 * 0 for success or a negative error code on failure.
 522 */
 523int amdgpu_bo_create(struct amdgpu_device *adev,
 524			       struct amdgpu_bo_param *bp,
 525			       struct amdgpu_bo **bo_ptr)
 526{
 527	struct ttm_operation_ctx ctx = {
 528		.interruptible = (bp->type != ttm_bo_type_kernel),
 529		.no_wait_gpu = bp->no_wait_gpu,
 530		/* We opt to avoid OOM on system pages allocations */
 531		.gfp_retry_mayfail = true,
 532		.allow_res_evict = bp->type != ttm_bo_type_kernel,
 533		.resv = bp->resv
 534	};
 535	struct amdgpu_bo *bo;
 536	unsigned long page_align, size = bp->size;
 537	int r;
 538
 539	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
 540	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 541		/* GWS and OA don't need any alignment. */
 542		page_align = bp->byte_align;
 543		size <<= PAGE_SHIFT;
 544	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 545		/* Both size and alignment must be a multiple of 4. */
 546		page_align = ALIGN(bp->byte_align, 4);
 547		size = ALIGN(size, 4) << PAGE_SHIFT;
 548	} else {
 549		/* Memory should be aligned at least to a page size. */
 550		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 551		size = ALIGN(size, PAGE_SIZE);
 552	}
 553
 554	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 555		return -ENOMEM;
 556
 557	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 558
 559	*bo_ptr = NULL;
 560	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
 561	if (bo == NULL)
 562		return -ENOMEM;
 563	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
 564	bo->vm_bo = NULL;
 565	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 566		bp->domain;
 567	bo->allowed_domains = bo->preferred_domains;
 568	if (bp->type != ttm_bo_type_kernel &&
 569	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 570		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 571
 572	bo->flags = bp->flags;
 573
 574	if (!amdgpu_bo_support_uswc(bo->flags))
 575		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 576
 577	bo->tbo.bdev = &adev->mman.bdev;
 578	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
 579			  AMDGPU_GEM_DOMAIN_GDS))
 580		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 581	else
 582		amdgpu_bo_placement_from_domain(bo, bp->domain);
 583	if (bp->type == ttm_bo_type_kernel)
 584		bo->tbo.priority = 1;
 585
 586	if (!bp->destroy)
 587		bp->destroy = &amdgpu_bo_destroy;
 588
 589	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 590				 &bo->placement, page_align, &ctx, NULL,
 591				 bp->resv, bp->destroy);
 592	if (unlikely(r != 0))
 593		return r;
 594
 595	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 596	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 597	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 598		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 599					     ctx.bytes_moved);
 600	else
 601		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 602
 603	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 604	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 605		struct dma_fence *fence;
 606
 607		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 608		if (unlikely(r))
 609			goto fail_unreserve;
 610
 611		amdgpu_bo_fence(bo, fence, false);
 612		dma_fence_put(bo->tbo.moving);
 613		bo->tbo.moving = dma_fence_get(fence);
 614		dma_fence_put(fence);
 615	}
 616	if (!bp->resv)
 617		amdgpu_bo_unreserve(bo);
 618	*bo_ptr = bo;
 619
 620	trace_amdgpu_bo_create(bo);
 621
 622	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
 623	if (bp->type == ttm_bo_type_device)
 624		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 625
 626	return 0;
 627
 628fail_unreserve:
 629	if (!bp->resv)
 630		dma_resv_unlock(bo->tbo.base.resv);
 631	amdgpu_bo_unref(&bo);
 632	return r;
 633}
 634
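/*
 * Illustrative sketch, not part of the driver: a minimal
 * amdgpu_bo_param for a kernel-type allocation; unset fields stay
 * zero thanks to the memset:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */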
 635/**
 636 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 637 * @adev: amdgpu device object
 638 * @bp: parameters to be used for the buffer object
 639 * @ubo_ptr: pointer to the buffer object pointer
 640 *
 641 * Creates a BO to be used by a userspace application.
 642 *
 643 * Returns:
 644 * 0 for success or a negative error code on failure.
 645 */
 647int amdgpu_bo_create_user(struct amdgpu_device *adev,
 648			  struct amdgpu_bo_param *bp,
 649			  struct amdgpu_bo_user **ubo_ptr)
 650{
 651	struct amdgpu_bo *bo_ptr;
 652	int r;
 653
 654	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
 655	bp->destroy = &amdgpu_bo_user_destroy;
 656	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 657	if (r)
 658		return r;
 659
 660	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
 661	return r;
 662}
 663
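/*
 * Illustrative sketch, not part of the driver: GEM object creation
 * goes through amdgpu_bo_create_user() and then works with the
 * embedded BO ("bp" is assumed to be set up as for amdgpu_bo_create();
 * bo_ptr_size and destroy are filled in by this function):
 *
 *	struct amdgpu_bo_user *ubo;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 *	if (r)
 *		return r;
 *	bo = &ubo->bo;
 */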
 664/**
 665 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 666 * @adev: amdgpu device object
 667 * @bp: parameters to be used for the buffer object
 668 * @vmbo_ptr: pointer to the buffer object pointer
 669 *
 670 * Creates a BO to be used by GPUVM.
 671 *
 672 * Returns:
 673 * 0 for success or a negative error code on failure.
 674 */
 676int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 677			struct amdgpu_bo_param *bp,
 678			struct amdgpu_bo_vm **vmbo_ptr)
 679{
 680	struct amdgpu_bo *bo_ptr;
 681	int r;
 682
 683	/* bo_ptr_size will be determined by the caller and it depends on
 684	 * the number of amdgpu_vm_pt entries.
 685	 */
 686	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
 687	bp->destroy = &amdgpu_bo_vm_destroy;
 688	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 689	if (r)
 690		return r;
 691
 692	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
 693	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
 694	return r;
 695}
 696
 697/**
 698 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 699 * @bo: pointer to the buffer object
 700 *
 701 * Sets the placement according to the domain and changes the placement
 702 * and caching policy of the buffer object accordingly.
 703 * This is used for validating shadow bos.  It calls ttm_bo_validate() to
 704 * make sure the buffer is resident where it needs to be.
 705 *
 706 * Returns:
 707 * 0 for success or a negative error code on failure.
 708 */
 709int amdgpu_bo_validate(struct amdgpu_bo *bo)
 710{
 711	struct ttm_operation_ctx ctx = { false, false };
 712	uint32_t domain;
 713	int r;
 714
 715	if (bo->tbo.pin_count)
 716		return 0;
 717
 718	domain = bo->preferred_domains;
 719
 720retry:
 721	amdgpu_bo_placement_from_domain(bo, domain);
 722	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 723	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 724		domain = bo->allowed_domains;
 725		goto retry;
 726	}
 727
 728	return r;
 729}
 730
 731/**
 732 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 733 *
 734 * @vmbo: BO that will be inserted into the shadow list
 735 *
 736 * Inserts a BO into the shadow list.
 737 */
 738void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 739{
 740	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
 741
 742	mutex_lock(&adev->shadow_list_lock);
 743	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
 744	mutex_unlock(&adev->shadow_list_lock);
 745}
 746
 747/**
 748 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 749 *
 750 * @shadow: &amdgpu_bo shadow to be restored
 751 * @fence: dma_fence associated with the operation
 752 *
 753 * Copies a buffer object's shadow content back to the object.
 754 * This is used for recovering a buffer from its shadow in case of a gpu
 755 * reset where vram context may be lost.
 756 *
 757 * Returns:
 758 * 0 for success or a negative error code on failure.
 759 */
 760int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 762{
 763	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
 764	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 765	uint64_t shadow_addr, parent_addr;
 766
 767	shadow_addr = amdgpu_bo_gpu_offset(shadow);
 768	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
 769
 770	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
 771				  amdgpu_bo_size(shadow), NULL, fence,
 772				  true, false, false);
 773}
 774
 775/**
 776 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 777 * @bo: &amdgpu_bo buffer object to be mapped
 778 * @ptr: kernel virtual address to be returned
 779 *
 780 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 781 * amdgpu_bo_kptr() to get the kernel virtual address.
 782 *
 783 * Returns:
 784 * 0 for success or a negative error code on failure.
 785 */
 786int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 787{
 788	void *kptr;
 789	long r;
 790
 791	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 792		return -EPERM;
 793
 794	kptr = amdgpu_bo_kptr(bo);
 795	if (kptr) {
 796		if (ptr)
 797			*ptr = kptr;
 798		return 0;
 799	}
 800
 801	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
 802				  MAX_SCHEDULE_TIMEOUT);
 803	if (r < 0)
 804		return r;
 805
 806	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 807	if (r)
 808		return r;
 809
 810	if (ptr)
 811		*ptr = amdgpu_bo_kptr(bo);
 812
 813	return 0;
 814}
 815
 816/**
 817 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 818 * @bo: &amdgpu_bo buffer object
 819 *
 820 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 821 *
 822 * Returns:
 823 * the virtual address of a buffer object area.
 824 */
 825void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
 826{
 827	bool is_iomem;
 828
 829	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 830}
 831
 832/**
 833 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 834 * @bo: &amdgpu_bo buffer object to be unmapped
 835 *
 836 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 837 */
 838void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 839{
 840	if (bo->kmap.bo)
 841		ttm_bo_kunmap(&bo->kmap);
 842}
 843
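/*
 * Illustrative sketch, not part of the driver: CPU access to a
 * reserved BO is bracketed by amdgpu_bo_kmap()/amdgpu_bo_kunmap(); the
 * map fails with -EPERM for BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS ("data" and "len" are placeholders):
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */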
 844/**
 845 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 846 * @bo: &amdgpu_bo buffer object
 847 *
 848 * References the contained &ttm_buffer_object.
 849 *
 850 * Returns:
 851 * a refcounted pointer to the &amdgpu_bo buffer object.
 852 */
 853struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 854{
 855	if (bo == NULL)
 856		return NULL;
 857
 858	ttm_bo_get(&bo->tbo);
 859	return bo;
 860}
 861
 862/**
 863 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 864 * @bo: &amdgpu_bo buffer object
 865 *
 866 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 867 */
 868void amdgpu_bo_unref(struct amdgpu_bo **bo)
 869{
 870	struct ttm_buffer_object *tbo;
 871
 872	if ((*bo) == NULL)
 873		return;
 874
 875	tbo = &((*bo)->tbo);
 876	ttm_bo_put(tbo);
 877	*bo = NULL;
 878}
 879
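/*
 * Illustrative sketch, not part of the driver: every reference taken
 * with amdgpu_bo_ref() is dropped with amdgpu_bo_unref(), which also
 * clears the caller's pointer:
 *
 *	struct amdgpu_bo *ref = amdgpu_bo_ref(bo);
 *	...
 *	amdgpu_bo_unref(&ref);
 *	(ref is NULL afterwards)
 */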
 880/**
 881 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 882 * @bo: &amdgpu_bo buffer object to be pinned
 883 * @domain: domain to be pinned to
 884 * @min_offset: the start of requested address range
 885 * @max_offset: the end of requested address range
 886 *
 887 * Pins the buffer object according to requested domain and address range. If
 888 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 889 * pin_count and pin_size accordingly.
 890 *
 891 * Pinning means to lock pages in memory along with keeping them at a fixed
 892 * offset. It is required when a buffer can not be moved, for example, when
 893 * a display buffer is being scanned out.
 894 *
 895 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 896 * where to pin a buffer if there are specific restrictions on where a buffer
 897 * must be located.
 898 *
 899 * Returns:
 900 * 0 for success or a negative error code on failure.
 901 */
 902int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 903			     u64 min_offset, u64 max_offset)
 904{
 905	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 906	struct ttm_operation_ctx ctx = { false, false };
 907	int r, i;
 908
 909	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 910		return -EPERM;
 911
 912	if (WARN_ON_ONCE(min_offset > max_offset))
 913		return -EINVAL;
 914
 915	/* A shared bo cannot be migrated to VRAM */
 916	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
 917		if (domain & AMDGPU_GEM_DOMAIN_GTT)
 918			domain = AMDGPU_GEM_DOMAIN_GTT;
 919		else
 920			return -EINVAL;
 921	}
 922
 923	if (bo->tbo.pin_count) {
 924		uint32_t mem_type = bo->tbo.resource->mem_type;
 925		uint32_t mem_flags = bo->tbo.resource->placement;
 926
 927		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 928			return -EINVAL;
 929
 930		if ((mem_type == TTM_PL_VRAM) &&
 931		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
 932		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
 933			return -EINVAL;
 934
 935		ttm_bo_pin(&bo->tbo);
 936
 937		if (max_offset != 0) {
 938			u64 domain_start = amdgpu_ttm_domain_start(adev,
 939								   mem_type);
 940			WARN_ON_ONCE(max_offset <
 941				     (amdgpu_bo_gpu_offset(bo) - domain_start));
 942		}
 943
 944		return 0;
 945	}
 946
 947	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
 948	 * See function amdgpu_display_supported_domains()
 949	 */
 950	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 951
 952	if (bo->tbo.base.import_attach)
 953		dma_buf_pin(bo->tbo.base.import_attach);
 954
 955	/* force to pin into visible video ram */
 956	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 957		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 958	amdgpu_bo_placement_from_domain(bo, domain);
 959	for (i = 0; i < bo->placement.num_placement; i++) {
 960		unsigned fpfn, lpfn;
 961
 962		fpfn = min_offset >> PAGE_SHIFT;
 963		lpfn = max_offset >> PAGE_SHIFT;
 964
 965		if (fpfn > bo->placements[i].fpfn)
 966			bo->placements[i].fpfn = fpfn;
 967		if (!bo->placements[i].lpfn ||
 968		    (lpfn && lpfn < bo->placements[i].lpfn))
 969			bo->placements[i].lpfn = lpfn;
 970	}
 971
 972	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 973	if (unlikely(r)) {
 974		dev_err(adev->dev, "%p pin failed\n", bo);
 975		goto error;
 976	}
 977
 978	ttm_bo_pin(&bo->tbo);
 979
 980	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 981	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 982		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 983		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
 984			     &adev->visible_pin_size);
 985	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 986		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
 987	}
 988
 989error:
 990	return r;
 991}
 992
 993/**
 994 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 995 * @bo: &amdgpu_bo buffer object to be pinned
 996 * @domain: domain to be pinned to
 997 *
 998 * A simple wrapper around amdgpu_bo_pin_restricted().
 999 * Provides a simpler API for buffers that do not have any strict restrictions
1000 * on where a buffer must be located.
1001 *
1002 * Returns:
1003 * 0 for success or a negative error code on failure.
1004 */
1005int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
1006{
1007	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1008	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1009}
1010
1011/**
1012 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
1013 * @bo: &amdgpu_bo buffer object to be unpinned
1014 *
1015 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
1016 * Changes placement and pin size accordingly.
1017 */
1021void amdgpu_bo_unpin(struct amdgpu_bo *bo)
1022{
1023	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1024
1025	ttm_bo_unpin(&bo->tbo);
1026	if (bo->tbo.pin_count)
1027		return;
1028
1029	if (bo->tbo.base.import_attach)
1030		dma_buf_unpin(bo->tbo.base.import_attach);
1031
1032	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
1033		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
1034		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
1035			     &adev->visible_pin_size);
1036	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1037		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
1038	}
1039}
1040
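/*
 * Illustrative sketch, not part of the driver: pinning requires the
 * BO to be reserved, and every successful amdgpu_bo_pin() must be
 * balanced by an amdgpu_bo_unpin():
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r) {
 *		... use amdgpu_bo_gpu_offset(bo) here ...
 *		amdgpu_bo_unpin(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */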
1041/**
1042 * amdgpu_bo_evict_vram - evict VRAM buffers
1043 * @adev: amdgpu device object
1044 *
1045 * Evicts all VRAM buffers on the lru list of the memory type.
1046 * Mainly used for evicting vram at suspend time.
1047 *
1048 * Returns:
1049 * 0 for success or a negative error code on failure.
1050 */
1051int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
1052{
1053	struct ttm_resource_manager *man;
1054
1055	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
1056		/* No need to evict vram on APUs for suspend to ram */
1057		return 0;
1058	}
1059
1060	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1061	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
1062}
1063
1064static const char *amdgpu_vram_names[] = {
1065	"UNKNOWN",
1066	"GDDR1",
1067	"DDR2",
1068	"GDDR3",
1069	"GDDR4",
1070	"GDDR5",
1071	"HBM",
1072	"DDR3",
1073	"DDR4",
1074	"GDDR6",
1075	"DDR5"
1076};
1077
1078/**
1079 * amdgpu_bo_init - initialize memory manager
1080 * @adev: amdgpu device object
1081 *
1082 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1083 *
1084 * Returns:
1085 * 0 for success or a negative error code on failure.
1086 */
1087int amdgpu_bo_init(struct amdgpu_device *adev)
1088{
1089	/* On A+A platform, VRAM can be mapped as WB */
1090	if (!adev->gmc.xgmi.connected_to_cpu) {
1091		/* reserve PAT memory space to WC for VRAM */
1092		arch_io_reserve_memtype_wc(adev->gmc.aper_base,
1093				adev->gmc.aper_size);
1094
1095		/* Add an MTRR for the VRAM */
1096		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
1097				adev->gmc.aper_size);
1098	}
1099
1100	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
1101		 adev->gmc.mc_vram_size >> 20,
1102		 (unsigned long long)adev->gmc.aper_size >> 20);
1103	DRM_INFO("RAM width %dbits %s\n",
1104		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1105	return amdgpu_ttm_init(adev);
1106}
1107
1108/**
1109 * amdgpu_bo_fini - tear down memory manager
1110 * @adev: amdgpu device object
1111 *
1112 * Reverses amdgpu_bo_init() to tear down memory manager.
1113 */
1114void amdgpu_bo_fini(struct amdgpu_device *adev)
1115{
1116	amdgpu_ttm_fini(adev);
1117}
1118
1119/**
1120 * amdgpu_bo_set_tiling_flags - set tiling flags
1121 * @bo: &amdgpu_bo buffer object
1122 * @tiling_flags: new flags
1123 *
1124 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
1125 * kernel driver to set the tiling flags on a buffer.
1126 *
1127 * Returns:
1128 * 0 for success or a negative error code on failure.
1129 */
1130int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1131{
1132	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1133	struct amdgpu_bo_user *ubo;
1134
1135	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1136	if (adev->family <= AMDGPU_FAMILY_CZ &&
1137	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1138		return -EINVAL;
1139
1140	ubo = to_amdgpu_bo_user(bo);
1141	ubo->tiling_flags = tiling_flags;
1142	return 0;
1143}
1144
1145/**
1146 * amdgpu_bo_get_tiling_flags - get tiling flags
1147 * @bo: &amdgpu_bo buffer object
1148 * @tiling_flags: returned flags
1149 *
1150 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
1151 * get the tiling flags of a buffer.
1152 */
1153void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1154{
1155	struct amdgpu_bo_user *ubo;
1156
1157	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1158	dma_resv_assert_held(bo->tbo.base.resv);
1159	ubo = to_amdgpu_bo_user(bo);
1160
1161	if (tiling_flags)
1162		*tiling_flags = ubo->tiling_flags;
1163}
1164
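/*
 * Illustrative sketch, not part of the driver: tiling flags are
 * queried and updated under the BO reservation lock:
 *
 *	u64 flags;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		amdgpu_bo_get_tiling_flags(bo, &flags);
 *		r = amdgpu_bo_set_tiling_flags(bo, flags);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */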
1165/**
1166 * amdgpu_bo_set_metadata - set metadata
1167 * @bo: &amdgpu_bo buffer object
1168 * @metadata: new metadata
1169 * @metadata_size: size of the new metadata
1170 * @flags: flags of the new metadata
1171 *
1172 * Sets buffer object's metadata, its size and flags.
1173 * Used via GEM ioctl.
1174 *
1175 * Returns:
1176 * 0 for success or a negative error code on failure.
1177 */
1178int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1179			    uint32_t metadata_size, uint64_t flags)
1180{
1181	struct amdgpu_bo_user *ubo;
1182	void *buffer;
1183
1184	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1185	ubo = to_amdgpu_bo_user(bo);
1186	if (!metadata_size) {
1187		if (ubo->metadata_size) {
1188			kfree(ubo->metadata);
1189			ubo->metadata = NULL;
1190			ubo->metadata_size = 0;
1191		}
1192		return 0;
1193	}
1194
1195	if (metadata == NULL)
1196		return -EINVAL;
1197
1198	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1199	if (buffer == NULL)
1200		return -ENOMEM;
1201
1202	kfree(ubo->metadata);
1203	ubo->metadata_flags = flags;
1204	ubo->metadata = buffer;
1205	ubo->metadata_size = metadata_size;
1206
1207	return 0;
1208}
1209
1210/**
1211 * amdgpu_bo_get_metadata - get metadata
1212 * @bo: &amdgpu_bo buffer object
1213 * @buffer: returned metadata
1214 * @buffer_size: size of the buffer
1215 * @metadata_size: size of the returned metadata
1216 * @flags: flags of the returned metadata
1217 *
1218 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1219 * less than metadata_size.
1220 * Used via GEM ioctl.
1221 *
1222 * Returns:
1223 * 0 for success or a negative error code on failure.
1224 */
1225int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1226			   size_t buffer_size, uint32_t *metadata_size,
1227			   uint64_t *flags)
1228{
1229	struct amdgpu_bo_user *ubo;
1230
1231	if (!buffer && !metadata_size)
1232		return -EINVAL;
1233
1234	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1235	ubo = to_amdgpu_bo_user(bo);
1236	if (metadata_size)
1237		*metadata_size = ubo->metadata_size;
1238
1239	if (buffer) {
1240		if (buffer_size < ubo->metadata_size)
1241			return -EINVAL;
1242
1243		if (ubo->metadata_size)
1244			memcpy(buffer, ubo->metadata, ubo->metadata_size);
1245	}
1246
1247	if (flags)
1248		*flags = ubo->metadata_flags;
1249
1250	return 0;
1251}
1252
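/*
 * Illustrative sketch, not part of the driver: metadata set on a user
 * BO round-trips through amdgpu_bo_get_metadata() (the buffer contents
 * are placeholders):
 *
 *	u8 md[16] = {};
 *	uint32_t md_size;
 *	uint64_t md_flags;
 *	int r;
 *
 *	r = amdgpu_bo_set_metadata(bo, md, sizeof(md), 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, md, sizeof(md),
 *				   &md_size, &md_flags);
 */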
1253/**
1254 * amdgpu_bo_move_notify - notification about a memory move
1255 * @bo: pointer to a buffer object
1256 * @evict: if this move is evicting the buffer from the graphics address space
1257 * @new_mem: new information of the buffer object
1258 *
1259 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
1260 * bookkeeping.
1261 * TTM driver callback which is called when ttm moves a buffer.
1262 */
1263void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1264			   bool evict,
1265			   struct ttm_resource *new_mem)
1266{
1267	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1268	struct amdgpu_bo *abo;
1269	struct ttm_resource *old_mem = bo->resource;
1270
1271	if (!amdgpu_bo_is_amdgpu_bo(bo))
1272		return;
1273
1274	abo = ttm_to_amdgpu_bo(bo);
1275	amdgpu_vm_bo_invalidate(adev, abo, evict);
1276
1277	amdgpu_bo_kunmap(abo);
1278
1279	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
1280	    bo->resource->mem_type != TTM_PL_SYSTEM)
1281		dma_buf_move_notify(abo->tbo.base.dma_buf);
1282
1283	/* remember the eviction */
1284	if (evict)
1285		atomic64_inc(&adev->num_evictions);
1286
1287	/* update statistics */
1288	if (!new_mem)
1289		return;
1290
1291	/* move_notify is called before move happens */
1292	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
1293}
1294
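/**
 * amdgpu_bo_get_memory - account BO size to per-domain usage counters
 * @bo: &amdgpu_bo buffer object
 * @vram_mem: accumulated size of VRAM-placed BOs
 * @gtt_mem: accumulated size of GTT-placed BOs
 * @cpu_mem: accumulated size of BOs in system memory
 *
 * Adds the BO's size to the counter matching its current placement.
 */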
1295void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
1296				uint64_t *gtt_mem, uint64_t *cpu_mem)
1297{
1298	unsigned int domain;
1299
1300	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
1301	switch (domain) {
1302	case AMDGPU_GEM_DOMAIN_VRAM:
1303		*vram_mem += amdgpu_bo_size(bo);
1304		break;
1305	case AMDGPU_GEM_DOMAIN_GTT:
1306		*gtt_mem += amdgpu_bo_size(bo);
1307		break;
1308	case AMDGPU_GEM_DOMAIN_CPU:
1309	default:
1310		*cpu_mem += amdgpu_bo_size(bo);
1311		break;
1312	}
1313}
1314
1315/**
1316 * amdgpu_bo_release_notify - notification about a BO being released
1317 * @bo: pointer to a buffer object
1318 *
1319 * Wipes VRAM buffers whose contents should not be leaked before the
1320 * memory is released.
1321 */
1322void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1323{
1324	struct dma_fence *fence = NULL;
1325	struct amdgpu_bo *abo;
1326	int r;
1327
1328	if (!amdgpu_bo_is_amdgpu_bo(bo))
1329		return;
1330
1331	abo = ttm_to_amdgpu_bo(bo);
1332
1333	if (abo->kfd_bo)
1334		amdgpu_amdkfd_unreserve_memory_limit(abo);
1335
1336	/* We only remove the fence if the resv has individualized. */
1337	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
1338			&& bo->base.resv != &bo->base._resv);
1339	if (bo->base.resv == &bo->base._resv)
1340		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
1341
1342	if (bo->resource->mem_type != TTM_PL_VRAM ||
1343	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
1344		return;
1345
1346	dma_resv_lock(bo->base.resv, NULL);
1347
1348	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
1349	if (!WARN_ON(r)) {
1350		amdgpu_bo_fence(abo, fence, false);
1351		dma_fence_put(fence);
1352	}
1353
1354	dma_resv_unlock(bo->base.resv);
1355}
1356
1357/**
1358 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1359 * @bo: pointer to a buffer object
1360 *
1361 * Notifies the driver we are taking a fault on this BO and have reserved it,
1362 * also performs bookkeeping.
1363 * TTM driver callback for dealing with vm faults.
1364 *
1365 * Returns:
1366 * 0 for success or a VM_FAULT_ error code on failure.
1367 */
1368vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1369{
1370	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1371	struct ttm_operation_ctx ctx = { false, false };
1372	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1373	unsigned long offset;
1374	int r;
1375
1376	/* Remember that this BO was accessed by the CPU */
1377	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1378
1379	if (bo->resource->mem_type != TTM_PL_VRAM)
1380		return 0;
1381
1382	offset = bo->resource->start << PAGE_SHIFT;
1383	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
1384		return 0;
1385
1386	/* Can't move a pinned BO to visible VRAM */
1387	if (abo->tbo.pin_count > 0)
1388		return VM_FAULT_SIGBUS;
1389
1390	/* hurrah the memory is not visible! */
1391	atomic64_inc(&adev->num_vram_cpu_page_faults);
1392	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
1393					AMDGPU_GEM_DOMAIN_GTT);
1394
1395	/* Avoid costly evictions; only set GTT as a busy placement */
1396	abo->placement.num_busy_placement = 1;
1397	abo->placement.busy_placement = &abo->placements[1];
1398
1399	r = ttm_bo_validate(bo, &abo->placement, &ctx);
1400	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
1401		return VM_FAULT_NOPAGE;
1402	else if (unlikely(r))
1403		return VM_FAULT_SIGBUS;
1404
1405	offset = bo->resource->start << PAGE_SHIFT;
1406	/* this should never happen */
1407	if (bo->resource->mem_type == TTM_PL_VRAM &&
1408	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
1409		return VM_FAULT_SIGBUS;
1410
1411	ttm_bo_move_to_lru_tail_unlocked(bo);
1412	return 0;
1413}
1414
1415/**
1416 * amdgpu_bo_fence - add fence to buffer object
1417 *
1418 * @bo: buffer object in question
1419 * @fence: fence to add
1420 * @shared: true if fence should be added shared
1421 *
 * Adds the fence to the buffer object's reservation object, as either a
 * shared or an exclusive fence.
1422 */
1423void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1424		     bool shared)
1425{
1426	struct dma_resv *resv = bo->tbo.base.resv;
1427
1428	if (shared)
1429		dma_resv_add_shared_fence(resv, fence);
1430	else
1431		dma_resv_add_excl_fence(resv, fence);
1432}
1433
1434/**
1435 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
1436 *
1437 * @adev: amdgpu device pointer
1438 * @resv: reservation object to sync to
1439 * @sync_mode: synchronization mode
1440 * @owner: fence owner
1441 * @intr: Whether the wait is interruptible
1442 *
1443 * Extract the fences from the reservation object and waits for them to finish.
1444 *
1445 * Returns:
1446 * 0 on success, errno otherwise.
1447 */
1448int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
1449			     enum amdgpu_sync_mode sync_mode, void *owner,
1450			     bool intr)
1451{
1452	struct amdgpu_sync sync;
1453	int r;
1454
1455	amdgpu_sync_create(&sync);
1456	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
1457	r = amdgpu_sync_wait(&sync, intr);
1458	amdgpu_sync_free(&sync);
1459	return r;
1460}
1461
1462/**
1463 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
1464 * @bo: buffer object to wait for
1465 * @owner: fence owner
1466 * @intr: Whether the wait is interruptible
1467 *
1468 * Wrapper to wait for fences in a BO.
1469 * Returns:
1470 * 0 on success, errno otherwise.
1471 */
1472int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1473{
1474	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1475
1476	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
1477					AMDGPU_SYNC_NE_OWNER, owner, intr);
1478}
1479
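/*
 * Illustrative sketch, not part of the driver: waiting for all fences
 * on a BO that were not emitted by the given owner:
 *
 *	int r;
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
 *	if (r)
 *		return r;
 */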
1480/**
1481 * amdgpu_bo_gpu_offset - return GPU offset of bo
1482 * @bo:	amdgpu object for which we query the offset
1483 *
1484 * Note: the object should be either pinned or reserved when calling this
1485 * function; it might be useful to add a check for this for debugging.
1486 *
1487 * Returns:
1488 * current GPU offset of the object.
1489 */
1490u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1491{
1492	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
1493	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1494		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1495	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1496	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
1497		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1498
1499	return amdgpu_bo_gpu_offset_no_check(bo);
1500}
1501
1502/**
1503 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
1504 * @bo:	amdgpu object for which we query the offset
1505 *
1506 * Returns:
1507 * current GPU offset of the object without raising warnings.
1508 */
1509u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
1510{
1511	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1512	uint64_t offset;
1513
1514	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1515		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
1516
1517	return amdgpu_gmc_sign_extend(offset);
1518}
1519
1520/**
1521 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
1522 * @adev: amdgpu device object
1523 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1524 *
1525 * Returns:
1526 * Which of the allowed domains is preferred for pinning the BO for scanout.
1527 */
1528uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
1529					    uint32_t domain)
1530{
1531	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
1532		domain = AMDGPU_GEM_DOMAIN_VRAM;
1533		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1534			domain = AMDGPU_GEM_DOMAIN_GTT;
1535	}
1536	return domain;
1537}
1538
1539#if defined(CONFIG_DEBUG_FS)
1540#define amdgpu_bo_print_flag(m, bo, flag)		        \
1541	do {							\
1542		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
1543			seq_printf((m), " " #flag);		\
1544		}						\
1545	} while (0)
1546
1547/**
1548 * amdgpu_bo_print_info - print BO info in debugfs file
1549 *
1550 * @id: Index or Id of the BO
1551 * @bo: Requested BO for printing info
1552 * @m: debugfs file
1553 *
1554 * Print BO information in debugfs file
1555 *
1556 * Returns:
1557 * Size of the BO in bytes.
1558 */
1559u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
1560{
1561	struct dma_buf_attachment *attachment;
1562	struct dma_buf *dma_buf;
1563	unsigned int domain;
1564	const char *placement;
1565	unsigned int pin_count;
1566	u64 size;
1567
1568	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
1569	switch (domain) {
1570	case AMDGPU_GEM_DOMAIN_VRAM:
1571		placement = "VRAM";
1572		break;
1573	case AMDGPU_GEM_DOMAIN_GTT:
1574		placement = " GTT";
1575		break;
1576	case AMDGPU_GEM_DOMAIN_CPU:
1577	default:
1578		placement = " CPU";
1579		break;
1580	}
1581
1582	size = amdgpu_bo_size(bo);
1583	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
1584			id, size, placement);
1585
1586	pin_count = READ_ONCE(bo->tbo.pin_count);
1587	if (pin_count)
1588		seq_printf(m, " pin count %d", pin_count);
1589
1590	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1591	attachment = READ_ONCE(bo->tbo.base.import_attach);
1592
1593	if (attachment)
1594		seq_printf(m, " imported from %p", dma_buf);
1595	else if (dma_buf)
1596		seq_printf(m, " exported as %p", dma_buf);
1597
1598	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
1599	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
1600	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
1601	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
1602	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
1603	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
1604	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
1605
1606	seq_puts(m, "\n");
1607
1608	return size;
1609}
1610#endif