/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

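/*
 * Editor's example (not part of the driver): a minimal sketch of the typical
 * lifecycle of a kernel-internal BO using the helpers in this file, assuming
 * a valid struct amdgpu_device *adev:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_ptr, 0, PAGE_SIZE);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
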
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_vm *vmbo;

	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

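/*
 * Editor's example (not part of the driver): requesting VRAM with a GTT
 * fallback produces two placements which TTM tries in order. A sketch:
 *
 *	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					     AMDGPU_GEM_DOMAIN_GTT);
 *
 * Afterwards abo->placements[0] targets TTM_PL_VRAM, limited to the
 * CPU-visible window because of the flag, and abo->placements[1] targets
 * TTM_PL_TT.
 */
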
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

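/*
 * Editor's example (not part of the driver): reserving a fixed VRAM range,
 * e.g. a region the firmware set up for scanout. The offset and size here
 * are made-up values for illustration only:
 *
 *	struct amdgpu_bo *stolen_bo = NULL;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0x100000, 0x80000,
 *				       &stolen_bo, NULL);
 *	if (r)
 *		return r;
 */
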
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits within the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (man && size < man->size)
			return true;
		else if (!man)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		goto fail;
	} else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (man && size < man->size)
			return true;
		goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	if (man)
		DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
			  man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

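/*
 * Editor's example (not part of the driver): filling in an amdgpu_bo_param
 * for a plain kernel GTT buffer; the field values are assumptions for
 * illustration only:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = SZ_1M;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
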
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
	 * is initialized.
	 */
	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
	return r;
}

/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Inserts a BO into the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	mutex_unlock(&adev->shadow_list_lock);
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where VRAM context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

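/*
 * Editor's example (not part of the driver): CPU access to a BO that was
 * created without AMDGPU_GEM_CREATE_NO_CPU_ACCESS. A minimal sketch,
 * assuming the BO is reserved by the caller:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */
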
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

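/*
 * Editor's example (not part of the driver): pinning a buffer into the
 * CPU-visible part of VRAM, e.g. for scanout; a sketch that assumes the BO
 * is already reserved:
 *
 *	int r;
 *
 *	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM, 0,
 *				     adev->gmc.visible_vram_size);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_unpin(bo);
 */
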
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
				adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by the GEM ioctl or kernel
 * driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel
 * driver to query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

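/*
 * Editor's example (not part of the driver): round-tripping tiling flags
 * with the AMDGPU_TILING_SET()/AMDGPU_TILING_GET() helpers from
 * amdgpu_drm.h; the swizzle value is an assumption for illustration:
 *
 *	u64 flags = AMDGPU_TILING_SET(SWIZZLE_MODE, 0x25);
 *	u64 readback;
 *	int r;
 *
 *	r = amdgpu_bo_set_tiling_flags(bo, flags);
 *	...
 *	amdgpu_bo_get_tiling_flags(bo, &readback);
 *	WARN_ON(AMDGPU_TILING_GET(readback, SWIZZLE_MODE) != 0x25);
 */
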
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}

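/*
 * Editor's example (not part of the driver): storing and reading back a
 * small opaque metadata blob on a user BO; the payload is made up for
 * illustration:
 *
 *	u32 payload[2] = { 0x12345678, 0x9abcdef0 };
 *	u32 readback[2];
 *	uint32_t size;
 *	uint64_t mflags;
 *	int r;
 *
 *	r = amdgpu_bo_set_metadata(bo, payload, sizeof(payload), 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, readback, sizeof(readback),
 *				   &size, &mflags);
 */
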
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
				uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	unsigned int domain;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		*vram_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		*gtt_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		*cpu_mem += amdgpu_bo_size(bo);
		break;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has been individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || adev->shutdown)
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a VM_FAULT_* code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah, the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->resource->start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

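/*
 * Editor's example (not part of the driver): attaching the fence of a copy
 * job to a BO so that later users synchronize against it; assumes a fence
 * obtained from, e.g., amdgpu_copy_buffer():
 *
 *	struct dma_fence *fence;
 *	...
 *	amdgpu_bo_fence(bo, fence, false);	(false: write access)
 *	dma_fence_put(fence);
 */
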
/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

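/*
 * Editor's example (not part of the driver): waiting interruptibly for the
 * fences on a BO whose owner differs from the given one, using the
 * AMDGPU_FENCE_OWNER_UNDEFINED owner from amdgpu_sync.h:
 *
 *	int r;
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *	if (r)
 *		return r;
 */
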
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
			id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif
v4.10.11
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 30 *    Dave Airlie
 31 */
 32#include <linux/list.h>
 33#include <linux/slab.h>
 34#include <drm/drmP.h>
 
 
 35#include <drm/amdgpu_drm.h>
 36#include <drm/drm_cache.h>
 37#include "amdgpu.h"
 38#include "amdgpu_trace.h"
 
 39
 
 
 
 
 
 
 
 
 
 
 
 
 40
 
 
 
 41
 42static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
 43						struct ttm_mem_reg *mem)
 44{
 45	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
 46		return 0;
 47
 48	return ((mem->start << PAGE_SHIFT) + mem->size) >
 49		adev->mc.visible_vram_size ?
 50		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
 51		mem->size;
 
 52}
 53
 54static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 55		       struct ttm_mem_reg *old_mem,
 56		       struct ttm_mem_reg *new_mem)
 57{
 58	u64 vis_size;
 59	if (!adev)
 60		return;
 61
 62	if (new_mem) {
 63		switch (new_mem->mem_type) {
 64		case TTM_PL_TT:
 65			atomic64_add(new_mem->size, &adev->gtt_usage);
 66			break;
 67		case TTM_PL_VRAM:
 68			atomic64_add(new_mem->size, &adev->vram_usage);
 69			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
 70			atomic64_add(vis_size, &adev->vram_vis_usage);
 71			break;
 72		}
 73	}
 74
 75	if (old_mem) {
 76		switch (old_mem->mem_type) {
 77		case TTM_PL_TT:
 78			atomic64_sub(old_mem->size, &adev->gtt_usage);
 79			break;
 80		case TTM_PL_VRAM:
 81			atomic64_sub(old_mem->size, &adev->vram_usage);
 82			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
 83			atomic64_sub(vis_size, &adev->vram_vis_usage);
 84			break;
 85		}
 86	}
 87}
 88
 89static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 90{
 91	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 92	struct amdgpu_bo *bo;
 93
 94	bo = container_of(tbo, struct amdgpu_bo, tbo);
 95
 96	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 97
 98	drm_gem_object_release(&bo->gem_base);
 99	amdgpu_bo_unref(&bo->parent);
100	if (!list_empty(&bo->shadow_list)) {
101		mutex_lock(&adev->shadow_list_lock);
102		list_del_init(&bo->shadow_list);
103		mutex_unlock(&adev->shadow_list_lock);
104	}
105	kfree(bo->metadata);
106	kfree(bo);
107}
108
109bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 
 
 
 
 
 
 
 
 
 
110{
111	if (bo->destroy == &amdgpu_ttm_bo_destroy)
 
 
112		return true;
 
113	return false;
114}
115
116static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
117				      struct ttm_placement *placement,
118				      struct ttm_place *places,
119				      u32 domain, u64 flags)
 
 
 
 
 
120{
 
 
 
 
121	u32 c = 0;
122
123	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
124		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
125		unsigned lpfn = 0;
126
127		/* This forces a reallocation if the flag wasn't set before */
128		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
129			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
130
131		places[c].fpfn = 0;
132		places[c].lpfn = lpfn;
133		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
134			TTM_PL_FLAG_VRAM;
135		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
136			places[c].lpfn = visible_pfn;
137		else
138			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 
 
 
139		c++;
140	}
141
142	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
143		places[c].fpfn = 0;
144		places[c].lpfn = 0;
145		places[c].flags = TTM_PL_FLAG_TT;
146		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
147			places[c].flags |= TTM_PL_FLAG_WC |
148				TTM_PL_FLAG_UNCACHED;
149		else
150			places[c].flags |= TTM_PL_FLAG_CACHED;
151		c++;
152	}
153
154	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
155		places[c].fpfn = 0;
156		places[c].lpfn = 0;
157		places[c].flags = TTM_PL_FLAG_SYSTEM;
158		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
159			places[c].flags |= TTM_PL_FLAG_WC |
160				TTM_PL_FLAG_UNCACHED;
161		else
162			places[c].flags |= TTM_PL_FLAG_CACHED;
163		c++;
164	}
165
166	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
167		places[c].fpfn = 0;
168		places[c].lpfn = 0;
169		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
 
170		c++;
171	}
172
173	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
174		places[c].fpfn = 0;
175		places[c].lpfn = 0;
176		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
 
177		c++;
178	}
179
180	if (domain & AMDGPU_GEM_DOMAIN_OA) {
181		places[c].fpfn = 0;
182		places[c].lpfn = 0;
183		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
 
184		c++;
185	}
186
187	if (!c) {
188		places[c].fpfn = 0;
189		places[c].lpfn = 0;
190		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 
191		c++;
192	}
193
 
 
194	placement->num_placement = c;
195	placement->placement = places;
196
197	placement->num_busy_placement = c;
198	placement->busy_placement = places;
199}
200
201void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
202{
203	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
204
205	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
206				  domain, abo->flags);
207}
208
209static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
210					struct ttm_placement *placement)
211{
212	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
213
214	memcpy(bo->placements, placement->placement,
215	       placement->num_placement * sizeof(struct ttm_place));
216	bo->placement.num_placement = placement->num_placement;
217	bo->placement.num_busy_placement = placement->num_busy_placement;
218	bo->placement.placement = bo->placements;
219	bo->placement.busy_placement = bo->placements;
220}
221
222/**
223 * amdgpu_bo_create_kernel - create BO for kernel use
224 *
225 * @adev: amdgpu device object
226 * @size: size for the new BO
227 * @align: alignment for the new BO
228 * @domain: where to place it
229 * @bo_ptr: resulting BO
230 * @gpu_addr: GPU addr of the pinned BO
231 * @cpu_addr: optional CPU address mapping
232 *
233 * Allocates and pins a BO for kernel internal use.
 
 
 
234 *
235 * Returns 0 on success, negative error code otherwise.
 
236 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU address was stored, may be NULL
 * @cpu_addr: pointer to where the BO's CPU mapping was stored, may be NULL
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

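/**
 * amdgpu_bo_create_restricted - create a BO with a caller-provided placement
 * @adev: amdgpu device object
 * @size: size of the BO in bytes
 * @byte_align: alignment of the BO in bytes
 * @kernel: whether the BO is for kernel use (uninterruptible validation)
 * @domain: allowed placement domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional sg table for imported buffers
 * @placement: TTM placement to use instead of deriving one from @domain
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Returns 0 on success, negative error code otherwise.
 */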
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */

	if (!resv) {
		bool locked;

		reservation_object_init(&bo->tbo.ttm_resv);
		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
		WARN_ON(!locked);
	}
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
			&amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;

fail_unreserve:
	ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

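/**
 * amdgpu_bo_create_shadow - create a GTT shadow BO for @bo
 * @adev: amdgpu device object
 * @size: size of the shadow BO
 * @byte_align: alignment of the shadow BO
 * @bo: BO to create the shadow for
 *
 * Creates a write-combined GTT shadow BO and links it to @bo, so the
 * BO's contents can be backed up and restored around a GPU reset.
 * Does nothing if a shadow already exists.
 */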
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

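/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @size: size of the BO in bytes
 * @byte_align: alignment of the BO in bytes
 * @kernel: whether the BO is for kernel use
 * @domain: allowed placement domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional sg table for imported buffers
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Derives a TTM placement from @domain and @flags and creates the BO;
 * when AMDGPU_GEM_CREATE_SHADOW is requested and a backup is needed, a
 * shadow BO is created as well.
 *
 * Returns 0 on success, negative error code otherwise.
 */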
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}

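/**
 * amdgpu_bo_backup_to_shadow - copy a BO's contents to its shadow
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to back up, must have a shadow attached
 * @resv: reservation object the copy waits on
 * @fence: returned fence for the copy operation
 * @direct: whether to submit the copy directly to the ring
 *
 * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative
 * error code if the copy could not be scheduled.
 */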
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

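/**
 * amdgpu_bo_restore_from_shadow - copy a shadow's contents back to its BO
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to restore, must have a shadow attached
 * @resv: reservation object the copy waits on
 * @fence: returned fence for the copy operation
 * @direct: whether to submit the copy directly to the ring
 *
 * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative
 * error code if the copy could not be scheduled.
 */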
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

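/**
 * amdgpu_bo_kmap - map the buffer object into kernel address space
 * @bo: BO to map
 * @ptr: optional location to store the kernel virtual address in
 *
 * Waits for any pending fences, then maps the BO through TTM. If the BO
 * is already mapped, the cached pointer is returned.
 *
 * Returns 0 on success, -EPERM if the BO was created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, or a negative error code otherwise.
 */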
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

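/**
 * amdgpu_bo_kunmap - unmap the buffer object from kernel address space
 * @bo: BO to unmap
 *
 * Does nothing if the BO is not currently mapped.
 */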
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

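/**
 * amdgpu_bo_ref - take an additional reference on a BO
 * @bo: BO to reference, may be NULL
 *
 * Returns @bo, or NULL when @bo is NULL.
 */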
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

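/**
 * amdgpu_bo_unref - drop a reference on a BO
 * @bo: pointer to the BO to unreference; *@bo is cleared once the
 *	reference has been dropped
 *
 * Does nothing when *@bo is already NULL.
 */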
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

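/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 * @bo: BO to pin
 * @domain: domain to pin the BO into (AMDGPU_GEM_DOMAIN_*)
 * @min_offset: lowest acceptable offset within the domain, in bytes
 * @max_offset: highest acceptable offset within the domain, 0 for any
 * @gpu_addr: optional location to store the BO's GPU address in
 *
 * If the BO is already pinned, only the pin count is incremented and
 * @domain must then match the current placement. Userptr BOs cannot be
 * pinned and yield -EPERM.
 *
 * Returns 0 on success, negative error code otherwise.
 */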
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force pinning into visible VRAM */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}
	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

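/**
 * amdgpu_bo_pin - pin a BO without an offset restriction
 * @bo: BO to pin
 * @domain: domain to pin the BO into (AMDGPU_GEM_DOMAIN_*)
 * @gpu_addr: optional location to store the BO's GPU address in
 *
 * Returns 0 on success, negative error code otherwise.
 */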
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

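/**
 * amdgpu_bo_unpin - unpin a BO
 * @bo: BO to unpin
 *
 * Decrements the pin count; once it reaches zero the BO is validated
 * again without TTM_PL_FLAG_NO_EVICT and the pin accounting is updated.
 *
 * Returns 0 on success, negative error code otherwise.
 */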
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}

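/**
 * amdgpu_bo_evict_vram - evict all buffers from VRAM
 * @adev: amdgpu device object
 *
 * Mainly used to clean up VRAM before suspend or hibernation.
 */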
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

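/**
 * amdgpu_bo_init - set up the VRAM aperture and initialize TTM
 * @adev: amdgpu device object
 *
 * Reserves the VRAM aperture as write-combined and adds an MTRR for it,
 * then initializes the TTM memory manager.
 *
 * Returns 0 on success, negative error code otherwise.
 */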
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->mc.aper_base,
				   adev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		adev->mc.mc_vram_size >> 20,
		(unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

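/**
 * amdgpu_bo_fini - tear down TTM and release the VRAM aperture
 * @adev: amdgpu device object
 */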
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

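/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: BO to modify
 * @tiling_flags: new flags
 *
 * Returns 0 on success, -EINVAL if the tile split value is invalid.
 */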
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

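/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: BO to query, must be reserved
 * @tiling_flags: returned flags, may be NULL
 */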
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

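/**
 * amdgpu_bo_set_metadata - set the BO's opaque metadata blob
 * @bo: BO to modify
 * @metadata: metadata to copy, must not be NULL when @metadata_size != 0
 * @metadata_size: size of @metadata; 0 frees any existing metadata
 * @flags: metadata flags to store alongside the blob
 *
 * Returns 0 on success, negative error code otherwise.
 */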
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

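/**
 * amdgpu_bo_get_metadata - retrieve the BO's metadata blob
 * @bo: BO to query
 * @buffer: optional destination for the metadata
 * @buffer_size: size of @buffer, must hold the whole blob when @buffer is set
 * @metadata_size: optional returned size of the metadata
 * @flags: optional returned metadata flags
 *
 * At least one of @buffer and @metadata_size must be non-NULL.
 *
 * Returns 0 on success, -EINVAL if @buffer is too small or no output
 * was requested.
 */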
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

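/**
 * amdgpu_bo_move_notify - TTM callback invoked before a BO move
 * @bo: BO about to move
 * @new_mem: new placement, NULL when the BO is being destroyed
 *
 * Invalidates any VM mappings of the BO and updates the per-domain
 * memory usage statistics.
 */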
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(adev, abo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

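/**
 * amdgpu_bo_fault_reserve_notify - TTM fault callback
 * @bo: BO that triggered a CPU page fault
 *
 * Called when userspace faults on the BO while it lies outside of the
 * CPU-visible part of VRAM. Tries to move the BO into visible VRAM,
 * falling back to GTT when that fails with -ENOMEM.
 *
 * Returns 0 on success, negative error code otherwise.
 */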
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	/* TODO: figure out how to map scattered VRAM to the CPU */
	if ((offset + size) <= adev->mc.visible_vram_size &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn ||
		     abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}