v6.9.4
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30 *    Dave Airlie
  31 */
  32
  33#include <linux/dma-mapping.h>
  34#include <linux/iommu.h>
  35#include <linux/pagemap.h>
  36#include <linux/sched/task.h>
  37#include <linux/sched/mm.h>
  38#include <linux/seq_file.h>
  39#include <linux/slab.h>
  40#include <linux/swap.h>
  41#include <linux/dma-buf.h>
  42#include <linux/sizes.h>
  43#include <linux/module.h>
  44
  45#include <drm/drm_drv.h>
  46#include <drm/ttm/ttm_bo.h>
  47#include <drm/ttm/ttm_placement.h>
  48#include <drm/ttm/ttm_range_manager.h>
  49#include <drm/ttm/ttm_tt.h>
  50
  51#include <drm/amdgpu_drm.h>
  52
  53#include "amdgpu.h"
  54#include "amdgpu_object.h"
  55#include "amdgpu_trace.h"
  56#include "amdgpu_amdkfd.h"
  57#include "amdgpu_sdma.h"
  58#include "amdgpu_ras.h"
  59#include "amdgpu_hmm.h"
  60#include "amdgpu_atomfirmware.h"
  61#include "amdgpu_res_cursor.h"
  62#include "bif/bif_4_1_d.h"
  63
  64MODULE_IMPORT_NS(DMA_BUF);
  65
  66#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)
  67
  68static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
  69				   struct ttm_tt *ttm,
  70				   struct ttm_resource *bo_mem);
  71static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
  72				      struct ttm_tt *ttm);
  73
  74static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
  75				    unsigned int type,
  76				    uint64_t size_in_page)
  77{
  78	return ttm_range_man_init(&adev->mman.bdev, type,
  79				  false, size_in_page);
  80}
  81
  82/**
  83 * amdgpu_evict_flags - Compute placement flags
  84 *
  85 * @bo: The buffer object to evict
  86 * @placement: Possible destination(s) for evicted BO
  87 *
  88 * Fill in placement data when ttm_bo_evict() is called
  89 */
  90static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  91				struct ttm_placement *placement)
  92{
  93	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
  94	struct amdgpu_bo *abo;
  95	static const struct ttm_place placements = {
  96		.fpfn = 0,
  97		.lpfn = 0,
  98		.mem_type = TTM_PL_SYSTEM,
  99		.flags = 0
 100	};
 101
 102	/* Don't handle scatter gather BOs */
 103	if (bo->type == ttm_bo_type_sg) {
 104		placement->num_placement = 0;
 105		return;
 106	}
 107
 108	/* Object isn't an AMDGPU object so ignore */
 109	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
 110		placement->placement = &placements;
 111		placement->num_placement = 1;
 112		return;
 113	}
 114
 115	abo = ttm_to_amdgpu_bo(bo);
 116	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
 117		placement->num_placement = 0;
 118		return;
 119	}
 120
 121	switch (bo->resource->mem_type) {
 122	case AMDGPU_PL_GDS:
 123	case AMDGPU_PL_GWS:
 124	case AMDGPU_PL_OA:
 125	case AMDGPU_PL_DOORBELL:
 126		placement->num_placement = 0;
 127		return;
 128
 129	case TTM_PL_VRAM:
 130		if (!adev->mman.buffer_funcs_enabled) {
 131			/* Move to system memory */
 132			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 133
 134		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 135			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 136			   amdgpu_res_cpu_visible(adev, bo->resource)) {
 137
 138			/* Try evicting to the CPU inaccessible part of VRAM
 139			 * first, but only set GTT as busy placement, so this
 140			 * BO will be evicted to GTT rather than causing other
 141			 * BOs to be evicted from VRAM
 142			 */
 143			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 144							AMDGPU_GEM_DOMAIN_GTT |
 145							AMDGPU_GEM_DOMAIN_CPU);
 146			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 147			abo->placements[0].lpfn = 0;
 148			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
 149		} else {
 150			/* Move to GTT memory */
 151			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
 152							AMDGPU_GEM_DOMAIN_CPU);
 153		}
 154		break;
 155	case TTM_PL_TT:
 156	case AMDGPU_PL_PREEMPT:
 157	default:
 158		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 159		break;
 160	}
 161	*placement = abo->placement;
 162}
 163
 164/**
 165 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 166 * @bo: buffer object to map
 167 * @mem: memory object to map
 168 * @mm_cur: range to map
 169 * @window: which GART window to use
 170 * @ring: DMA ring to use for the copy
 171 * @tmz: if we should setup a TMZ enabled mapping
 172 * @size: in number of bytes to map, out number of bytes mapped
 173 * @addr: resulting address inside the MC address space
 174 *
 175 * Setup one of the GART windows to access a specific piece of memory or return
 176 * the physical address for local memory.
 177 */
 178static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 179				 struct ttm_resource *mem,
 180				 struct amdgpu_res_cursor *mm_cur,
 181				 unsigned int window, struct amdgpu_ring *ring,
 182				 bool tmz, uint64_t *size, uint64_t *addr)
 183{
 184	struct amdgpu_device *adev = ring->adev;
 185	unsigned int offset, num_pages, num_dw, num_bytes;
 186	uint64_t src_addr, dst_addr;
 187	struct amdgpu_job *job;
 188	void *cpu_addr;
 189	uint64_t flags;
 190	unsigned int i;
 191	int r;
 192
 193	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 194	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 195
 196	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
 197		return -EINVAL;
 198
 199	/* Map only what can't be accessed directly */
 200	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
 201		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
 202			mm_cur->start;
 203		return 0;
 204	}
 205
 206
 207	/*
 208	 * If start begins at an offset inside the page, then adjust the size
 209	 * and addr accordingly
 210	 */
 211	offset = mm_cur->start & ~PAGE_MASK;
 212
 213	num_pages = PFN_UP(*size + offset);
 214	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
 215
 216	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
 217
 218	*addr = adev->gmc.gart_start;
 219	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 220		AMDGPU_GPU_PAGE_SIZE;
 221	*addr += offset;
 222
 223	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 224	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 225
 226	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 227				     AMDGPU_FENCE_OWNER_UNDEFINED,
 228				     num_dw * 4 + num_bytes,
 229				     AMDGPU_IB_POOL_DELAYED, &job);
 230	if (r)
 231		return r;
 232
 233	src_addr = num_dw * 4;
 234	src_addr += job->ibs[0].gpu_addr;
 235
 236	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 237	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
 238	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 239				dst_addr, num_bytes, false);
 240
 241	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 242	WARN_ON(job->ibs[0].length_dw > num_dw);
 243
 244	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
 245	if (tmz)
 246		flags |= AMDGPU_PTE_TMZ;
 247
 248	cpu_addr = &job->ibs[0].ptr[num_dw];
 249
 250	if (mem->mem_type == TTM_PL_TT) {
 251		dma_addr_t *dma_addr;
 252
 253		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
 254		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
 255	} else {
 256		dma_addr_t dma_address;
 257
 258		dma_address = mm_cur->start;
 259		dma_address += adev->vm_manager.vram_base_offset;
 260
 261		for (i = 0; i < num_pages; ++i) {
 262			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
 263					flags, cpu_addr);
 264			dma_address += PAGE_SIZE;
 265		}
 266	}
 267
 268	dma_fence_put(amdgpu_job_submit(job));
 269	return 0;
 270}
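/*
 * Worked example of the window addressing above (illustrative, assuming a
 * 4 KiB GPU page size): with AMDGPU_GTT_MAX_TRANSFER_SIZE pages per window,
 * window 1 resolves to
 *
 *	addr = gart_start
 *	     + 1 * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 *	     + (mm_cur->start & ~PAGE_MASK);
 *
 * i.e. each window is a fixed-size slot in the GART aperture and only the
 * in-page offset of the cursor is carried over.
 */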
 271
 272/**
 273 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 274 * @adev: amdgpu device
 275 * @src: buffer/address where to read from
 276 * @dst: buffer/address where to write to
 277 * @size: number of bytes to copy
 278 * @tmz: if a secure copy should be used
 279 * @resv: resv object to sync to
 280 * @f: Returns the last fence if multiple jobs are submitted.
 281 *
 282 * The function copies @size bytes from {src->mem + src->offset} to
 283 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 284 * move and different for a BO to BO copy.
 285 *
 286 */
 287int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 288			       const struct amdgpu_copy_mem *src,
 289			       const struct amdgpu_copy_mem *dst,
 290			       uint64_t size, bool tmz,
 291			       struct dma_resv *resv,
 292			       struct dma_fence **f)
 293{
 294	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 295	struct amdgpu_res_cursor src_mm, dst_mm;
 296	struct dma_fence *fence = NULL;
 297	int r = 0;
 298
 299	if (!adev->mman.buffer_funcs_enabled) {
 300		DRM_ERROR("Trying to move memory with ring turned off.\n");
 301		return -EINVAL;
 302	}
 303
 304	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
 305	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
 306
 307	mutex_lock(&adev->mman.gtt_window_lock);
 308	while (src_mm.remaining) {
 309		uint64_t from, to, cur_size;
 310		struct dma_fence *next;
 311
 312		/* Never copy more than 256MiB at once to avoid a timeout */
 313		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
 314
 315		/* Map src to window 0 and dst to window 1. */
 316		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
 317					  0, ring, tmz, &cur_size, &from);
 318		if (r)
 319			goto error;
 320
 321		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
 322					  1, ring, tmz, &cur_size, &to);
 323		if (r)
 324			goto error;
 325
 326		r = amdgpu_copy_buffer(ring, from, to, cur_size,
 327				       resv, &next, false, true, tmz);
 328		if (r)
 329			goto error;
 330
 331		dma_fence_put(fence);
 332		fence = next;
 333
 334		amdgpu_res_next(&src_mm, cur_size);
 335		amdgpu_res_next(&dst_mm, cur_size);
 336	}
 337error:
 338	mutex_unlock(&adev->mman.gtt_window_lock);
 339	if (f)
 340		*f = dma_fence_get(fence);
 341	dma_fence_put(fence);
 342	return r;
 343}
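/*
 * Illustrative usage sketch for a BO-to-BO copy (hypothetical caller,
 * error handling trimmed; src_bo/dst_bo are assumed amdgpu_bo pointers):
 *
 *	struct amdgpu_copy_mem src = { .bo = &src_bo->tbo, .mem = src_bo->tbo.resource, .offset = 0 };
 *	struct amdgpu_copy_mem dst = { .bo = &dst_bo->tbo, .mem = dst_bo->tbo.resource, .offset = 0 };
 *	struct dma_fence *f = NULL;
 *	int r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, PAGE_SIZE, false,
 *					   src_bo->tbo.base.resv, &f);
 *	if (!r)
 *		dma_fence_wait(f, false);
 *	dma_fence_put(f);
 */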
 344
 345/*
 346 * amdgpu_move_blit - Copy an entire buffer to another buffer
 347 *
 348 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 349 * help move buffers to and from VRAM.
 350 */
 351static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 352			    bool evict,
 353			    struct ttm_resource *new_mem,
 354			    struct ttm_resource *old_mem)
 355{
 356	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 357	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 358	struct amdgpu_copy_mem src, dst;
 359	struct dma_fence *fence = NULL;
 360	int r;
 361
 362	src.bo = bo;
 363	dst.bo = bo;
 364	src.mem = old_mem;
 365	dst.mem = new_mem;
 366	src.offset = 0;
 367	dst.offset = 0;
 368
 369	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 370				       new_mem->size,
 371				       amdgpu_bo_encrypted(abo),
 372				       bo->base.resv, &fence);
 373	if (r)
 374		goto error;
 375
 376	/* clear the space being freed */
 377	if (old_mem->mem_type == TTM_PL_VRAM &&
 378	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 379		struct dma_fence *wipe_fence = NULL;
 380
 381		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
 382					false);
 383		if (r) {
 384			goto error;
 385		} else if (wipe_fence) {
 386			dma_fence_put(fence);
 387			fence = wipe_fence;
 388		}
 389	}
 390
 391	/* Always block for VM page tables before committing the new location */
 392	if (bo->type == ttm_bo_type_kernel)
 393		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
 394	else
 395		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 396	dma_fence_put(fence);
 397	return r;
 398
 399error:
 400	if (fence)
 401		dma_fence_wait(fence, false);
 402	dma_fence_put(fence);
 403	return r;
 404}
 405
 406/**
 407 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 408 * @adev: amdgpu device
 409 * @res: the resource to check
 410 *
 411 * Returns: true if the full resource is CPU visible, false otherwise.
 412 */
 413bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
 414			    struct ttm_resource *res)
 415{
 416	struct amdgpu_res_cursor cursor;
 417
 418	if (!res)
 419		return false;
 420
 421	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
 422	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
 423		return true;
 424
 425	if (res->mem_type != TTM_PL_VRAM)
 426		return false;
 427
 428	amdgpu_res_first(res, 0, res->size, &cursor);
 429	while (cursor.remaining) {
 430		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
 431			return false;
 432		amdgpu_res_next(&cursor, cursor.size);
 433	}
 434
 435	return true;
 436}
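/*
 * Example of the check above (illustrative numbers): a two-page VRAM
 * resource whose last chunk ends one page beyond visible_vram_size fails
 * the "cursor.start + cursor.size > visible_vram_size" test on that chunk,
 * so the whole resource is reported as not CPU visible even though its
 * first page lies inside the visible window.
 */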
 437
 438/*
 439 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 440 *
 441 * Called by amdgpu_bo_move()
 442 */
 443static bool amdgpu_res_copyable(struct amdgpu_device *adev,
 444				struct ttm_resource *mem)
 445{
 446	if (!amdgpu_res_cpu_visible(adev, mem))
 447		return false;
 448
 449	/* ttm_resource_ioremap only supports contiguous memory */
 450	if (mem->mem_type == TTM_PL_VRAM &&
 451	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
 452		return false;
 453
 454	return true;
 455}
 456
 457/*
 458 * amdgpu_bo_move - Move a buffer object to a new memory location
 459 *
 460 * Called by ttm_bo_handle_move_mem()
 461 */
 462static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 463			  struct ttm_operation_ctx *ctx,
 464			  struct ttm_resource *new_mem,
 465			  struct ttm_place *hop)
 466{
 467	struct amdgpu_device *adev;
 468	struct amdgpu_bo *abo;
 469	struct ttm_resource *old_mem = bo->resource;
 470	int r;
 471
 472	if (new_mem->mem_type == TTM_PL_TT ||
 473	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
 474		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
 475		if (r)
 476			return r;
 477	}
 478
 479	abo = ttm_to_amdgpu_bo(bo);
 480	adev = amdgpu_ttm_adev(bo->bdev);
 481
 482	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
 483			 bo->ttm == NULL)) {
 484		amdgpu_bo_move_notify(bo, evict, new_mem);
 485		ttm_bo_move_null(bo, new_mem);
 486		return 0;
 487	}
 488	if (old_mem->mem_type == TTM_PL_SYSTEM &&
 489	    (new_mem->mem_type == TTM_PL_TT ||
 490	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
 491		amdgpu_bo_move_notify(bo, evict, new_mem);
 492		ttm_bo_move_null(bo, new_mem);
 493		return 0;
 494	}
 495	if ((old_mem->mem_type == TTM_PL_TT ||
 496	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
 497	    new_mem->mem_type == TTM_PL_SYSTEM) {
 498		r = ttm_bo_wait_ctx(bo, ctx);
 499		if (r)
 500			return r;
 501
 502		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
 503		amdgpu_bo_move_notify(bo, evict, new_mem);
 504		ttm_resource_free(bo, &bo->resource);
 505		ttm_bo_assign_mem(bo, new_mem);
 506		return 0;
 507	}
 508
 509	if (old_mem->mem_type == AMDGPU_PL_GDS ||
 510	    old_mem->mem_type == AMDGPU_PL_GWS ||
 511	    old_mem->mem_type == AMDGPU_PL_OA ||
 512	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
 513	    new_mem->mem_type == AMDGPU_PL_GDS ||
 514	    new_mem->mem_type == AMDGPU_PL_GWS ||
 515	    new_mem->mem_type == AMDGPU_PL_OA ||
 516	    new_mem->mem_type == AMDGPU_PL_DOORBELL) {
 517		/* Nothing to save here */
 518		amdgpu_bo_move_notify(bo, evict, new_mem);
 519		ttm_bo_move_null(bo, new_mem);
 520		return 0;
 521	}
 522
 523	if (bo->type == ttm_bo_type_device &&
 524	    new_mem->mem_type == TTM_PL_VRAM &&
 525	    old_mem->mem_type != TTM_PL_VRAM) {
 526		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
 527		 * accesses the BO after it's moved.
 528		 */
 529		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 530	}
 531
 532	if (adev->mman.buffer_funcs_enabled &&
 533	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
 534	      new_mem->mem_type == TTM_PL_VRAM) ||
 535	     (old_mem->mem_type == TTM_PL_VRAM &&
 536	      new_mem->mem_type == TTM_PL_SYSTEM))) {
 537		hop->fpfn = 0;
 538		hop->lpfn = 0;
 539		hop->mem_type = TTM_PL_TT;
 540		hop->flags = TTM_PL_FLAG_TEMPORARY;
 541		return -EMULTIHOP;
 542	}
 543
 544	amdgpu_bo_move_notify(bo, evict, new_mem);
 545	if (adev->mman.buffer_funcs_enabled)
 546		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
 547	else
 548		r = -ENODEV;
 549
 550	if (r) {
 551		/* Check that all memory is CPU accessible */
 552		if (!amdgpu_res_copyable(adev, old_mem) ||
 553		    !amdgpu_res_copyable(adev, new_mem)) {
 554			pr_err("Move buffer fallback to memcpy unavailable\n");
 555			return r;
 556		}
 557
 558		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 559		if (r)
 560			return r;
 561	}
 562
 563	/* update statistics after the move */
 564	if (evict)
 565		atomic64_inc(&adev->num_evictions);
 566	atomic64_add(bo->base.size, &adev->num_bytes_moved);
 567	return 0;
 568}
 569
 570/*
 571 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 572 *
 573 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 574 */
 575static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 576				     struct ttm_resource *mem)
 577{
 578	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 579
 580	switch (mem->mem_type) {
 581	case TTM_PL_SYSTEM:
 582		/* system memory */
 583		return 0;
 584	case TTM_PL_TT:
 585	case AMDGPU_PL_PREEMPT:
 586		break;
 587	case TTM_PL_VRAM:
 588		mem->bus.offset = mem->start << PAGE_SHIFT;
 589
 590		if (adev->mman.aper_base_kaddr &&
 591		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
 592			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 593					mem->bus.offset;
 594
 595		mem->bus.offset += adev->gmc.aper_base;
 596		mem->bus.is_iomem = true;
 597		break;
 598	case AMDGPU_PL_DOORBELL:
 599		mem->bus.offset = mem->start << PAGE_SHIFT;
 600		mem->bus.offset += adev->doorbell.base;
 601		mem->bus.is_iomem = true;
 602		mem->bus.caching = ttm_uncached;
 603		break;
 604	default:
 605		return -EINVAL;
 606	}
 607	return 0;
 608}
 609
 610static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 611					   unsigned long page_offset)
 612{
 613	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 614	struct amdgpu_res_cursor cursor;
 615
 616	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
 617			 &cursor);
 618
 619	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
 620		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
 621
 622	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 623}
 624
 625/**
 626 * amdgpu_ttm_domain_start - Returns GPU start address
 627 * @adev: amdgpu device object
 628 * @type: type of the memory
 629 *
 630 * Returns:
 631 * GPU start address of a memory domain
 632 */
 633
 634uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
 635{
 636	switch (type) {
 637	case TTM_PL_TT:
 638		return adev->gmc.gart_start;
 639	case TTM_PL_VRAM:
 640		return adev->gmc.vram_start;
 641	}
 642
 643	return 0;
 644}
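/*
 * Illustrative use (mirrors amdgpu_ttm_map_buffer() above): the MC address
 * of a byte inside a directly addressable resource is
 *
 *	u64 mc_addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
 *		      mm_cur->start;
 *
 * so a GTT resource resolves relative to gart_start and a VRAM resource
 * relative to vram_start.
 */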
 645
 646/*
 647 * TTM backend functions.
 648 */
 649struct amdgpu_ttm_tt {
 650	struct ttm_tt	ttm;
 651	struct drm_gem_object	*gobj;
 652	u64			offset;
 653	uint64_t		userptr;
 654	struct task_struct	*usertask;
 655	uint32_t		userflags;
 656	bool			bound;
 657	int32_t			pool_id;
 658};
 659
 660#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)
 661
 662#ifdef CONFIG_DRM_AMDGPU_USERPTR
 663/*
 664 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
  665 * memory and start HMM tracking of CPU page table updates
 666 *
 667 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 668 * once afterwards to stop HMM tracking
 669 */
 670int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
 671				 struct hmm_range **range)
 672{
 673	struct ttm_tt *ttm = bo->tbo.ttm;
 674	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 675	unsigned long start = gtt->userptr;
 676	struct vm_area_struct *vma;
 677	struct mm_struct *mm;
 678	bool readonly;
 679	int r = 0;
 680
 681	/* Make sure get_user_pages_done() can cleanup gracefully */
 682	*range = NULL;
 683
 684	mm = bo->notifier.mm;
 685	if (unlikely(!mm)) {
 686		DRM_DEBUG_DRIVER("BO is not registered?\n");
 687		return -EFAULT;
 688	}
 689
 690	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
 691		return -ESRCH;
 692
 693	mmap_read_lock(mm);
 694	vma = vma_lookup(mm, start);
 695	if (unlikely(!vma)) {
 696		r = -EFAULT;
 697		goto out_unlock;
 698	}
 699	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
 700		vma->vm_file)) {
 701		r = -EPERM;
 702		goto out_unlock;
 703	}
 704
 705	readonly = amdgpu_ttm_tt_is_readonly(ttm);
 706	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
 707				       readonly, NULL, pages, range);
 708out_unlock:
 709	mmap_read_unlock(mm);
 710	if (r)
 711		pr_debug("failed %d to get user pages 0x%lx\n", r, start);
 712
 713	mmput(mm);
 714
 715	return r;
 716}
 717
 718/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 719 */
 720void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
 721				      struct hmm_range *range)
 722{
 723	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 724
 725	if (gtt && gtt->userptr && range)
 726		amdgpu_hmm_range_get_pages_done(range);
 727}
 728
 729/*
  730 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
 731 * Check if the pages backing this ttm range have been invalidated
 732 *
 733 * Returns: true if pages are still valid
 734 */
 735bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 736				       struct hmm_range *range)
 737{
 738	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 739
 740	if (!gtt || !gtt->userptr || !range)
 741		return false;
 742
 743	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
 744		gtt->userptr, ttm->num_pages);
 745
 746	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 747
 748	return !amdgpu_hmm_range_get_pages_done(range);
 749}
 750#endif
 751
 752/*
 753 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 754 *
 755 * Called by amdgpu_cs_list_validate(). This creates the page list
 756 * that backs user memory and will ultimately be mapped into the device
 757 * address space.
 758 */
 759void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 760{
 761	unsigned long i;
 762
 763	for (i = 0; i < ttm->num_pages; ++i)
 764		ttm->pages[i] = pages ? pages[i] : NULL;
 765}
 766
 767/*
 768 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 769 *
 770 * Called by amdgpu_ttm_backend_bind()
 771 **/
 772static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
 773				     struct ttm_tt *ttm)
 774{
 775	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 776	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 777	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 778	enum dma_data_direction direction = write ?
 779		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 780	int r;
 781
 782	/* Allocate an SG array and squash pages into it */
 783	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 784				      (u64)ttm->num_pages << PAGE_SHIFT,
 785				      GFP_KERNEL);
 786	if (r)
 787		goto release_sg;
 788
 789	/* Map SG to device */
 790	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
 791	if (r)
 792		goto release_sg;
 793
 794	/* convert SG to linear array of pages and dma addresses */
 795	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
 796				       ttm->num_pages);
 797
 798	return 0;
 799
 800release_sg:
 801	kfree(ttm->sg);
 802	ttm->sg = NULL;
 803	return r;
 804}
 805
 806/*
 807 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 808 */
 809static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 810					struct ttm_tt *ttm)
 811{
 812	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 813	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 814	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 815	enum dma_data_direction direction = write ?
 816		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 817
 818	/* double check that we don't free the table twice */
 819	if (!ttm->sg || !ttm->sg->sgl)
 820		return;
 821
 822	/* unmap the pages mapped to the device */
 823	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 824	sg_free_table(ttm->sg);
 825}
 826
 827/*
 828 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 829 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 830 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 831 * and uses memory type default, UC. The rest of pages_per_xcc are
 832 * Ctrl stack and modify their memory type to NC.
 833 */
 834static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
 835				struct ttm_tt *ttm, uint64_t flags)
 836{
 837	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 838	uint64_t total_pages = ttm->num_pages;
 839	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
 840	uint64_t page_idx, pages_per_xcc;
 841	int i;
 842	uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
 843			AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 844
 845	pages_per_xcc = total_pages;
 846	do_div(pages_per_xcc, num_xcc);
 847
 848	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
 849		/* MQD page: use default flags */
 850		amdgpu_gart_bind(adev,
 851				gtt->offset + (page_idx << PAGE_SHIFT),
 852				1, &gtt->ttm.dma_address[page_idx], flags);
 853		/*
 854		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
 855		 * the second page of the BO onward.
 856		 */
 857		amdgpu_gart_bind(adev,
 858				gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
 859				pages_per_xcc - 1,
 860				&gtt->ttm.dma_address[page_idx + 1],
 861				ctrl_flags);
 862	}
 863}
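/*
 * Worked example of the layout handled above (hypothetical sizes): with
 * num_xcc = 2 and ttm->num_pages = 8, pages_per_xcc = 4, so the loop binds
 *
 *	page 0      -> MQD for XCC0, default flags
 *	pages 1..3  -> control stack for XCC0, ctrl_flags (MTYPE NC)
 *	page 4      -> MQD for XCC1, default flags
 *	pages 5..7  -> control stack for XCC1, ctrl_flags (MTYPE NC)
 */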
 864
 865static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 866				 struct ttm_buffer_object *tbo,
 867				 uint64_t flags)
 868{
 869	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
 870	struct ttm_tt *ttm = tbo->ttm;
 871	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 872
 873	if (amdgpu_bo_encrypted(abo))
 874		flags |= AMDGPU_PTE_TMZ;
 875
 876	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
 877		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
 878	} else {
 879		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 880				 gtt->ttm.dma_address, flags);
 881	}
 882	gtt->bound = true;
 883}
 884
 885/*
 886 * amdgpu_ttm_backend_bind - Bind GTT memory
 887 *
 888 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 889 * This handles binding GTT memory to the device address space.
 890 */
 891static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 892				   struct ttm_tt *ttm,
 893				   struct ttm_resource *bo_mem)
 894{
 895	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 896	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 897	uint64_t flags;
 898	int r;
 899
 900	if (!bo_mem)
 901		return -EINVAL;
 902
 903	if (gtt->bound)
 904		return 0;
 905
 906	if (gtt->userptr) {
 907		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
 908		if (r) {
 909			DRM_ERROR("failed to pin userptr\n");
 910			return r;
 911		}
 912	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
 913		if (!ttm->sg) {
 914			struct dma_buf_attachment *attach;
 915			struct sg_table *sgt;
 916
 917			attach = gtt->gobj->import_attach;
 918			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 919			if (IS_ERR(sgt))
 920				return PTR_ERR(sgt);
 921
 922			ttm->sg = sgt;
 923		}
 924
 925		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
 926					       ttm->num_pages);
 927	}
 928
 929	if (!ttm->num_pages) {
 930		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
 931		     ttm->num_pages, bo_mem, ttm);
 932	}
 933
 934	if (bo_mem->mem_type != TTM_PL_TT ||
 935	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
 936		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 937		return 0;
 938	}
 939
 940	/* compute PTE flags relevant to this BO memory */
 941	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 942
 943	/* bind pages into GART page tables */
 944	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 945	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 946			 gtt->ttm.dma_address, flags);
 947	gtt->bound = true;
 948	return 0;
 949}
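/*
 * Example of the offset computation above (hypothetical numbers, 4 KiB
 * pages assumed): a BO whose GTT allocation starts at page 0x1000 gets
 *
 *	gtt->offset = 0x1000ULL << PAGE_SHIFT;	// 16 MiB into the GART
 *
 * and its MC address is amdgpu_ttm_domain_start(adev, TTM_PL_TT) +
 * gtt->offset.
 */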
 950
 951/*
 952 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 953 * through AGP or GART aperture.
 954 *
 955 * If bo is accessible through AGP aperture, then use AGP aperture
 956 * to access bo; otherwise allocate logical space in GART aperture
 957 * and map bo to GART aperture.
 958 */
 959int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 960{
 961	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 962	struct ttm_operation_ctx ctx = { false, false };
 963	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
 964	struct ttm_placement placement;
 965	struct ttm_place placements;
 966	struct ttm_resource *tmp;
 967	uint64_t addr, flags;
 968	int r;
 969
 970	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
 971		return 0;
 972
 973	addr = amdgpu_gmc_agp_addr(bo);
 974	if (addr != AMDGPU_BO_INVALID_OFFSET)
 975		return 0;
 976
 977	/* allocate GART space */
 978	placement.num_placement = 1;
 979	placement.placement = &placements;
 980	placements.fpfn = 0;
 981	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 982	placements.mem_type = TTM_PL_TT;
 983	placements.flags = bo->resource->placement;
 984
 985	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 986	if (unlikely(r))
 987		return r;
 988
 989	/* compute PTE flags for this buffer object */
 990	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 991
 992	/* Bind pages */
 993	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
 994	amdgpu_ttm_gart_bind(adev, bo, flags);
 995	amdgpu_gart_invalidate_tlb(adev);
 996	ttm_resource_free(bo, &bo->resource);
 997	ttm_bo_assign_mem(bo, tmp);
 998
 999	return 0;
1000}
1001
1002/*
1003 * amdgpu_ttm_recover_gart - Rebind GTT pages
1004 *
1005 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1006 * rebind GTT pages during a GPU reset.
1007 */
1008void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1009{
1010	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1011	uint64_t flags;
1012
1013	if (!tbo->ttm)
1014		return;
1015
1016	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1017	amdgpu_ttm_gart_bind(adev, tbo, flags);
1018}
1019
1020/*
1021 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1022 *
1023 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1024 * ttm_tt_destroy().
1025 */
1026static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1027				      struct ttm_tt *ttm)
1028{
1029	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1030	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1031
1032	/* if the pages have userptr pinning then clear that first */
1033	if (gtt->userptr) {
1034		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1035	} else if (ttm->sg && gtt->gobj->import_attach) {
1036		struct dma_buf_attachment *attach;
1037
1038		attach = gtt->gobj->import_attach;
1039		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1040		ttm->sg = NULL;
1041	}
1042
1043	if (!gtt->bound)
1044		return;
1045
1046	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1047		return;
1048
1049	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1050	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1051	gtt->bound = false;
1052}
1053
1054static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1055				       struct ttm_tt *ttm)
1056{
1057	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1058
1059	if (gtt->usertask)
1060		put_task_struct(gtt->usertask);
1061
1062	ttm_tt_fini(&gtt->ttm);
1063	kfree(gtt);
1064}
1065
1066/**
1067 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1068 *
1069 * @bo: The buffer object to create a GTT ttm_tt object around
1070 * @page_flags: Page flags to be added to the ttm_tt object
1071 *
1072 * Called by ttm_tt_create().
1073 */
1074static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1075					   uint32_t page_flags)
1076{
1077	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1078	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1079	struct amdgpu_ttm_tt *gtt;
1080	enum ttm_caching caching;
1081
1082	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1083	if (!gtt)
1084		return NULL;
1085
1086	gtt->gobj = &bo->base;
1087	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
1088		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
1089	else
1090		gtt->pool_id = abo->xcp_id;
1091
1092	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1093		caching = ttm_write_combined;
1094	else
1095		caching = ttm_cached;
1096
1097	/* allocate space for the uninitialized page entries */
1098	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1099		kfree(gtt);
1100		return NULL;
1101	}
1102	return &gtt->ttm;
1103}
1104
1105/*
1106 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1107 *
1108 * Map the pages of a ttm_tt object to an address space visible
1109 * to the underlying device.
1110 */
1111static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1112				  struct ttm_tt *ttm,
1113				  struct ttm_operation_ctx *ctx)
1114{
1115	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1116	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1117	struct ttm_pool *pool;
1118	pgoff_t i;
1119	int ret;
1120
1121	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1122	if (gtt->userptr) {
1123		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1124		if (!ttm->sg)
1125			return -ENOMEM;
1126		return 0;
1127	}
1128
1129	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1130		return 0;
1131
1132	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1133		pool = &adev->mman.ttm_pools[gtt->pool_id];
1134	else
1135		pool = &adev->mman.bdev.pool;
1136	ret = ttm_pool_alloc(pool, ttm, ctx);
1137	if (ret)
1138		return ret;
1139
1140	for (i = 0; i < ttm->num_pages; ++i)
1141		ttm->pages[i]->mapping = bdev->dev_mapping;
1142
1143	return 0;
1144}
1145
1146/*
1147 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1148 *
1149 * Unmaps pages of a ttm_tt object from the device address space and
1150 * unpopulates the page array backing it.
1151 */
1152static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1153				     struct ttm_tt *ttm)
1154{
1155	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1156	struct amdgpu_device *adev;
1157	struct ttm_pool *pool;
1158	pgoff_t i;
1159
1160	amdgpu_ttm_backend_unbind(bdev, ttm);
1161
1162	if (gtt->userptr) {
1163		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1164		kfree(ttm->sg);
1165		ttm->sg = NULL;
1166		return;
1167	}
1168
1169	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1170		return;
1171
1172	for (i = 0; i < ttm->num_pages; ++i)
1173		ttm->pages[i]->mapping = NULL;
1174
1175	adev = amdgpu_ttm_adev(bdev);
1176
1177	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1178		pool = &adev->mman.ttm_pools[gtt->pool_id];
1179	else
1180		pool = &adev->mman.bdev.pool;
1181
1182	return ttm_pool_free(pool, ttm);
1183}
1184
1185/**
1186 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1187 * task
1188 *
1189 * @tbo: The ttm_buffer_object that contains the userptr
1190 * @user_addr:  The returned value
1191 */
1192int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1193			      uint64_t *user_addr)
1194{
1195	struct amdgpu_ttm_tt *gtt;
1196
1197	if (!tbo->ttm)
1198		return -EINVAL;
1199
1200	gtt = (void *)tbo->ttm;
1201	*user_addr = gtt->userptr;
1202	return 0;
1203}
1204
1205/**
1206 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1207 * task
1208 *
1209 * @bo: The ttm_buffer_object to bind this userptr to
1210 * @addr:  The address in the current tasks VM space to use
1211 * @flags: Requirements of userptr object.
1212 *
1213 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1214 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1215 * initialize GPU VM for a KFD process.
1216 */
1217int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1218			      uint64_t addr, uint32_t flags)
1219{
1220	struct amdgpu_ttm_tt *gtt;
1221
1222	if (!bo->ttm) {
1223		/* TODO: We want a separate TTM object type for userptrs */
1224		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1225		if (bo->ttm == NULL)
1226			return -ENOMEM;
1227	}
1228
1229	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1230	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1231
1232	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1233	gtt->userptr = addr;
1234	gtt->userflags = flags;
1235
1236	if (gtt->usertask)
1237		put_task_struct(gtt->usertask);
1238	gtt->usertask = current->group_leader;
1239	get_task_struct(gtt->usertask);
1240
1241	return 0;
1242}
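/*
 * Simplified userptr call sequence (a sketch, not the exact ioctl code;
 * locking and error handling omitted):
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, flags);
 *	...
 *	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
 *	... validate and bind the BO ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
 *		; // pages were invalidated meanwhile, restart
 */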
1243
1244/*
1245 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1246 */
1247struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1248{
1249	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1250
1251	if (gtt == NULL)
1252		return NULL;
1253
1254	if (gtt->usertask == NULL)
1255		return NULL;
1256
1257	return gtt->usertask->mm;
1258}
1259
1260/*
 1261 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1262 * address range for the current task.
1263 *
1264 */
1265bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1266				  unsigned long end, unsigned long *userptr)
1267{
1268	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1269	unsigned long size;
1270
1271	if (gtt == NULL || !gtt->userptr)
1272		return false;
1273
1274	/* Return false if no part of the ttm_tt object lies within
1275	 * the range
1276	 */
1277	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1278	if (gtt->userptr > end || gtt->userptr + size <= start)
1279		return false;
1280
1281	if (userptr)
1282		*userptr = gtt->userptr;
1283	return true;
1284}
1285
1286/*
 1287 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1288 */
1289bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1290{
1291	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1292
1293	if (gtt == NULL || !gtt->userptr)
1294		return false;
1295
1296	return true;
1297}
1298
1299/*
1300 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1301 */
1302bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1303{
1304	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1305
1306	if (gtt == NULL)
1307		return false;
1308
1309	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1310}
1311
1312/**
1313 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1314 *
1315 * @ttm: The ttm_tt object to compute the flags for
1316 * @mem: The memory registry backing this ttm_tt object
1317 *
1318 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1319 */
1320uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1321{
1322	uint64_t flags = 0;
1323
1324	if (mem && mem->mem_type != TTM_PL_SYSTEM)
1325		flags |= AMDGPU_PTE_VALID;
1326
1327	if (mem && (mem->mem_type == TTM_PL_TT ||
1328		    mem->mem_type == AMDGPU_PL_DOORBELL ||
1329		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
1330		flags |= AMDGPU_PTE_SYSTEM;
1331
1332		if (ttm->caching == ttm_cached)
1333			flags |= AMDGPU_PTE_SNOOPED;
1334	}
1335
1336	if (mem && mem->mem_type == TTM_PL_VRAM &&
1337			mem->bus.caching == ttm_cached)
1338		flags |= AMDGPU_PTE_SNOOPED;
1339
1340	return flags;
1341}
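/*
 * Worked example (follows directly from the checks above): a cached
 * TTM_PL_TT resource yields
 *
 *	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
 *
 * while a VRAM resource with uncached bus caching yields just
 * AMDGPU_PTE_VALID.
 */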
1342
1343/**
1344 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1345 *
1346 * @adev: amdgpu_device pointer
1347 * @ttm: The ttm_tt object to compute the flags for
1348 * @mem: The memory registry backing this ttm_tt object
1349 *
1350 * Figure out the flags to use for a VM PTE (Page Table Entry).
1351 */
1352uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1353				 struct ttm_resource *mem)
1354{
1355	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1356
1357	flags |= adev->gart.gart_pte_flags;
1358	flags |= AMDGPU_PTE_READABLE;
1359
1360	if (!amdgpu_ttm_tt_is_readonly(ttm))
1361		flags |= AMDGPU_PTE_WRITEABLE;
1362
1363	return flags;
1364}
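/*
 * Worked example (building on the PDE case above): a writable, cached GTT
 * mapping ends up with
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	adev->gart.gart_pte_flags | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * with AMDGPU_PTE_WRITEABLE dropped when the ttm_tt is a read-only userptr.
 */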
1365
1366/*
1367 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1368 * object.
1369 *
1370 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1371 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1372 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1373 * used to clean out a memory space.
1374 */
1375static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1376					    const struct ttm_place *place)
1377{
1378	struct dma_resv_iter resv_cursor;
1379	struct dma_fence *f;
1380
1381	if (!amdgpu_bo_is_amdgpu_bo(bo))
1382		return ttm_bo_eviction_valuable(bo, place);
1383
1384	/* Swapout? */
1385	if (bo->resource->mem_type == TTM_PL_SYSTEM)
1386		return true;
1387
1388	if (bo->type == ttm_bo_type_kernel &&
1389	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1390		return false;
1391
1392	/* If bo is a KFD BO, check if the bo belongs to the current process.
1393	 * If true, then return false as any KFD process needs all its BOs to
1394	 * be resident to run successfully
1395	 */
1396	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1397				DMA_RESV_USAGE_BOOKKEEP, f) {
1398		if (amdkfd_fence_check_mm(f, current->mm))
1399			return false;
1400	}
1401
1402	/* Preemptible BOs don't own system resources managed by the
1403	 * driver (pages, VRAM, GART space). They point to resources
1404	 * owned by someone else (e.g. pageable memory in user mode
1405	 * or a DMABuf). They are used in a preemptible context so we
1406	 * can guarantee no deadlocks and good QoS in case of MMU
1407	 * notifiers or DMABuf move notifiers from the resource owner.
1408	 */
1409	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1410		return false;
1411
1412	if (bo->resource->mem_type == TTM_PL_TT &&
1413	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1414		return false;
1415
1416	return ttm_bo_eviction_valuable(bo, place);
1417}
1418
1419static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1420				      void *buf, size_t size, bool write)
1421{
1422	while (size) {
1423		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1424		uint64_t bytes = 4 - (pos & 0x3);
1425		uint32_t shift = (pos & 0x3) * 8;
1426		uint32_t mask = 0xffffffff << shift;
1427		uint32_t value = 0;
1428
1429		if (size < bytes) {
1430			mask &= 0xffffffff >> (bytes - size) * 8;
1431			bytes = size;
1432		}
1433
1434		if (mask != 0xffffffff) {
1435			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1436			if (write) {
1437				value &= ~mask;
1438				value |= (*(uint32_t *)buf << shift) & mask;
1439				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1440			} else {
1441				value = (value & mask) >> shift;
1442				memcpy(buf, &value, bytes);
1443			}
1444		} else {
1445			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1446		}
1447
1448		pos += bytes;
1449		buf += bytes;
1450		size -= bytes;
1451	}
1452}
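/*
 * Worked example of the read-modify-write above (illustrative values):
 * writing 2 bytes at pos = 0x1002 gives aligned_pos = 0x1000, bytes = 2,
 * shift = 16 and mask = 0xffff0000, so the dword at 0x1000 is read via
 * amdgpu_device_mm_access(), its upper half replaced with the new bytes
 * and the merged value written back.
 */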
1453
1454static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1455					unsigned long offset, void *buf,
1456					int len, int write)
1457{
1458	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1459	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1460	struct amdgpu_res_cursor src_mm;
1461	struct amdgpu_job *job;
1462	struct dma_fence *fence;
1463	uint64_t src_addr, dst_addr;
1464	unsigned int num_dw;
1465	int r, idx;
1466
1467	if (len != PAGE_SIZE)
1468		return -EINVAL;
1469
1470	if (!adev->mman.sdma_access_ptr)
1471		return -EACCES;
1472
1473	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1474		return -ENODEV;
1475
1476	if (write)
1477		memcpy(adev->mman.sdma_access_ptr, buf, len);
1478
1479	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1480	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
1481				     AMDGPU_FENCE_OWNER_UNDEFINED,
1482				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1483				     &job);
1484	if (r)
1485		goto out;
1486
1487	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1488	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1489		src_mm.start;
1490	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1491	if (write)
1492		swap(src_addr, dst_addr);
1493
1494	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1495				PAGE_SIZE, false);
1496
1497	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1498	WARN_ON(job->ibs[0].length_dw > num_dw);
1499
1500	fence = amdgpu_job_submit(job);
1501
1502	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1503		r = -ETIMEDOUT;
1504	dma_fence_put(fence);
1505
1506	if (!(r || write))
1507		memcpy(buf, adev->mman.sdma_access_ptr, len);
1508out:
1509	drm_dev_exit(idx);
1510	return r;
1511}
1512
1513/**
1514 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1515 *
1516 * @bo:  The buffer object to read/write
1517 * @offset:  Offset into buffer object
1518 * @buf:  Secondary buffer to write/read from
1519 * @len: Length in bytes of access
1520 * @write:  true if writing
1521 *
1522 * This is used to access VRAM that backs a buffer object via MMIO
1523 * access for debugging purposes.
1524 */
1525static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1526				    unsigned long offset, void *buf, int len,
1527				    int write)
1528{
1529	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1530	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1531	struct amdgpu_res_cursor cursor;
1532	int ret = 0;
1533
1534	if (bo->resource->mem_type != TTM_PL_VRAM)
1535		return -EIO;
1536
1537	if (amdgpu_device_has_timeouts_enabled(adev) &&
1538			!amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1539		return len;
1540
1541	amdgpu_res_first(bo->resource, offset, len, &cursor);
1542	while (cursor.remaining) {
1543		size_t count, size = cursor.size;
1544		loff_t pos = cursor.start;
1545
1546		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1547		size -= count;
1548		if (size) {
 1549			/* use MM to access the rest of VRAM and handle unaligned addresses */
1550			pos += count;
1551			buf += count;
1552			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1553		}
1554
1555		ret += cursor.size;
1556		buf += cursor.size;
1557		amdgpu_res_next(&cursor, cursor.size);
1558	}
1559
1560	return ret;
1561}
1562
1563static void
1564amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1565{
1566	amdgpu_bo_move_notify(bo, false, NULL);
1567}
1568
1569static struct ttm_device_funcs amdgpu_bo_driver = {
1570	.ttm_tt_create = &amdgpu_ttm_tt_create,
1571	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
1572	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1573	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1574	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1575	.evict_flags = &amdgpu_evict_flags,
1576	.move = &amdgpu_bo_move,
1577	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1578	.release_notify = &amdgpu_bo_release_notify,
1579	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1580	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1581	.access_memory = &amdgpu_ttm_access_memory,
1582};
1583
1584/*
1585 * Firmware Reservation functions
1586 */
1587/**
1588 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1589 *
1590 * @adev: amdgpu_device pointer
1591 *
1592 * free fw reserved vram if it has been reserved.
1593 */
1594static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1595{
1596	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1597		NULL, &adev->mman.fw_vram_usage_va);
1598}
1599
1600/*
1601 * Driver Reservation functions
1602 */
1603/**
1604 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
1605 *
1606 * @adev: amdgpu_device pointer
1607 *
1608 * free drv reserved vram if it has been reserved.
1609 */
1610static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
1611{
1612	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
1613						  NULL,
1614						  &adev->mman.drv_vram_usage_va);
1615}
1616
1617/**
1618 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1619 *
1620 * @adev: amdgpu_device pointer
1621 *
1622 * create bo vram reservation from fw.
1623 */
1624static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1625{
1626	uint64_t vram_size = adev->gmc.visible_vram_size;
1627
1628	adev->mman.fw_vram_usage_va = NULL;
1629	adev->mman.fw_vram_usage_reserved_bo = NULL;
1630
1631	if (adev->mman.fw_vram_usage_size == 0 ||
1632	    adev->mman.fw_vram_usage_size > vram_size)
1633		return 0;
1634
1635	return amdgpu_bo_create_kernel_at(adev,
1636					  adev->mman.fw_vram_usage_start_offset,
1637					  adev->mman.fw_vram_usage_size,
1638					  &adev->mman.fw_vram_usage_reserved_bo,
1639					  &adev->mman.fw_vram_usage_va);
1640}
1641
1642/**
1643 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
1644 *
1645 * @adev: amdgpu_device pointer
1646 *
1647 * create bo vram reservation from drv.
1648 */
1649static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
1650{
1651	u64 vram_size = adev->gmc.visible_vram_size;
1652
1653	adev->mman.drv_vram_usage_va = NULL;
1654	adev->mman.drv_vram_usage_reserved_bo = NULL;
1655
1656	if (adev->mman.drv_vram_usage_size == 0 ||
1657	    adev->mman.drv_vram_usage_size > vram_size)
1658		return 0;
1659
1660	return amdgpu_bo_create_kernel_at(adev,
1661					  adev->mman.drv_vram_usage_start_offset,
1662					  adev->mman.drv_vram_usage_size,
1663					  &adev->mman.drv_vram_usage_reserved_bo,
1664					  &adev->mman.drv_vram_usage_va);
1665}
1666
1667/*
 1668 * Memory training reservation functions
1669 */
1670
1671/**
1672 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1673 *
1674 * @adev: amdgpu_device pointer
1675 *
1676 * free memory training reserved vram if it has been reserved.
1677 */
1678static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1679{
1680	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1681
1682	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1683	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1684	ctx->c2p_bo = NULL;
1685
1686	return 0;
1687}
1688
1689static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
1690						uint32_t reserve_size)
1691{
1692	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1693
1694	memset(ctx, 0, sizeof(*ctx));
1695
1696	ctx->c2p_train_data_offset =
1697		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1698	ctx->p2c_train_data_offset =
1699		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1700	ctx->train_data_size =
1701		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1702
1703	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1704			ctx->train_data_size,
1705			ctx->p2c_train_data_offset,
1706			ctx->c2p_train_data_offset);
1707}
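/*
 * Example layout (hypothetical numbers): with mc_vram_size = 8 GiB and
 * reserve_size = 4 MiB the offsets computed above become
 *
 *	c2p_train_data_offset = ALIGN(8 GiB - 4 MiB - 1 MiB, 1 MiB)
 *			      = 8 GiB - 5 MiB
 *	p2c_train_data_offset = 8 GiB - GDDR6_MEM_TRAINING_OFFSET
 *
 * i.e. both blocks sit just below the top of VRAM.
 */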
1708
1709/*
1710 * reserve TMR memory at the top of VRAM which holds
1711 * IP Discovery data and is protected by PSP.
1712 */
1713static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1714{
1715	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1716	bool mem_train_support = false;
1717	uint32_t reserve_size = 0;
1718	int ret;
1719
1720	if (adev->bios && !amdgpu_sriov_vf(adev)) {
1721		if (amdgpu_atomfirmware_mem_training_supported(adev))
1722			mem_train_support = true;
1723		else
 1724			DRM_DEBUG("memory training is not supported!\n");
1725	}
1726
1727	/*
1728	 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
 1729	 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
 1730	 *
 1731	 * Otherwise, fall back to the legacy approach to check and reserve tmr block for ip
 1732	 * discovery data and G6 memory training data respectively
1733	 */
1734	if (adev->bios)
1735		reserve_size =
1736			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1737
1738	if (!adev->bios &&
1739	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
1740		reserve_size = max(reserve_size, (uint32_t)280 << 20);
1741	else if (!reserve_size)
1742		reserve_size = DISCOVERY_TMR_OFFSET;
1743
1744	if (mem_train_support) {
1745		/* reserve vram for mem train according to TMR location */
1746		amdgpu_ttm_training_data_block_init(adev, reserve_size);
1747		ret = amdgpu_bo_create_kernel_at(adev,
1748						 ctx->c2p_train_data_offset,
1749						 ctx->train_data_size,
1750						 &ctx->c2p_bo,
1751						 NULL);
1752		if (ret) {
1753			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1754			amdgpu_ttm_training_reserve_vram_fini(adev);
1755			return ret;
1756		}
1757		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1758	}
1759
1760	if (!adev->gmc.is_app_apu) {
1761		ret = amdgpu_bo_create_kernel_at(
1762			adev, adev->gmc.real_vram_size - reserve_size,
1763			reserve_size, &adev->mman.fw_reserved_memory, NULL);
1764		if (ret) {
1765			DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1766			amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
1767					      NULL, NULL);
1768			return ret;
1769		}
1770	} else {
1771		DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
1772	}
1773
1774	return 0;
1775}
1776
1777static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1778{
1779	int i;
1780
1781	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1782		return 0;
1783
1784	adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
1785				       sizeof(*adev->mman.ttm_pools),
1786				       GFP_KERNEL);
1787	if (!adev->mman.ttm_pools)
1788		return -ENOMEM;
1789
1790	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1791		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1792			      adev->gmc.mem_partitions[i].numa.node,
1793			      false, false);
1794	}
1795	return 0;
1796}
1797
1798static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1799{
1800	int i;
1801
1802	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1803		return;
1804
1805	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1806		ttm_pool_fini(&adev->mman.ttm_pools[i]);
1807
1808	kfree(adev->mman.ttm_pools);
1809	adev->mman.ttm_pools = NULL;
1810}
1811
1812/*
1813 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1814 * gtt/vram related fields.
1815 *
1816 * This initializes all of the memory space pools that the TTM layer
1817 * will need such as the GTT space (system memory mapped to the device),
1818 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1819 * can be mapped per VMID.
1820 */
1821int amdgpu_ttm_init(struct amdgpu_device *adev)
1822{
1823	uint64_t gtt_size;
1824	int r;
 
 
1825
1826	mutex_init(&adev->mman.gtt_window_lock);
1827
1828	/* No other users of the address space, so set it to 0 */
1829	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1830			       adev_to_drm(adev)->anon_inode->i_mapping,
1831			       adev_to_drm(adev)->vma_offset_manager,
1832			       adev->need_swiotlb,
1833			       dma_addressing_limited(adev->dev));
1834	if (r) {
1835		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1836		return r;
1837	}
1838
1839	r = amdgpu_ttm_pools_init(adev);
1840	if (r) {
1841		DRM_ERROR("failed to init ttm pools(%d).\n", r);
1842		return r;
1843	}
1844	adev->mman.initialized = true;
1845
 
 
 
1846	/* Initialize VRAM pool with all of VRAM divided into pages */
1847	r = amdgpu_vram_mgr_init(adev);
 
1848	if (r) {
1849		DRM_ERROR("Failed initializing VRAM heap.\n");
1850		return r;
1851	}
1852
 
 
 
 
 
 
1853	/* Change the size here instead of the init above so only lpfn is affected */
1854	amdgpu_ttm_set_buffer_funcs_status(adev, false);
1855#ifdef CONFIG_64BIT
1856#ifdef CONFIG_X86
1857	if (adev->gmc.xgmi.connected_to_cpu)
1858		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1859				adev->gmc.visible_vram_size);
1860
1861	else if (adev->gmc.is_app_apu)
1862		DRM_DEBUG_DRIVER(
1863			"No need to ioremap when real vram size is 0\n");
1864	else
1865#endif
1866		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1867				adev->gmc.visible_vram_size);
1868#endif
1869
1870	/*
1871	 * The VRAM reserved for firmware must be pinned to the specified
1872	 * place in VRAM, so reserve it early.
1873	 */
1874	r = amdgpu_ttm_fw_reserve_vram_init(adev);
1875	if (r)
1876		return r;
1877
1878	/*
1879	 * The VRAM reserved for the driver must be pinned to the specified
1880	 * place in VRAM, so reserve it early.
1881	 */
1882	r = amdgpu_ttm_drv_reserve_vram_init(adev);
1883	if (r)
1884		return r;
1885
1886	/*
1887	 * Only NAVI10 and onwards ASICs support IP discovery.
1888	 * If IP discovery is enabled, a block of memory needs to be
1889	 * reserved for the IP discovery data.
1890	 */
1891	if (adev->mman.discovery_bin) {
1892		r = amdgpu_ttm_reserve_tmr(adev);
1893		if (r)
1894			return r;
1895	}
1896
1897	/* allocate memory as required for VGA
1898	 * This is used for VGA emulation and pre-OS scanout buffers to
1899	 * avoid display artifacts while transitioning between pre-OS
1900	 * and driver.
1901	 */
1902	if (!adev->gmc.is_app_apu) {
1903		r = amdgpu_bo_create_kernel_at(adev, 0,
1904					       adev->mman.stolen_vga_size,
1905					       &adev->mman.stolen_vga_memory,
1906					       NULL);
1907		if (r)
1908			return r;
1909
1910		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1911					       adev->mman.stolen_extended_size,
1912					       &adev->mman.stolen_extended_memory,
1913					       NULL);
1914
1915		if (r)
1916			return r;
1917
1918		r = amdgpu_bo_create_kernel_at(adev,
1919					       adev->mman.stolen_reserved_offset,
1920					       adev->mman.stolen_reserved_size,
1921					       &adev->mman.stolen_reserved_memory,
1922					       NULL);
1923		if (r)
1924			return r;
1925	} else {
1926		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
1927	}
1928
1929	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1930		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
1931
1932	/* Compute GTT size, either based on TTM limit
1933	 * or whatever the user passed on module init.
1934	 */
1935	if (amdgpu_gtt_size == -1)
1936		gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
 
 
 
 
 
1937	else
1938		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1939
1940	/* Initialize GTT memory pool */
1941	r = amdgpu_gtt_mgr_init(adev, gtt_size);
1942	if (r) {
1943		DRM_ERROR("Failed initializing GTT heap.\n");
1944		return r;
1945	}
1946	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1947		 (unsigned int)(gtt_size / (1024 * 1024)));
1948
1949	/* Initialize doorbell pool on PCI BAR */
1950	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
1951	if (r) {
1952		DRM_ERROR("Failed initializing doorbell heap.\n");
1953		return r;
1954	}
1955
1956	/* Create a doorbell page for kernel use */
1957	r = amdgpu_doorbell_create_kernel_doorbells(adev);
1958	if (r) {
1959		DRM_ERROR("Failed to initialize kernel doorbells.\n");
1960		return r;
1961	}
1962
1963	/* Initialize preemptible memory pool */
1964	r = amdgpu_preempt_mgr_init(adev);
1965	if (r) {
1966		DRM_ERROR("Failed initializing PREEMPT heap.\n");
1967		return r;
1968	}
1969
1970	/* Initialize various on-chip memory pools */
1971	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
 
1972	if (r) {
1973		DRM_ERROR("Failed initializing GDS heap.\n");
1974		return r;
1975	}
1976
1977	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
 
1978	if (r) {
1979		DRM_ERROR("Failed initializing gws heap.\n");
1980		return r;
1981	}
1982
1983	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
 
1984	if (r) {
1985		DRM_ERROR("Failed initializing oa heap.\n");
1986		return r;
1987	}
1988	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1989				AMDGPU_GEM_DOMAIN_GTT,
1990				&adev->mman.sdma_access_bo, NULL,
1991				&adev->mman.sdma_access_ptr))
1992		DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1993
 
 
 
 
 
 
1994	return 0;
1995}
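
/*
 * Illustrative usage (example only, not part of this file): callers are
 * expected to pair amdgpu_ttm_init() with amdgpu_ttm_fini(), e.g.
 *
 *	r = amdgpu_ttm_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_ttm_fini(adev);
 */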
1996
1997/*
1998 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1999 */
2000void amdgpu_ttm_fini(struct amdgpu_device *adev)
2001{
2002	int idx;
2003
2004	if (!adev->mman.initialized)
2005		return;
2006
2007	amdgpu_ttm_pools_fini(adev);
2008
2009	amdgpu_ttm_training_reserve_vram_fini(adev);
2010	/* return the stolen vga memory back to VRAM */
2011	if (!adev->gmc.is_app_apu) {
2012		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2013		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
2014		/* return the FW reserved memory back to VRAM */
2015		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
2016				      NULL);
2017		if (adev->mman.stolen_reserved_size)
2018			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
2019					      NULL, NULL);
2020	}
2021	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2022					&adev->mman.sdma_access_ptr);
2023	amdgpu_ttm_fw_reserve_vram_fini(adev);
2024	amdgpu_ttm_drv_reserve_vram_fini(adev);
2025
2026	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2027
2028		if (adev->mman.aper_base_kaddr)
2029			iounmap(adev->mman.aper_base_kaddr);
2030		adev->mman.aper_base_kaddr = NULL;
2031
2032		drm_dev_exit(idx);
2033	}
2034
2035	amdgpu_vram_mgr_fini(adev);
2036	amdgpu_gtt_mgr_fini(adev);
2037	amdgpu_preempt_mgr_fini(adev);
2038	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2039	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2040	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2041	ttm_device_fini(&adev->mman.bdev);
2042	adev->mman.initialized = false;
2043	DRM_INFO("amdgpu: ttm finalized\n");
2044}
2045
2046/**
2047 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2048 *
2049 * @adev: amdgpu_device pointer
2050 * @enable: true when we can use buffer functions.
2051 *
2052 * Enable/disable use of buffer functions during suspend/resume. This should
2053 * only be called at bootup or when userspace isn't running.
2054 */
2055void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2056{
2057	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2058	uint64_t size;
2059	int r;
2060
2061	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2062	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2063		return;
2064
2065	if (enable) {
2066		struct amdgpu_ring *ring;
2067		struct drm_gpu_scheduler *sched;
2068
2069		ring = adev->mman.buffer_funcs_ring;
2070		sched = &ring->sched;
2071		r = drm_sched_entity_init(&adev->mman.high_pr,
2072					  DRM_SCHED_PRIORITY_KERNEL, &sched,
2073					  1, NULL);
2074		if (r) {
2075			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2076				  r);
2077			return;
2078		}
2079
2080		r = drm_sched_entity_init(&adev->mman.low_pr,
2081					  DRM_SCHED_PRIORITY_NORMAL, &sched,
2082					  1, NULL);
2083		if (r) {
2084			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2085				  r);
2086			goto error_free_entity;
2087		}
2088	} else {
2089		drm_sched_entity_destroy(&adev->mman.high_pr);
2090		drm_sched_entity_destroy(&adev->mman.low_pr);
2091		dma_fence_put(man->move);
2092		man->move = NULL;
2093	}
2094
2095	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2096	if (enable)
2097		size = adev->gmc.real_vram_size;
2098	else
2099		size = adev->gmc.visible_vram_size;
2100	man->size = size;
2101	adev->mman.buffer_funcs_enabled = enable;
 
2102
2103	return;
 
 
 
2104
2105error_free_entity:
2106	drm_sched_entity_destroy(&adev->mman.high_pr);
 
 
2107}
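
/*
 * Illustrative sketch, example only: the copy-engine (SDMA) IP code is the
 * expected caller of amdgpu_ttm_set_buffer_funcs_status(), flipping it on
 * once its ring is up and off again before teardown.  The helper name below
 * is hypothetical and the snippet is compiled out.
 */
#if 0	/* example only */
static void amdgpu_ttm_example_toggle_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	/* Only allow GPU-accelerated moves while the move ring is usable. */
	amdgpu_ttm_set_buffer_funcs_status(adev, ring && ring->sched.ready);
}
#endif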
2108
2109static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2110				  bool direct_submit,
2111				  unsigned int num_dw,
2112				  struct dma_resv *resv,
2113				  bool vm_needs_flush,
2114				  struct amdgpu_job **job,
2115				  bool delayed)
2116{
2117	enum amdgpu_ib_pool_type pool = direct_submit ?
2118		AMDGPU_IB_POOL_DIRECT :
2119		AMDGPU_IB_POOL_DELAYED;
 
 
 
 
2120	int r;
2121	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
2122						    &adev->mman.high_pr;
2123	r = amdgpu_job_alloc_with_ib(adev, entity,
2124				     AMDGPU_FENCE_OWNER_UNDEFINED,
2125				     num_dw * 4, pool, job);
2126	if (r)
2127		return r;
2128
2129	if (vm_needs_flush) {
2130		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2131							adev->gmc.pdb0_bo :
2132							adev->gart.bo);
2133		(*job)->vm_needs_flush = true;
2134	}
2135	if (!resv)
2136		return 0;
2137
2138	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2139						   DMA_RESV_USAGE_BOOKKEEP);
2140}
2141
2142int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2143		       uint64_t dst_offset, uint32_t byte_count,
2144		       struct dma_resv *resv,
2145		       struct dma_fence **fence, bool direct_submit,
2146		       bool vm_needs_flush, bool tmz)
2147{
2148	struct amdgpu_device *adev = ring->adev;
2149	unsigned int num_loops, num_dw;
2150	struct amdgpu_job *job;
 
2151	uint32_t max_bytes;
2152	unsigned int i;
 
2153	int r;
2154
2155	if (!direct_submit && !ring->sched.ready) {
2156		DRM_ERROR("Trying to move memory with ring turned off.\n");
2157		return -EINVAL;
2158	}
2159
2160	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2161	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2162	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2163	r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2164				   resv, vm_needs_flush, &job, false);
 
 
 
 
2165	if (r)
2166		return r;
2167
2168	for (i = 0; i < num_loops; i++) {
2169		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2170
2171		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2172					dst_offset, cur_size_in_bytes, tmz);
2173
2174		src_offset += cur_size_in_bytes;
2175		dst_offset += cur_size_in_bytes;
2176		byte_count -= cur_size_in_bytes;
2177	}
2178
2179	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2180	WARN_ON(job->ibs[0].length_dw > num_dw);
2181	if (direct_submit)
2182		r = amdgpu_job_submit_direct(job, ring, fence);
2183	else
2184		*fence = amdgpu_job_submit(job);
 
2185	if (r)
2186		goto error_free;
2187
2188	return r;
2189
2190error_free:
2191	amdgpu_job_free(job);
2192	DRM_ERROR("Error scheduling IBs (%d)\n", r);
2193	return r;
2194}
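
/*
 * Illustrative sketch, example only: a synchronous copy of @size bytes
 * between two GPU addresses using amdgpu_copy_buffer() above.  The helper
 * name and the choice of a blocking wait are assumptions of this example;
 * the snippet is compiled out.
 */
#if 0	/* example only */
static int amdgpu_ttm_example_blit(struct amdgpu_device *adev,
				   uint64_t src, uint64_t dst, uint32_t size)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence = NULL;
	int r;

	/* Queue the copy on the scheduler: no reservation, no TMZ. */
	r = amdgpu_copy_buffer(ring, src, dst, size, NULL, &fence,
			       false, false, false);
	if (r)
		return r;

	/* Block until the copy engine signals completion. */
	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
#endif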
2195
2196static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2197			       uint64_t dst_addr, uint32_t byte_count,
2198			       struct dma_resv *resv,
2199			       struct dma_fence **fence,
2200			       bool vm_needs_flush, bool delayed)
2201{
2202	struct amdgpu_device *adev = ring->adev;
2203	unsigned int num_loops, num_dw;
2204	struct amdgpu_job *job;
2205	uint32_t max_bytes;
2206	unsigned int i;
2207	int r;
2208
2209	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2210	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2211	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2212	r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2213				   &job, delayed);
2214	if (r)
2215		return r;
2216
2217	for (i = 0; i < num_loops; i++) {
2218		uint32_t cur_size = min(byte_count, max_bytes);
2219
2220		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2221					cur_size);
2222
2223		dst_addr += cur_size;
2224		byte_count -= cur_size;
2225	}
2226
2227	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2228	WARN_ON(job->ibs[0].length_dw > num_dw);
2229	*fence = amdgpu_job_submit(job);
2230	return 0;
2231}
2232
2233int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2234			uint32_t src_data,
2235			struct dma_resv *resv,
2236			struct dma_fence **f,
2237			bool delayed)
2238{
2239	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
2240	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2241	struct dma_fence *fence = NULL;
2242	struct amdgpu_res_cursor dst;
 
 
 
 
2243	int r;
2244
2245	if (!adev->mman.buffer_funcs_enabled) {
2246		DRM_ERROR("Trying to clear memory with ring turned off.\n");
2247		return -EINVAL;
2248	}
2249
2250	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
 
 
 
 
2251
2252	mutex_lock(&adev->mman.gtt_window_lock);
2253	while (dst.remaining) {
2254		struct dma_fence *next;
2255		uint64_t cur_size, to;
 
 
 
 
 
 
 
2256
2257		/* Never fill more than 256MiB at once to avoid timeouts */
2258		cur_size = min(dst.size, 256ULL << 20);
2259
2260		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2261					  1, ring, false, &cur_size, &to);
2262		if (r)
2263			goto error;
2264
2265		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2266					&next, true, delayed);
2267		if (r)
2268			goto error;
 
 
 
 
2269
2270		dma_fence_put(fence);
2271		fence = next;
2272
2273		amdgpu_res_next(&dst, cur_size);
2274	}
2275error:
2276	mutex_unlock(&adev->mman.gtt_window_lock);
2277	if (f)
2278		*f = dma_fence_get(fence);
2279	dma_fence_put(fence);
2280	return r;
2281}
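
/*
 * Illustrative sketch, example only: clearing a BO with amdgpu_fill_buffer()
 * above and waiting for the result.  The 0xdeadbeef pattern and the helper
 * name are arbitrary assumptions of this example; the snippet is compiled out.
 */
#if 0	/* example only */
static int amdgpu_ttm_example_clear_bo(struct amdgpu_bo *bo)
{
	struct dma_fence *fence = NULL;
	int r;

	/* Internally the fill is split into jobs of at most 256MiB each. */
	r = amdgpu_fill_buffer(bo, 0xdeadbeef, NULL, &fence, false);
	if (r)
		return r;

	if (fence) {
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return r;
}
#endif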
2282
2283/**
2284 * amdgpu_ttm_evict_resources - evict memory buffers
2285 * @adev: amdgpu device object
2286 * @mem_type: evicted BO's memory type
2287 *
2288 * Evicts all @mem_type buffers on the lru list of the memory type.
2289 *
2290 * Returns:
2291 * 0 for success or a negative error code on failure.
2292 */
2293int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2294{
2295	struct ttm_resource_manager *man;
2296
2297	switch (mem_type) {
2298	case TTM_PL_VRAM:
2299	case TTM_PL_TT:
2300	case AMDGPU_PL_GWS:
2301	case AMDGPU_PL_GDS:
2302	case AMDGPU_PL_OA:
2303		man = ttm_manager_type(&adev->mman.bdev, mem_type);
2304		break;
2305	default:
2306		DRM_ERROR("Trying to evict invalid memory type\n");
2307		return -EINVAL;
2308	}
2309
2310	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2311}
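
/*
 * Illustrative sketch, example only: evicting VRAM and then GTT, roughly the
 * order a suspend-style teardown would use.  The helper name is hypothetical
 * and the snippet is compiled out.
 */
#if 0	/* example only */
static int amdgpu_ttm_example_evict_all(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
	if (r)
		return r;

	return amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
}
#endif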
2312
2313#if defined(CONFIG_DEBUG_FS)
2314
2315static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2316{
2317	struct amdgpu_device *adev = m->private;
 
 
 
 
 
2318
2319	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
 
2320}
2321
2322DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2323
2324/*
2325 * amdgpu_ttm_vram_read - Linear read access to VRAM
2326 *
2327 * Accesses VRAM via MMIO for debugging purposes.
2328 */
2329static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2330				    size_t size, loff_t *pos)
2331{
2332	struct amdgpu_device *adev = file_inode(f)->i_private;
2333	ssize_t result = 0;
 
2334
2335	if (size & 0x3 || *pos & 0x3)
2336		return -EINVAL;
2337
2338	if (*pos >= adev->gmc.mc_vram_size)
2339		return -ENXIO;
2340
2341	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2342	while (size) {
2343		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2344		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2345
2346		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2347		if (copy_to_user(buf, value, bytes))
2348			return -EFAULT;
2349
2350		result += bytes;
2351		buf += bytes;
2352		*pos += bytes;
2353		size -= bytes;
2354	}
2355
2356	return result;
2357}
2358
2359/*
2360 * amdgpu_ttm_vram_write - Linear write access to VRAM
2361 *
2362 * Accesses VRAM via MMIO for debugging purposes.
2363 */
2364static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2365				    size_t size, loff_t *pos)
2366{
2367	struct amdgpu_device *adev = file_inode(f)->i_private;
2368	ssize_t result = 0;
2369	int r;
2370
2371	if (size & 0x3 || *pos & 0x3)
2372		return -EINVAL;
2373
2374	if (*pos >= adev->gmc.mc_vram_size)
2375		return -ENXIO;
2376
2377	while (size) {
 
2378		uint32_t value;
2379
2380		if (*pos >= adev->gmc.mc_vram_size)
2381			return result;
2382
2383		r = get_user(value, (uint32_t *)buf);
2384		if (r)
2385			return r;
2386
2387		amdgpu_device_mm_access(adev, *pos, &value, 4, true);
 
 
 
 
2388
2389		result += 4;
2390		buf += 4;
2391		*pos += 4;
2392		size -= 4;
2393	}
2394
2395	return result;
2396}
2397
2398static const struct file_operations amdgpu_ttm_vram_fops = {
2399	.owner = THIS_MODULE,
2400	.read = amdgpu_ttm_vram_read,
2401	.write = amdgpu_ttm_vram_write,
2402	.llseek = default_llseek,
2403};
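
/*
 * Illustrative usage, example only: the "amdgpu_vram" node created in
 * amdgpu_ttm_debugfs_init() below typically appears under
 * /sys/kernel/debug/dri/<minor>/ and only accepts 4-byte aligned offsets
 * and sizes, e.g. from userspace:
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=256 | xxd
 *
 * Unaligned accesses return -EINVAL and offsets past mc_vram_size return
 * -ENXIO, matching the checks in the read/write handlers above.
 */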
2404
2405/*
2406 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2407 *
2408 * This function is used to read memory that has been mapped to the
2409 * GPU and the known addresses are not physical addresses but instead
2410 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2411 */
2412static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2413				 size_t size, loff_t *pos)
2414{
2415	struct amdgpu_device *adev = file_inode(f)->i_private;
2416	struct iommu_domain *dom;
2417	ssize_t result = 0;
2418	int r;
2419
2420	/* retrieve the IOMMU domain if any for this device */
2421	dom = iommu_get_domain_for_dev(adev->dev);
2422
2423	while (size) {
2424		phys_addr_t addr = *pos & PAGE_MASK;
2425		loff_t off = *pos & ~PAGE_MASK;
2426		size_t bytes = PAGE_SIZE - off;
2427		unsigned long pfn;
2428		struct page *p;
2429		void *ptr;
2430
2431		bytes = min(bytes, size);
2432
2433		/* Translate the bus address to a physical address.  If
2434		 * the domain is NULL it means there is no IOMMU active
2435		 * and the address translation is the identity
2436		 */
2437		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2438
2439		pfn = addr >> PAGE_SHIFT;
2440		if (!pfn_valid(pfn))
2441			return -EPERM;
2442
2443		p = pfn_to_page(pfn);
2444		if (p->mapping != adev->mman.bdev.dev_mapping)
2445			return -EPERM;
2446
2447		ptr = kmap_local_page(p);
2448		r = copy_to_user(buf, ptr + off, bytes);
2449		kunmap_local(ptr);
2450		if (r)
2451			return -EFAULT;
2452
2453		size -= bytes;
2454		*pos += bytes;
2455		result += bytes;
2456	}
2457
2458	return result;
2459}
2460
2461/*
2462 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2463 *
2464 * This function is used to write memory that has been mapped to the
2465 * GPU and the known addresses are not physical addresses but instead
2466 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2467 */
2468static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2469				 size_t size, loff_t *pos)
2470{
2471	struct amdgpu_device *adev = file_inode(f)->i_private;
2472	struct iommu_domain *dom;
2473	ssize_t result = 0;
2474	int r;
2475
2476	dom = iommu_get_domain_for_dev(adev->dev);
2477
2478	while (size) {
2479		phys_addr_t addr = *pos & PAGE_MASK;
2480		loff_t off = *pos & ~PAGE_MASK;
2481		size_t bytes = PAGE_SIZE - off;
2482		unsigned long pfn;
2483		struct page *p;
2484		void *ptr;
2485
2486		bytes = min(bytes, size);
2487
2488		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2489
2490		pfn = addr >> PAGE_SHIFT;
2491		if (!pfn_valid(pfn))
2492			return -EPERM;
2493
2494		p = pfn_to_page(pfn);
2495		if (p->mapping != adev->mman.bdev.dev_mapping)
2496			return -EPERM;
2497
2498		ptr = kmap_local_page(p);
2499		r = copy_from_user(ptr + off, buf, bytes);
2500		kunmap_local(ptr);
2501		if (r)
2502			return -EFAULT;
2503
2504		size -= bytes;
2505		*pos += bytes;
2506		result += bytes;
2507	}
2508
2509	return result;
2510}
2511
2512static const struct file_operations amdgpu_ttm_iomem_fops = {
2513	.owner = THIS_MODULE,
2514	.read = amdgpu_iomem_read,
2515	.write = amdgpu_iomem_write,
2516	.llseek = default_llseek
2517};
2518
2519#endif
2520
2521void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2522{
2523#if defined(CONFIG_DEBUG_FS)
2524	struct drm_minor *minor = adev_to_drm(adev)->primary;
2525	struct dentry *root = minor->debugfs_root;
2526
2527	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2528				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2529	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2530			    &amdgpu_ttm_iomem_fops);
2531	debugfs_create_file("ttm_page_pool", 0444, root, adev,
2532			    &amdgpu_ttm_page_pool_fops);
2533	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2534							     TTM_PL_VRAM),
2535					    root, "amdgpu_vram_mm");
2536	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2537							     TTM_PL_TT),
2538					    root, "amdgpu_gtt_mm");
2539	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2540							     AMDGPU_PL_GDS),
2541					    root, "amdgpu_gds_mm");
2542	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2543							     AMDGPU_PL_GWS),
2544					    root, "amdgpu_gws_mm");
2545	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2546							     AMDGPU_PL_OA),
2547					    root, "amdgpu_oa_mm");
2548
 
 
2549#endif
2550}
v5.4
  32
  33#include <linux/dma-mapping.h>
  34#include <linux/iommu.h>
  35#include <linux/hmm.h>
  36#include <linux/pagemap.h>
  37#include <linux/sched/task.h>
 
  38#include <linux/seq_file.h>
  39#include <linux/slab.h>
  40#include <linux/swap.h>
  41#include <linux/swiotlb.h>
 
 
  42
  43#include <drm/ttm/ttm_bo_api.h>
  44#include <drm/ttm/ttm_bo_driver.h>
  45#include <drm/ttm/ttm_placement.h>
  46#include <drm/ttm/ttm_module.h>
  47#include <drm/ttm/ttm_page_alloc.h>
  48
  49#include <drm/drm_debugfs.h>
  50#include <drm/amdgpu_drm.h>
  51
  52#include "amdgpu.h"
  53#include "amdgpu_object.h"
  54#include "amdgpu_trace.h"
  55#include "amdgpu_amdkfd.h"
  56#include "amdgpu_sdma.h"
 
 
 
 
  57#include "bif/bif_4_1_d.h"
  58
  59static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
  60			     struct ttm_mem_reg *mem, unsigned num_pages,
  61			     uint64_t offset, unsigned window,
  62			     struct amdgpu_ring *ring,
  63			     uint64_t *addr);
  64
  65static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
  66static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
  67
  68static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 
 
 
 
 
 
 
 
  69{
  70	return 0;
  71}
  72
  73/**
  74 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
  75 * memory request.
  76 *
  77 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
  78 * @type: The type of memory requested
  79 * @man: The memory type manager for each domain
  80 *
  81 * This is called by ttm_bo_init_mm() when a memory type manager is being
  82 * initialized.
  83 */
  84static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
  85				struct ttm_mem_type_manager *man)
  86{
  87	struct amdgpu_device *adev;
  88
  89	adev = amdgpu_ttm_adev(bdev);
  90
  91	switch (type) {
  92	case TTM_PL_SYSTEM:
  93		/* System memory */
  94		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
  95		man->available_caching = TTM_PL_MASK_CACHING;
  96		man->default_caching = TTM_PL_FLAG_CACHED;
  97		break;
  98	case TTM_PL_TT:
  99		/* GTT memory  */
 100		man->func = &amdgpu_gtt_mgr_func;
 101		man->gpu_offset = adev->gmc.gart_start;
 102		man->available_caching = TTM_PL_MASK_CACHING;
 103		man->default_caching = TTM_PL_FLAG_CACHED;
 104		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 105		break;
 106	case TTM_PL_VRAM:
 107		/* "On-card" video ram */
 108		man->func = &amdgpu_vram_mgr_func;
 109		man->gpu_offset = adev->gmc.vram_start;
 110		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 111			     TTM_MEMTYPE_FLAG_MAPPABLE;
 112		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 113		man->default_caching = TTM_PL_FLAG_WC;
 114		break;
 115	case AMDGPU_PL_GDS:
 116	case AMDGPU_PL_GWS:
 117	case AMDGPU_PL_OA:
 118		/* On-chip GDS memory */
 119		man->func = &ttm_bo_manager_func;
 120		man->gpu_offset = 0;
 121		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
 122		man->available_caching = TTM_PL_FLAG_UNCACHED;
 123		man->default_caching = TTM_PL_FLAG_UNCACHED;
 124		break;
 125	default:
 126		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 127		return -EINVAL;
 128	}
 129	return 0;
 130}
 131
 132/**
 133 * amdgpu_evict_flags - Compute placement flags
 134 *
 135 * @bo: The buffer object to evict
 136 * @placement: Possible destination(s) for evicted BO
 137 *
 138 * Fill in placement data when ttm_bo_evict() is called
 139 */
 140static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 141				struct ttm_placement *placement)
 142{
 143	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 144	struct amdgpu_bo *abo;
 145	static const struct ttm_place placements = {
 146		.fpfn = 0,
 147		.lpfn = 0,
 148		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 
 149	};
 150
 151	/* Don't handle scatter gather BOs */
 152	if (bo->type == ttm_bo_type_sg) {
 153		placement->num_placement = 0;
 154		placement->num_busy_placement = 0;
 155		return;
 156	}
 157
 158	/* Object isn't an AMDGPU object so ignore */
 159	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
 160		placement->placement = &placements;
 161		placement->busy_placement = &placements;
 162		placement->num_placement = 1;
 163		placement->num_busy_placement = 1;
 164		return;
 165	}
 166
 167	abo = ttm_to_amdgpu_bo(bo);
 168	switch (bo->mem.mem_type) {
 
 
 
 
 
 169	case AMDGPU_PL_GDS:
 170	case AMDGPU_PL_GWS:
 171	case AMDGPU_PL_OA:
 
 172		placement->num_placement = 0;
 173		placement->num_busy_placement = 0;
 174		return;
 175
 176	case TTM_PL_VRAM:
 177		if (!adev->mman.buffer_funcs_enabled) {
 178			/* Move to system memory */
 179			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 
 180		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 181			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 182			   amdgpu_bo_in_cpu_visible_vram(abo)) {
 183
 184			/* Try evicting to the CPU inaccessible part of VRAM
 185			 * first, but only set GTT as busy placement, so this
 186			 * BO will be evicted to GTT rather than causing other
 187			 * BOs to be evicted from VRAM
 188			 */
 189			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 190							 AMDGPU_GEM_DOMAIN_GTT);
 
 191			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 192			abo->placements[0].lpfn = 0;
 193			abo->placement.busy_placement = &abo->placements[1];
 194			abo->placement.num_busy_placement = 1;
 195		} else {
 196			/* Move to GTT memory */
 197			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 
 198		}
 199		break;
 200	case TTM_PL_TT:
 
 201	default:
 202		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 203		break;
 204	}
 205	*placement = abo->placement;
 206}
 207
 208/**
 209 * amdgpu_verify_access - Verify access for a mmap call
 210 *
 211 * @bo:	The buffer object to map
 212 * @filp: The file pointer from the process performing the mmap
 213 *
 214 * This is called by ttm_bo_mmap() to verify whether a process
 215 * has the right to mmap a BO to their process space.
 216 */
 217static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
 
 
 
 
 
 
 
 
 218{
 219	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 220
 221	/*
 222	 * Don't verify access for KFD BOs. They don't have a GEM
 223	 * object associated with them.
 224	 */
 225	if (abo->kfd_bo)
 226		return 0;
 227
 228	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 229		return -EPERM;
 230	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
 231					  filp->private_data);
 232}
 233
 234/**
 235 * amdgpu_move_null - Register memory for a buffer object
 236 *
 237 * @bo: The bo to assign the memory to
 238 * @new_mem: The memory to be assigned.
 239 *
 240 * Assign the memory from new_mem to the memory of the buffer object bo.
 241 */
 242static void amdgpu_move_null(struct ttm_buffer_object *bo,
 243			     struct ttm_mem_reg *new_mem)
 244{
 245	struct ttm_mem_reg *old_mem = &bo->mem;
 246
 247	BUG_ON(old_mem->mm_node != NULL);
 248	*old_mem = *new_mem;
 249	new_mem->mm_node = NULL;
 250}
 251
 252/**
 253 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 254 *
 255 * @bo: The bo to assign the memory to.
 256 * @mm_node: Memory manager node for drm allocator.
 257 * @mem: The region where the bo resides.
 258 *
 259 */
 260static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 261				    struct drm_mm_node *mm_node,
 262				    struct ttm_mem_reg *mem)
 263{
 264	uint64_t addr = 0;
 265
 266	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
 267		addr = mm_node->start << PAGE_SHIFT;
 268		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 269	}
 270	return addr;
 271}
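
/*
 * Worked example (hypothetical values): for a GTT node whose start is page
 * 0x100 and a manager gpu_offset of 0x8000000000, this returns
 * (0x100 << 12) + 0x8000000000 = 0x8000100000 with 4 KiB pages.
 */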
 272
 273/**
 274 * amdgpu_find_mm_node - Helper that finds the drm_mm_node containing @offset.
 275 * It also adjusts @offset to be relative to the start of the returned drm_mm_node.
 276 *
 277 * @mem: The region where the bo resides.
 278 * @offset: The offset that drm_mm_node is used for finding.
 279 *
 280 */
 281static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
 282					       unsigned long *offset)
 283{
 284	struct drm_mm_node *mm_node = mem->mm_node;
 285
 286	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
 287		*offset -= (mm_node->size << PAGE_SHIFT);
 288		++mm_node;
 
 
 289	}
 290	return mm_node;
 
 
 291}
 292
 293/**
 294 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
 
 
 
 
 
 
 
 295 *
 296 * The function copies @size bytes from {src->mem + src->offset} to
 297 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 298 * move and different for a BO to BO copy.
 299 *
 300 * @f: Returns the last fence if multiple jobs are submitted.
 301 */
 302int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 303			       struct amdgpu_copy_mem *src,
 304			       struct amdgpu_copy_mem *dst,
 305			       uint64_t size,
 306			       struct dma_resv *resv,
 307			       struct dma_fence **f)
 308{
 309	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 310	struct drm_mm_node *src_mm, *dst_mm;
 311	uint64_t src_node_start, dst_node_start, src_node_size,
 312		 dst_node_size, src_page_offset, dst_page_offset;
 313	struct dma_fence *fence = NULL;
 314	int r = 0;
 315	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
 316					AMDGPU_GPU_PAGE_SIZE);
 317
 318	if (!adev->mman.buffer_funcs_enabled) {
 319		DRM_ERROR("Trying to move memory with ring turned off.\n");
 320		return -EINVAL;
 321	}
 322
 323	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
 324	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
 325					     src->offset;
 326	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
 327	src_page_offset = src_node_start & (PAGE_SIZE - 1);
 328
 329	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
 330	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
 331					     dst->offset;
 332	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
 333	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
 334
 335	mutex_lock(&adev->mman.gtt_window_lock);
 336
 337	while (size) {
 338		unsigned long cur_size;
 339		uint64_t from = src_node_start, to = dst_node_start;
 340		struct dma_fence *next;
 341
 342		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
 343		 * begins at an offset, then adjust the size accordingly
 344		 */
 345		cur_size = min3(min(src_node_size, dst_node_size), size,
 346				GTT_MAX_BYTES);
 347		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
 348		    cur_size + dst_page_offset > GTT_MAX_BYTES)
 349			cur_size -= max(src_page_offset, dst_page_offset);
 350
 351		/* Map only what needs to be accessed. Map src to window 0 and
 352		 * dst to window 1
 353		 */
 354		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
 355			r = amdgpu_map_buffer(src->bo, src->mem,
 356					PFN_UP(cur_size + src_page_offset),
 357					src_node_start, 0, ring,
 358					&from);
 359			if (r)
 360				goto error;
 361			/* Adjust the offset because amdgpu_map_buffer returns
 362			 * start of mapped page
 363			 */
 364			from += src_page_offset;
 365		}
 366
 367		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
 368			r = amdgpu_map_buffer(dst->bo, dst->mem,
 369					PFN_UP(cur_size + dst_page_offset),
 370					dst_node_start, 1, ring,
 371					&to);
 372			if (r)
 373				goto error;
 374			to += dst_page_offset;
 375		}
 376
 377		r = amdgpu_copy_buffer(ring, from, to, cur_size,
 378				       resv, &next, false, true);
 379		if (r)
 380			goto error;
 381
 382		dma_fence_put(fence);
 383		fence = next;
 384
 385		size -= cur_size;
 386		if (!size)
 387			break;
 388
 389		src_node_size -= cur_size;
 390		if (!src_node_size) {
 391			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
 392							     src->mem);
 393			src_node_size = (src_mm->size << PAGE_SHIFT);
 394			src_page_offset = 0;
 395		} else {
 396			src_node_start += cur_size;
 397			src_page_offset = src_node_start & (PAGE_SIZE - 1);
 398		}
 399		dst_node_size -= cur_size;
 400		if (!dst_node_size) {
 401			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
 402							     dst->mem);
 403			dst_node_size = (dst_mm->size << PAGE_SHIFT);
 404			dst_page_offset = 0;
 405		} else {
 406			dst_node_start += cur_size;
 407			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
 408		}
 409	}
 410error:
 411	mutex_unlock(&adev->mman.gtt_window_lock);
 412	if (f)
 413		*f = dma_fence_get(fence);
 414	dma_fence_put(fence);
 415	return r;
 416}
 417
 418/**
 419 * amdgpu_move_blit - Copy an entire buffer to another buffer
 420 *
 421 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 422 * help move buffers to and from VRAM.
 423 */
 424static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 425			    bool evict, bool no_wait_gpu,
 426			    struct ttm_mem_reg *new_mem,
 427			    struct ttm_mem_reg *old_mem)
 428{
 429	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
 430	struct amdgpu_copy_mem src, dst;
 431	struct dma_fence *fence = NULL;
 432	int r;
 433
 434	src.bo = bo;
 435	dst.bo = bo;
 436	src.mem = old_mem;
 437	dst.mem = new_mem;
 438	src.offset = 0;
 439	dst.offset = 0;
 440
 441	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 442				       new_mem->num_pages << PAGE_SHIFT,
 
 443				       bo->base.resv, &fence);
 444	if (r)
 445		goto error;
 446
 447	/* clear the space being freed */
 448	if (old_mem->mem_type == TTM_PL_VRAM &&
 449	    (ttm_to_amdgpu_bo(bo)->flags &
 450	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 451		struct dma_fence *wipe_fence = NULL;
 452
 453		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
 454				       NULL, &wipe_fence);
 455		if (r) {
 456			goto error;
 457		} else if (wipe_fence) {
 458			dma_fence_put(fence);
 459			fence = wipe_fence;
 460		}
 461	}
 462
 463	/* Always block for VM page tables before committing the new location */
 464	if (bo->type == ttm_bo_type_kernel)
 465		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
 466	else
 467		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 468	dma_fence_put(fence);
 469	return r;
 470
 471error:
 472	if (fence)
 473		dma_fence_wait(fence, false);
 474	dma_fence_put(fence);
 475	return r;
 476}
 477
 478/**
 479 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 
 
 480 *
 481 * Called by amdgpu_bo_move().
 482 */
 483static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 484				struct ttm_operation_ctx *ctx,
 485				struct ttm_mem_reg *new_mem)
 486{
 487	struct amdgpu_device *adev;
 488	struct ttm_mem_reg *old_mem = &bo->mem;
 489	struct ttm_mem_reg tmp_mem;
 490	struct ttm_place placements;
 491	struct ttm_placement placement;
 492	int r;
 493
 494	adev = amdgpu_ttm_adev(bo->bdev);
 
 495
 496	/* create space/pages for new_mem in GTT space */
 497	tmp_mem = *new_mem;
 498	tmp_mem.mm_node = NULL;
 499	placement.num_placement = 1;
 500	placement.placement = &placements;
 501	placement.num_busy_placement = 1;
 502	placement.busy_placement = &placements;
 503	placements.fpfn = 0;
 504	placements.lpfn = 0;
 505	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 506	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 507	if (unlikely(r)) {
 508		pr_err("Failed to find GTT space for blit from VRAM\n");
 509		return r;
 510	}
 511
 512	/* set caching flags */
 513	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
 514	if (unlikely(r)) {
 515		goto out_cleanup;
 516	}
 517
 518	/* Bind the memory to the GTT space */
 519	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
 520	if (unlikely(r)) {
 521		goto out_cleanup;
 
 522	}
 523
 524	/* blit VRAM to GTT */
 525	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
 526	if (unlikely(r)) {
 527		goto out_cleanup;
 528	}
 529
 530	/* move BO (in tmp_mem) to new_mem */
 531	r = ttm_bo_move_ttm(bo, ctx, new_mem);
 532out_cleanup:
 533	ttm_bo_mem_put(bo, &tmp_mem);
 534	return r;
 535}
 536
 537/**
 538 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 539 *
 540 * Called by amdgpu_bo_move().
 541 */
 542static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 543				struct ttm_operation_ctx *ctx,
 544				struct ttm_mem_reg *new_mem)
 545{
 546	struct amdgpu_device *adev;
 547	struct ttm_mem_reg *old_mem = &bo->mem;
 548	struct ttm_mem_reg tmp_mem;
 549	struct ttm_placement placement;
 550	struct ttm_place placements;
 551	int r;
 552
 553	adev = amdgpu_ttm_adev(bo->bdev);
 554
 555	/* make space in GTT for old_mem buffer */
 556	tmp_mem = *new_mem;
 557	tmp_mem.mm_node = NULL;
 558	placement.num_placement = 1;
 559	placement.placement = &placements;
 560	placement.num_busy_placement = 1;
 561	placement.busy_placement = &placements;
 562	placements.fpfn = 0;
 563	placements.lpfn = 0;
 564	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 565	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 566	if (unlikely(r)) {
 567		pr_err("Failed to find GTT space for blit to VRAM\n");
 568		return r;
 569	}
 570
 571	/* move/bind old memory to GTT space */
 572	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
 573	if (unlikely(r)) {
 574		goto out_cleanup;
 575	}
 576
 577	/* copy to VRAM */
 578	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
 579	if (unlikely(r)) {
 580		goto out_cleanup;
 581	}
 582out_cleanup:
 583	ttm_bo_mem_put(bo, &tmp_mem);
 584	return r;
 585}
 586
 587/**
 588 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 589 *
 590 * Called by amdgpu_bo_move()
 591 */
 592static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 593			       struct ttm_mem_reg *mem)
 594{
 595	struct drm_mm_node *nodes = mem->mm_node;
 596
 597	if (mem->mem_type == TTM_PL_SYSTEM ||
 598	    mem->mem_type == TTM_PL_TT)
 599		return true;
 600	if (mem->mem_type != TTM_PL_VRAM)
 601		return false;
 602
 603	/* ttm_mem_reg_ioremap only supports contiguous memory */
 604	if (nodes->size != mem->num_pages)
 
 605		return false;
 606
 607	return ((nodes->start + nodes->size) << PAGE_SHIFT)
 608		<= adev->gmc.visible_vram_size;
 609}
 610
 611/**
 612 * amdgpu_bo_move - Move a buffer object to a new memory location
 613 *
 614 * Called by ttm_bo_handle_move_mem()
 615 */
 616static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 617			  struct ttm_operation_ctx *ctx,
 618			  struct ttm_mem_reg *new_mem)
 
 619{
 620	struct amdgpu_device *adev;
 621	struct amdgpu_bo *abo;
 622	struct ttm_mem_reg *old_mem = &bo->mem;
 623	int r;
 624
 625	/* Can't move a pinned BO */
 
 
 
 
 
 
 626	abo = ttm_to_amdgpu_bo(bo);
 627	if (WARN_ON_ONCE(abo->pin_count > 0))
 628		return -EINVAL;
 629
 630	adev = amdgpu_ttm_adev(bo->bdev);
 631
 632	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 633		amdgpu_move_null(bo, new_mem);
 
 
 634		return 0;
 635	}
 636	if ((old_mem->mem_type == TTM_PL_TT &&
 637	     new_mem->mem_type == TTM_PL_SYSTEM) ||
 638	    (old_mem->mem_type == TTM_PL_SYSTEM &&
 639	     new_mem->mem_type == TTM_PL_TT)) {
 640		/* bind is enough */
 641		amdgpu_move_null(bo, new_mem);
 642		return 0;
 643	}
 644	if (old_mem->mem_type == AMDGPU_PL_GDS ||
 645	    old_mem->mem_type == AMDGPU_PL_GWS ||
 646	    old_mem->mem_type == AMDGPU_PL_OA ||
 
 647	    new_mem->mem_type == AMDGPU_PL_GDS ||
 648	    new_mem->mem_type == AMDGPU_PL_GWS ||
 649	    new_mem->mem_type == AMDGPU_PL_OA) {
 
 650		/* Nothing to save here */
 651		amdgpu_move_null(bo, new_mem);
 
 652		return 0;
 653	}
 654
 655	if (!adev->mman.buffer_funcs_enabled) {
 656		r = -ENODEV;
 657		goto memcpy;
 
 
 
 
 658	}
 659
 660	if (old_mem->mem_type == TTM_PL_VRAM &&
 661	    new_mem->mem_type == TTM_PL_SYSTEM) {
 662		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
 663	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 664		   new_mem->mem_type == TTM_PL_VRAM) {
 665		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
 666	} else {
 667		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
 668				     new_mem, old_mem);
 
 669	}
 670
 
 
 
 
 
 
 671	if (r) {
 672memcpy:
 673		/* Check that all memory is CPU accessible */
 674		if (!amdgpu_mem_visible(adev, old_mem) ||
 675		    !amdgpu_mem_visible(adev, new_mem)) {
 676			pr_err("Move buffer fallback to memcpy unavailable\n");
 677			return r;
 678		}
 679
 680		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 681		if (r)
 682			return r;
 683	}
 684
 685	if (bo->type == ttm_bo_type_device &&
 686	    new_mem->mem_type == TTM_PL_VRAM &&
 687	    old_mem->mem_type != TTM_PL_VRAM) {
 688		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
 689		 * accesses the BO after it's moved.
 690		 */
 691		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 692	}
 693
 694	/* update statistics */
 695	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
 696	return 0;
 697}
 698
 699/**
 700 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 701 *
 702 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 703 */
 704static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 
 705{
 706	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 707	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 708	struct drm_mm_node *mm_node = mem->mm_node;
 709
 710	mem->bus.addr = NULL;
 711	mem->bus.offset = 0;
 712	mem->bus.size = mem->num_pages << PAGE_SHIFT;
 713	mem->bus.base = 0;
 714	mem->bus.is_iomem = false;
 715	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
 716		return -EINVAL;
 717	switch (mem->mem_type) {
 718	case TTM_PL_SYSTEM:
 719		/* system memory */
 720		return 0;
 721	case TTM_PL_TT:
 
 722		break;
 723	case TTM_PL_VRAM:
 724		mem->bus.offset = mem->start << PAGE_SHIFT;
 725		/* check if it's visible */
 726		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
 727			return -EINVAL;
 728		/* Only physically contiguous buffers apply. In a contiguous
 729		 * buffer, size of the first mm_node would match the number of
 730		 * pages in ttm_mem_reg.
 731		 */
 732		if (adev->mman.aper_base_kaddr &&
 733		    (mm_node->size == mem->num_pages))
 734			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 735					mem->bus.offset;
 736
 737		mem->bus.base = adev->gmc.aper_base;
 738		mem->bus.is_iomem = true;
 739		break;
 
 
 
 
 
 
 740	default:
 741		return -EINVAL;
 742	}
 743	return 0;
 744}
 745
 746static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 
 747{
 
 
 
 
 
 
 
 
 
 
 748}
 749
 750static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 751					   unsigned long page_offset)
 
 
 
 
 
 
 
 
 752{
 753	struct drm_mm_node *mm;
 754	unsigned long offset = (page_offset << PAGE_SHIFT);
 
 
 
 
 755
 756	mm = amdgpu_find_mm_node(&bo->mem, &offset);
 757	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
 758		(offset >> PAGE_SHIFT);
 759}
 760
 761/*
 762 * TTM backend functions.
 763 */
 764struct amdgpu_ttm_tt {
 765	struct ttm_dma_tt	ttm;
 
 766	u64			offset;
 767	uint64_t		userptr;
 768	struct task_struct	*usertask;
 769	uint32_t		userflags;
 770#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 771	struct hmm_range	*range;
 772#endif
 773};
 774
 775/**
 
 
 
 776 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 777 * memory and start HMM tracking CPU page table update
 778 *
 779 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
 780 * only once afterwards to stop HMM tracking.
 781 */
 782#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 783
 784#define MAX_RETRY_HMM_RANGE_FAULT	16
 785
 786int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 787{
 788	struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
 789	struct ttm_tt *ttm = bo->tbo.ttm;
 790	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 791	struct mm_struct *mm = gtt->usertask->mm;
 792	unsigned long start = gtt->userptr;
 793	struct vm_area_struct *vma;
 794	struct hmm_range *range;
 795	unsigned long i;
 796	uint64_t *pfns;
 797	int r = 0;
 798
 799	if (!mm) /* Happens during process shutdown */
 800		return -ESRCH;
 801
 802	if (unlikely(!mirror)) {
 803		DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
 804		r = -EFAULT;
 805		goto out;
 806	}
 807
 808	vma = find_vma(mm, start);
 809	if (unlikely(!vma || start < vma->vm_start)) {
 
 
 
 
 810		r = -EFAULT;
 811		goto out;
 812	}
 813	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
 814		vma->vm_file)) {
 815		r = -EPERM;
 816		goto out;
 817	}
 818
 819	range = kzalloc(sizeof(*range), GFP_KERNEL);
 820	if (unlikely(!range)) {
 821		r = -ENOMEM;
 822		goto out;
 823	}
 
 
 824
 825	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
 826	if (unlikely(!pfns)) {
 827		r = -ENOMEM;
 828		goto out_free_ranges;
 829	}
 830
 831	amdgpu_hmm_init_range(range);
 832	range->default_flags = range->flags[HMM_PFN_VALID];
 833	range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
 834				0 : range->flags[HMM_PFN_WRITE];
 835	range->pfn_flags_mask = 0;
 836	range->pfns = pfns;
 837	range->start = start;
 838	range->end = start + ttm->num_pages * PAGE_SIZE;
 839
 840	hmm_range_register(range, mirror);
 
 841
 842	/*
 843	 * Just wait for range to be valid, safe to ignore return value as we
 844	 * will use the return value of hmm_range_fault() below under the
 845	 * mmap_sem to ascertain the validity of the range.
 846	 */
 847	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
 848
 849	down_read(&mm->mmap_sem);
 850	r = hmm_range_fault(range, 0);
 851	up_read(&mm->mmap_sem);
 852
 853	if (unlikely(r < 0))
 854		goto out_free_pfns;
 855
 856	for (i = 0; i < ttm->num_pages; i++) {
 857		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
 858		if (unlikely(!pages[i])) {
 859			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
 860			       i, pfns[i]);
 861			r = -ENOMEM;
 862
 863			goto out_free_pfns;
 864		}
 865	}
 866
 867	gtt->range = range;
 868
 869	return 0;
 870
 871out_free_pfns:
 872	hmm_range_unregister(range);
 873	kvfree(pfns);
 874out_free_ranges:
 875	kfree(range);
 876out:
 877	return r;
 878}
 879
 880/**
 881 * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table changes
 882 * Check whether the pages backing this ttm range have been invalidated
 883 *
 884 * Returns: true if pages are still valid
 885 */
 886bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 
 887{
 888	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 889	bool r = false;
 890
 891	if (!gtt || !gtt->userptr)
 892		return false;
 893
 894	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
 895		gtt->userptr, ttm->num_pages);
 896
 897	WARN_ONCE(!gtt->range || !gtt->range->pfns,
 898		"No user pages to check\n");
 899
 900	if (gtt->range) {
 901		r = hmm_range_valid(gtt->range);
 902		hmm_range_unregister(gtt->range);
 903
 904		kvfree(gtt->range->pfns);
 905		kfree(gtt->range);
 906		gtt->range = NULL;
 907	}
 908
 909	return r;
 910}
 911#endif
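
/*
 * Illustrative sketch, example only: the expected pairing of
 * amdgpu_ttm_tt_get_user_pages() and amdgpu_ttm_tt_get_user_pages_done().
 * The helper name, the -EAGAIN retry convention and the caller-supplied
 * pages array are assumptions of this example; it also assumes
 * CONFIG_DRM_AMDGPU_USERPTR and is compiled out.
 */
#if 0	/* example only */
static int amdgpu_ttm_example_userptr_cycle(struct amdgpu_bo *bo,
					    struct page **pages)
{
	int r;

	/* Fault the user pages in and start HMM tracking. */
	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
	if (r)
		return r;

	/* ... validate and bind the BO while the range is tracked ... */

	/* Stop tracking; ask the caller to retry if pages were invalidated. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
		return -EAGAIN;

	return 0;
}
#endif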
 912
 913/**
 914 * amdgpu_ttm_tt_set_user_pages - Copy pages in, releasing old pages as necessary.
 915 *
 916 * Called by amdgpu_cs_list_validate(). This creates the page list
 917 * that backs user memory and will ultimately be mapped into the device
 918 * address space.
 919 */
 920void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 921{
 922	unsigned long i;
 923
 924	for (i = 0; i < ttm->num_pages; ++i)
 925		ttm->pages[i] = pages ? pages[i] : NULL;
 926}
 927
 928/**
 929 * amdgpu_ttm_tt_pin_userptr - prepare the SG table with the user pages
 930 *
 931 * Called by amdgpu_ttm_backend_bind()
 932 **/
 933static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 934{
 935	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 936	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 937	unsigned nents;
 938	int r;
 939
 940	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 941	enum dma_data_direction direction = write ?
 942		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
 943
 944	/* Allocate an SG array and squash pages into it */
 945	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 946				      ttm->num_pages << PAGE_SHIFT,
 947				      GFP_KERNEL);
 948	if (r)
 949		goto release_sg;
 950
 951	/* Map SG to device */
 952	r = -ENOMEM;
 953	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 954	if (nents != ttm->sg->nents)
 955		goto release_sg;
 956
 957	/* convert SG to linear array of pages and dma addresses */
 958	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 959					 gtt->ttm.dma_address, ttm->num_pages);
 960
 961	return 0;
 962
 963release_sg:
 964	kfree(ttm->sg);
 
 965	return r;
 966}
 967
 968/**
 969 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 970 */
 971static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 
 972{
 973	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 974	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 975
 976	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 977	enum dma_data_direction direction = write ?
 978		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 979
 980	/* double check that we don't free the table twice */
 981	if (!ttm->sg->sgl)
 982		return;
 983
 984	/* unmap the pages mapped to the device */
 985	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
 
 986
 987	sg_free_table(ttm->sg);
 988
 989#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 990	if (gtt->range &&
 991	    ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
 992						      gtt->range->pfns[0]))
 993		WARN_ONCE(1, "Missing get_user_page_done\n");
 994#endif
 
 
 
 
 
 
 
 
 
 995}
 996
 997int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 998				struct ttm_buffer_object *tbo,
 999				uint64_t flags)
1000{
1001	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1002	struct ttm_tt *ttm = tbo->ttm;
1003	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1004	int r;
1005
1006	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
1007		uint64_t page_idx = 1;
1008
1009		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1010				ttm->pages, gtt->ttm.dma_address, flags);
1011		if (r)
1012			goto gart_bind_fail;
1013
1014		/* Patch mtype of the second part BO */
1015		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1016		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1017
1018		r = amdgpu_gart_bind(adev,
1019				gtt->offset + (page_idx << PAGE_SHIFT),
1020				ttm->num_pages - page_idx,
1021				&ttm->pages[page_idx],
1022				&(gtt->ttm.dma_address[page_idx]), flags);
1023	} else {
1024		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1025				     ttm->pages, gtt->ttm.dma_address, flags);
1026	}
1027
1028gart_bind_fail:
1029	if (r)
1030		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1031			  ttm->num_pages, gtt->offset);
1032
1033	return r;
1034}
1035
1036/**
1037 * amdgpu_ttm_backend_bind - Bind GTT memory
1038 *
1039 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1040 * This handles binding GTT memory to the device address space.
1041 */
1042static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
1043				   struct ttm_mem_reg *bo_mem)
 
1044{
1045	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1046	struct amdgpu_ttm_tt *gtt = (void*)ttm;
1047	uint64_t flags;
1048	int r = 0;
 
 
 
 
 
 
1049
1050	if (gtt->userptr) {
1051		r = amdgpu_ttm_tt_pin_userptr(ttm);
1052		if (r) {
1053			DRM_ERROR("failed to pin userptr\n");
1054			return r;
1055		}
1056	}
 
1057	if (!ttm->num_pages) {
1058		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1059		     ttm->num_pages, bo_mem, ttm);
1060	}
1061
1062	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1063	    bo_mem->mem_type == AMDGPU_PL_GWS ||
1064	    bo_mem->mem_type == AMDGPU_PL_OA)
1065		return -EINVAL;
1066
1067	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1068		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1069		return 0;
1070	}
1071
1072	/* compute PTE flags relevant to this BO memory */
1073	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1074
1075	/* bind pages into GART page tables */
1076	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1077	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1078		ttm->pages, gtt->ttm.dma_address, flags);
1079
1080	if (r)
1081		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1082			  ttm->num_pages, gtt->offset);
1083	return r;
1084}
1085
1086/**
1087 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 
 
 
 
 
1088 */
1089int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1090{
1091	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1092	struct ttm_operation_ctx ctx = { false, false };
1093	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
1094	struct ttm_mem_reg tmp;
1095	struct ttm_placement placement;
1096	struct ttm_place placements;
1097	uint64_t addr, flags;
1098	int r;
1099
1100	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1101		return 0;
1102
1103	addr = amdgpu_gmc_agp_addr(bo);
1104	if (addr != AMDGPU_BO_INVALID_OFFSET) {
1105		bo->mem.start = addr >> PAGE_SHIFT;
1106	} else {
1107
1108		/* allocate GART space */
1109		tmp = bo->mem;
1110		tmp.mm_node = NULL;
1111		placement.num_placement = 1;
1112		placement.placement = &placements;
1113		placement.num_busy_placement = 1;
1114		placement.busy_placement = &placements;
1115		placements.fpfn = 0;
1116		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1117		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
1118			TTM_PL_FLAG_TT;
1119
1120		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1121		if (unlikely(r))
1122			return r;
1123
1124		/* compute PTE flags for this buffer object */
1125		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1126
1127		/* Bind pages */
1128		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1129		r = amdgpu_ttm_gart_bind(adev, bo, flags);
1130		if (unlikely(r)) {
1131			ttm_bo_mem_put(bo, &tmp);
1132			return r;
1133		}
1134
1135		ttm_bo_mem_put(bo, &bo->mem);
1136		bo->mem = tmp;
1137	}
1138
1139	bo->offset = (bo->mem.start << PAGE_SHIFT) +
1140		bo->bdev->man[bo->mem.mem_type].gpu_offset;
1141
1142	return 0;
1143}
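/*
 * Usage sketch (illustrative only): a caller that needs a real GART address
 * for a GTT BO, e.g. before handing it to the hardware, usually does:
 *
 *	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
 *		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 *		if (r)
 *			return r;
 *	}
 *
 * amdgpu_fill_buffer() further down in this file uses the same pattern.
 */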
1144
1145/**
1146 * amdgpu_ttm_recover_gart - Rebind GTT pages
1147 *
1148 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1149 * rebind GTT pages during a GPU reset.
1150 */
1151int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1152{
1153	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1154	uint64_t flags;
1155	int r;
1156
1157	if (!tbo->ttm)
1158		return 0;
1159
1160	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1161	r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1162
1163	return r;
1164}
1165
1166/**
1167 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1168 *
1169 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1170 * ttm_tt_destroy().
1171 */
1172static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
1173{
1174	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1175	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1176	int r;
1177
1178	/* if the pages have userptr pinning then clear that first */
1179	if (gtt->userptr)
1180		amdgpu_ttm_tt_unpin_userptr(ttm);
1181
1182	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1183		return 0;
1184
1185	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1186	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1187	if (r)
1188		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
1189			  gtt->ttm.ttm.num_pages, gtt->offset);
1190	return r;
1191}
1192
1193static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
1194{
1195	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1196
1197	if (gtt->usertask)
1198		put_task_struct(gtt->usertask);
1199
1200	ttm_dma_tt_fini(&gtt->ttm);
1201	kfree(gtt);
1202}
1203
1204static struct ttm_backend_func amdgpu_backend_func = {
1205	.bind = &amdgpu_ttm_backend_bind,
1206	.unbind = &amdgpu_ttm_backend_unbind,
1207	.destroy = &amdgpu_ttm_backend_destroy,
1208};
1209
1210/**
1211 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1212 *
1213 * @bo: The buffer object to create a GTT ttm_tt object around
1214 *
1215 * Called by ttm_tt_create().
1216 */
1217static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1218					   uint32_t page_flags)
1219{
1220	struct amdgpu_device *adev;
1221	struct amdgpu_ttm_tt *gtt;
1222
1223	adev = amdgpu_ttm_adev(bo->bdev);
1224
1225	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1226	if (gtt == NULL) {
1227		return NULL;
1228	}
1229	gtt->ttm.ttm.func = &amdgpu_backend_func;
1230
1231	/* allocate space for the uninitialized page entries */
1232	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1233		kfree(gtt);
1234		return NULL;
1235	}
1236	return &gtt->ttm.ttm;
1237}
1238
1239/**
1240 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1241 *
1242 * Map the pages of a ttm_tt object to an address space visible
1243 * to the underlying device.
1244 */
1245static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1246			struct ttm_operation_ctx *ctx)
1247{
1248	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1249	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1250	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1251
1252	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1253	if (gtt && gtt->userptr) {
1254		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1255		if (!ttm->sg)
1256			return -ENOMEM;
1257
1258		ttm->page_flags |= TTM_PAGE_FLAG_SG;
1259		ttm->state = tt_unbound;
1260		return 0;
1261	}
1262
1263	if (slave && ttm->sg) {
1264		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1265						 gtt->ttm.dma_address,
1266						 ttm->num_pages);
1267		ttm->state = tt_unbound;
1268		return 0;
1269	}
1270
1271#ifdef CONFIG_SWIOTLB
1272	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1273		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1274	}
1275#endif
1276
1277	/* fall back to generic helper to populate the page array
1278	 * and map them to the device */
1279	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1280}
1281
1282/**
1283 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1284 *
1285 * Unmaps pages of a ttm_tt object from the device address space and
1286 * unpopulates the page array backing it.
1287 */
1288static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1289{
1290	struct amdgpu_device *adev;
1291	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1292	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1293
1294	if (gtt && gtt->userptr) {
1295		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1296		kfree(ttm->sg);
1297		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1298		return;
1299	}
1300
1301	if (slave)
1302		return;
1303
1304	adev = amdgpu_ttm_adev(ttm->bdev);
1305
1306#ifdef CONFIG_SWIOTLB
1307	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1308		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1309		return;
1310	}
1311#endif
1312
1313	/* fall back to generic helper to unmap and unpopulate array */
1314	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1315}
1316
1317/**
1318 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1319 * task
1320 *
1321 * @ttm: The ttm_tt object to bind this userptr object to
1322 * @addr:  The address in the current tasks VM space to use
1323 * @flags: Requirements of userptr object.
1324 *
1325 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1326 * to current task
1327 */
1328int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1329			      uint32_t flags)
1330{
1331	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1332
1333	if (gtt == NULL)
1334		return -EINVAL;
1335
1336	gtt->userptr = addr;
1337	gtt->userflags = flags;
1338
1339	if (gtt->usertask)
1340		put_task_struct(gtt->usertask);
1341	gtt->usertask = current->group_leader;
1342	get_task_struct(gtt->usertask);
1343
1344	return 0;
1345}
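/*
 * Usage sketch (illustrative only): the GEM userptr ioctl is expected to wire
 * a freshly created BO to the user address range roughly like this, where bo
 * and args are the ioctl's BO and argument struct (hypothetical names here):
 *
 *	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	if (r)
 *		goto release_object;	// hypothetical cleanup label
 */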
1346
1347/**
1348 * amdgpu_ttm_tt_get_usermm - Return the mm_struct of the task backing this ttm_tt object
1349 */
1350struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1351{
1352	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1353
1354	if (gtt == NULL)
1355		return NULL;
1356
1357	if (gtt->usertask == NULL)
1358		return NULL;
1359
1360	return gtt->usertask->mm;
1361}
1362
1363/**
1364 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1365 * address range for the current task.
1366 *
1367 */
1368bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1369				  unsigned long end)
1370{
1371	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1372	unsigned long size;
1373
1374	if (gtt == NULL || !gtt->userptr)
1375		return false;
1376
1377	/* Return false if no part of the ttm_tt object lies within
1378	 * the range
1379	 */
1380	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1381	if (gtt->userptr > end || gtt->userptr + size <= start)
1382		return false;
1383
1384	return true;
1385}
1386
1387/**
1388 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1389 */
1390bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1391{
1392	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1393
1394	if (gtt == NULL || !gtt->userptr)
1395		return false;
1396
1397	return true;
1398}
1399
1400/**
1401 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1402 */
1403bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1404{
1405	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1406
1407	if (gtt == NULL)
1408		return false;
1409
1410	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1411}
1412
1413/**
1414 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1415 *
1416 * @ttm: The ttm_tt object to compute the flags for
1417 * @mem: The memory registry backing this ttm_tt object
1418 *
1419 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1420 */
1421uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
1422{
1423	uint64_t flags = 0;
1424
1425	if (mem && mem->mem_type != TTM_PL_SYSTEM)
1426		flags |= AMDGPU_PTE_VALID;
1427
1428	if (mem && mem->mem_type == TTM_PL_TT) {
1429		flags |= AMDGPU_PTE_SYSTEM;
1430
1431		if (ttm->caching_state == tt_cached)
1432			flags |= AMDGPU_PTE_SNOOPED;
1433	}
1434
1435	return flags;
1436}
1437
1438/**
1439 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1440 *
1441 * @ttm: The ttm_tt object to compute the flags for
1442 * @mem: The memory registry backing this ttm_tt object
1443 *
1444 * Figure out the flags to use for a VM PTE (Page Table Entry).
1445 */
1446uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1447				 struct ttm_mem_reg *mem)
1448{
1449	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1450
1451	flags |= adev->gart.gart_pte_flags;
1452	flags |= AMDGPU_PTE_READABLE;
1453
1454	if (!amdgpu_ttm_tt_is_readonly(ttm))
1455		flags |= AMDGPU_PTE_WRITEABLE;
1456
1457	return flags;
1458}
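/*
 * Worked example (for illustration): for a cached, writable GTT BO the two
 * helpers above compose
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * plus whatever ASIC specific bits are set in adev->gart.gart_pte_flags.
 * A read-only userptr BO simply loses AMDGPU_PTE_WRITEABLE.
 */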
1459
1460/**
1461 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1462 * object.
1463 *
1464 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1465 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1466 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1467 * used to clean out a memory space.
1468 */
1469static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1470					    const struct ttm_place *place)
1471{
1472	unsigned long num_pages = bo->mem.num_pages;
1473	struct drm_mm_node *node = bo->mem.mm_node;
1474	struct dma_resv_list *flist;
1475	struct dma_fence *f;
1476	int i;
1477
1478	/* Don't evict VM page tables while they are busy, otherwise we can't
1479	 * cleanly handle page faults.
1480	 */
1481	if (bo->type == ttm_bo_type_kernel &&
1482	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
1483		return false;
1484
1485	/* If bo is a KFD BO, check if the bo belongs to the current process.
1486	 * If true, then return false as any KFD process needs all its BOs to
1487	 * be resident to run successfully
1488	 */
1489	flist = dma_resv_get_list(bo->base.resv);
1490	if (flist) {
1491		for (i = 0; i < flist->shared_count; ++i) {
1492			f = rcu_dereference_protected(flist->shared[i],
1493				dma_resv_held(bo->base.resv));
1494			if (amdkfd_fence_check_mm(f, current->mm))
1495				return false;
1496		}
1497	}
1498
1499	switch (bo->mem.mem_type) {
1500	case TTM_PL_TT:
1501		return true;
1502
1503	case TTM_PL_VRAM:
1504		/* Check each drm MM node individually */
1505		while (num_pages) {
1506			if (place->fpfn < (node->start + node->size) &&
1507			    !(place->lpfn && place->lpfn <= node->start))
1508				return true;
1509
1510			num_pages -= node->size;
1511			++node;
1512		}
1513		return false;
1514
1515	default:
1516		break;
1517	}
1518
1519	return ttm_bo_eviction_valuable(bo, place);
1520}
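/*
 * Worked example (for illustration): with a VRAM node covering pages
 * [256, 512) and a place restricted to fpfn = 0, lpfn = 128, the loop above
 * sees place->lpfn (128) <= node->start (256) and therefore skips the node;
 * eviction only becomes "valuable" when at least one node overlaps the
 * requested page range.
 */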
1521
1522/**
1523 * amdgpu_ttm_access_memory - Read or write memory that backs a buffer object.
1524 *
1525 * @bo:  The buffer object to read/write
1526 * @offset:  Offset into buffer object
1527 * @buf:  Secondary buffer to write/read from
1528 * @len: Length in bytes of access
1529 * @write:  true if writing
1530 *
1531 * This is used to access VRAM that backs a buffer object via MMIO
1532 * access for debugging purposes.
1533 */
1534static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1535				    unsigned long offset,
1536				    void *buf, int len, int write)
1537{
1538	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1539	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1540	struct drm_mm_node *nodes;
1541	uint32_t value = 0;
1542	int ret = 0;
1543	uint64_t pos;
1544	unsigned long flags;
1545
1546	if (bo->mem.mem_type != TTM_PL_VRAM)
1547		return -EIO;
1548
1549	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1550	pos = (nodes->start << PAGE_SHIFT) + offset;
1551
1552	while (len && pos < adev->gmc.mc_vram_size) {
1553		uint64_t aligned_pos = pos & ~(uint64_t)3;
1554		uint32_t bytes = 4 - (pos & 3);
1555		uint32_t shift = (pos & 3) * 8;
1556		uint32_t mask = 0xffffffff << shift;
1557
1558		if (len < bytes) {
1559			mask &= 0xffffffff >> (bytes - len) * 8;
1560			bytes = len;
1561		}
1562
1563		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1564		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1565		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1566		if (!write || mask != 0xffffffff)
1567			value = RREG32_NO_KIQ(mmMM_DATA);
1568		if (write) {
1569			value &= ~mask;
1570			value |= (*(uint32_t *)buf << shift) & mask;
1571			WREG32_NO_KIQ(mmMM_DATA, value);
1572		}
1573		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1574		if (!write) {
1575			value = (value & mask) >> shift;
1576			memcpy(buf, &value, bytes);
1577		}
1578
1579		ret += bytes;
1580		buf = (uint8_t *)buf + bytes;
1581		pos += bytes;
1582		len -= bytes;
1583		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1584			++nodes;
1585			pos = (nodes->start << PAGE_SHIFT);
1586		}
1587	}
1588
1589	return ret;
1590}
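/*
 * Worked example (for illustration): reading a single byte at VRAM offset
 * 0x1003 goes through the MM_INDEX/MM_DATA window as follows: aligned_pos is
 * 0x1000, bytes = 1, shift = 24 and mask = 0xff000000, so the code reads the
 * whole dword at 0x1000 and returns only its top byte. Writes of partial
 * dwords do a read-modify-write with the same mask.
 */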
1591
1592static struct ttm_bo_driver amdgpu_bo_driver = {
1593	.ttm_tt_create = &amdgpu_ttm_tt_create,
1594	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
1595	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1596	.invalidate_caches = &amdgpu_invalidate_caches,
1597	.init_mem_type = &amdgpu_init_mem_type,
1598	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1599	.evict_flags = &amdgpu_evict_flags,
1600	.move = &amdgpu_bo_move,
1601	.verify_access = &amdgpu_verify_access,
1602	.move_notify = &amdgpu_bo_move_notify,
1603	.release_notify = &amdgpu_bo_release_notify,
1604	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1605	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1606	.io_mem_free = &amdgpu_ttm_io_mem_free,
1607	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1608	.access_memory = &amdgpu_ttm_access_memory,
1609	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1610};
1611
1612/*
1613 * Firmware Reservation functions
1614 */
1615/**
1616 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1617 *
1618 * @adev: amdgpu_device pointer
1619 *
1620 * free fw reserved vram if it has been reserved.
1621 */
1622static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1623{
1624	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1625		NULL, &adev->fw_vram_usage.va);
1626}
1627
1628/**
1629 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1630 *
1631 * @adev: amdgpu_device pointer
1632 *
1633 * create bo vram reservation from fw.
1634 */
1635static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1636{
1637	struct ttm_operation_ctx ctx = { false, false };
1638	struct amdgpu_bo_param bp;
1639	int r = 0;
1640	int i;
1641	u64 vram_size = adev->gmc.visible_vram_size;
1642	u64 offset = adev->fw_vram_usage.start_offset;
1643	u64 size = adev->fw_vram_usage.size;
1644	struct amdgpu_bo *bo;
1645
1646	memset(&bp, 0, sizeof(bp));
1647	bp.size = adev->fw_vram_usage.size;
1648	bp.byte_align = PAGE_SIZE;
1649	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
1650	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1651		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1652	bp.type = ttm_bo_type_kernel;
1653	bp.resv = NULL;
1654	adev->fw_vram_usage.va = NULL;
1655	adev->fw_vram_usage.reserved_bo = NULL;
1656
1657	if (adev->fw_vram_usage.size > 0 &&
1658		adev->fw_vram_usage.size <= vram_size) {
1659
1660		r = amdgpu_bo_create(adev, &bp,
1661				     &adev->fw_vram_usage.reserved_bo);
1662		if (r)
1663			goto error_create;
1664
1665		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1666		if (r)
1667			goto error_reserve;
1668
1669		/* remove the original mem node and create a new one at the
1670	 * requested position
1671		 */
1672		bo = adev->fw_vram_usage.reserved_bo;
1673		offset = ALIGN(offset, PAGE_SIZE);
1674		for (i = 0; i < bo->placement.num_placement; ++i) {
1675			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1676			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1677		}
1678
1679		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1680		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1681				     &bo->tbo.mem, &ctx);
1682		if (r)
1683			goto error_pin;
1684
1685		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1686			AMDGPU_GEM_DOMAIN_VRAM,
1687			adev->fw_vram_usage.start_offset,
1688			(adev->fw_vram_usage.start_offset +
1689			adev->fw_vram_usage.size));
1690		if (r)
1691			goto error_pin;
1692		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1693			&adev->fw_vram_usage.va);
1694		if (r)
1695			goto error_kmap;
1696
1697		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1698	}
1699	return r;
1700
1701error_kmap:
1702	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1703error_pin:
1704	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1705error_reserve:
1706	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1707error_create:
1708	adev->fw_vram_usage.va = NULL;
1709	adev->fw_vram_usage.reserved_bo = NULL;
1710	return r;
1711}
1712/**
1713 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1714 * gtt/vram related fields.
1715 *
1716 * This initializes all of the memory space pools that the TTM layer
1717 * will need such as the GTT space (system memory mapped to the device),
1718 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1719 * can be mapped per VMID.
1720 */
1721int amdgpu_ttm_init(struct amdgpu_device *adev)
1722{
1723	uint64_t gtt_size;
1724	int r;
1725	u64 vis_vram_limit;
1726	void *stolen_vga_buf;
1727
1728	mutex_init(&adev->mman.gtt_window_lock);
1729
1730	/* No other user of the address space, so set it to 0 */
1731	r = ttm_bo_device_init(&adev->mman.bdev,
1732			       &amdgpu_bo_driver,
1733			       adev->ddev->anon_inode->i_mapping,
1734			       dma_addressing_limited(adev->dev));
1735	if (r) {
1736		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1737		return r;
1738	}
1739	adev->mman.initialized = true;
1740
1741	/* We opt to avoid OOM on system page allocations */
1742	adev->mman.bdev.no_retry = true;
1743
1744	/* Initialize VRAM pool with all of VRAM divided into pages */
1745	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1746				adev->gmc.real_vram_size >> PAGE_SHIFT);
1747	if (r) {
1748		DRM_ERROR("Failed initializing VRAM heap.\n");
1749		return r;
1750	}
1751
1752	/* Reduce size of CPU-visible VRAM if requested */
1753	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1754	if (amdgpu_vis_vram_limit > 0 &&
1755	    vis_vram_limit <= adev->gmc.visible_vram_size)
1756		adev->gmc.visible_vram_size = vis_vram_limit;
1757
1758	/* Change the size here instead of the init above so only lpfn is affected */
1759	amdgpu_ttm_set_buffer_funcs_status(adev, false);
1760#ifdef CONFIG_64BIT
1761	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1762						adev->gmc.visible_vram_size);
1763#endif
1764
1765	/*
1766	 * The reserved VRAM for firmware must be pinned to the specified
1767	 * place in VRAM, so reserve it early.
1768	 */
1769	r = amdgpu_ttm_fw_reserve_vram_init(adev);
1770	if (r) {
1771		return r;
1772	}
1773
1774	/* allocate memory as required for VGA
1775	 * This is used for VGA emulation and pre-OS scanout buffers to
1776	 * avoid display artifacts while transitioning between pre-OS
1777	 * and driver.  */
1778	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1779				    AMDGPU_GEM_DOMAIN_VRAM,
1780				    &adev->stolen_vga_memory,
1781				    NULL, &stolen_vga_buf);
1782	if (r)
1783		return r;
1784	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1785		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1786
1787	/* Compute GTT size, either based on 3/4 of the RAM size
1788	 * or whatever the user passed on module init */
1789	if (amdgpu_gtt_size == -1) {
1790		struct sysinfo si;
1791
1792		si_meminfo(&si);
1793		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1794			       adev->gmc.mc_vram_size),
1795			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
1796	}
1797	else
1798		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1799
1800	/* Initialize GTT memory pool */
1801	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1802	if (r) {
1803		DRM_ERROR("Failed initializing GTT heap.\n");
1804		return r;
1805	}
1806	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1807		 (unsigned)(gtt_size / (1024 * 1024)));
1808
1809	/* Initialize various on-chip memory pools */
1810	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1811			   adev->gds.gds_size);
1812	if (r) {
1813		DRM_ERROR("Failed initializing GDS heap.\n");
1814		return r;
1815	}
1816
1817	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1818			   adev->gds.gws_size);
1819	if (r) {
1820		DRM_ERROR("Failed initializing gws heap.\n");
1821		return r;
1822	}
1823
1824	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1825			   adev->gds.oa_size);
1826	if (r) {
1827		DRM_ERROR("Failed initializing oa heap.\n");
1828		return r;
1829	}
1830
1831	/* Register debugfs entries for amdgpu_ttm */
1832	r = amdgpu_ttm_debugfs_init(adev);
1833	if (r) {
1834		DRM_ERROR("Failed to init debugfs\n");
1835		return r;
1836	}
1837	return 0;
1838}
1839
1840/**
1841 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1842 */
1843void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1844{
1845	void *stolen_vga_buf;
1846	/* return the VGA stolen memory (if any) back to VRAM */
1847	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
1848}
1849
1850/**
1851 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1852 */
1853void amdgpu_ttm_fini(struct amdgpu_device *adev)
1854{
1855	if (!adev->mman.initialized)
1856		return;
1857
1858	amdgpu_ttm_debugfs_fini(adev);
1859	amdgpu_ttm_fw_reserve_vram_fini(adev);
1860	if (adev->mman.aper_base_kaddr)
1861		iounmap(adev->mman.aper_base_kaddr);
1862	adev->mman.aper_base_kaddr = NULL;
1863
1864	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1865	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1866	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1867	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1868	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1869	ttm_bo_device_release(&adev->mman.bdev);
1870	adev->mman.initialized = false;
1871	DRM_INFO("amdgpu: ttm finalized\n");
1872}
1873
1874/**
1875 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1876 *
1877 * @adev: amdgpu_device pointer
1878 * @enable: true when we can use buffer functions.
1879 *
1880 * Enable/disable use of buffer functions during suspend/resume. This should
1881 * only be called at bootup or when userspace isn't running.
1882 */
1883void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1884{
1885	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1886	uint64_t size;
1887	int r;
1888
1889	if (!adev->mman.initialized || adev->in_gpu_reset ||
1890	    adev->mman.buffer_funcs_enabled == enable)
1891		return;
1892
1893	if (enable) {
1894		struct amdgpu_ring *ring;
1895		struct drm_sched_rq *rq;
1896
1897		ring = adev->mman.buffer_funcs_ring;
1898		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1899		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
1900		if (r) {
1901			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1902				  r);
1903			return;
1904		}
1905	} else {
1906		drm_sched_entity_destroy(&adev->mman.entity);
1907		dma_fence_put(man->move);
1908		man->move = NULL;
1909	}
1910
1911	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1912	if (enable)
1913		size = adev->gmc.real_vram_size;
1914	else
1915		size = adev->gmc.visible_vram_size;
1916	man->size = size >> PAGE_SHIFT;
1917	adev->mman.buffer_funcs_enabled = enable;
1918}
1919
1920int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1921{
1922	struct drm_file *file_priv = filp->private_data;
1923	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
1924
1925	if (adev == NULL)
1926		return -EINVAL;
1927
1928	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1929}
1930
1931static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1932			     struct ttm_mem_reg *mem, unsigned num_pages,
1933			     uint64_t offset, unsigned window,
1934			     struct amdgpu_ring *ring,
1935			     uint64_t *addr)
1936{
1937	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1938	struct amdgpu_device *adev = ring->adev;
1939	struct ttm_tt *ttm = bo->ttm;
1940	struct amdgpu_job *job;
1941	unsigned num_dw, num_bytes;
1942	dma_addr_t *dma_address;
1943	struct dma_fence *fence;
1944	uint64_t src_addr, dst_addr;
1945	uint64_t flags;
1946	int r;
1947
1948	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1949	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1950
1951	*addr = adev->gmc.gart_start;
1952	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1953		AMDGPU_GPU_PAGE_SIZE;
1954
1955	num_dw = adev->mman.buffer_funcs->copy_num_dw;
1956	while (num_dw & 0x7)
1957		num_dw++;
1958
1959	num_bytes = num_pages * 8;
1960
1961	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
1962	if (r)
1963		return r;
1964
1965	src_addr = num_dw * 4;
1966	src_addr += job->ibs[0].gpu_addr;
1967
1968	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
1969	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
1970	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
1971				dst_addr, num_bytes);
1972
1973	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1974	WARN_ON(job->ibs[0].length_dw > num_dw);
1975
1976	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
1977	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
1978	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
1979			    &job->ibs[0].ptr[num_dw]);
1980	if (r)
1981		goto error_free;
1982
1983	r = amdgpu_job_submit(job, &adev->mman.entity,
1984			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1985	if (r)
1986		goto error_free;
1987
1988	dma_fence_put(fence);
1989
1990	return r;
1991
1992error_free:
1993	amdgpu_job_free(job);
1994	return r;
1995}
1996
1997int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1998		       uint64_t dst_offset, uint32_t byte_count,
1999		       struct dma_resv *resv,
2000		       struct dma_fence **fence, bool direct_submit,
2001		       bool vm_needs_flush)
2002{
2003	struct amdgpu_device *adev = ring->adev;
2004	struct amdgpu_job *job;
2005
2006	uint32_t max_bytes;
2007	unsigned num_loops, num_dw;
2008	unsigned i;
2009	int r;
2010
2011	if (direct_submit && !ring->sched.ready) {
2012		DRM_ERROR("Trying to move memory with ring turned off.\n");
2013		return -EINVAL;
2014	}
2015
2016	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2017	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2018	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
2019
2020	/* for IB padding */
2021	while (num_dw & 0x7)
2022		num_dw++;
2023
2024	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2025	if (r)
2026		return r;
2027
2028	if (vm_needs_flush) {
2029		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2030		job->vm_needs_flush = true;
2031	}
2032	if (resv) {
2033		r = amdgpu_sync_resv(adev, &job->sync, resv,
2034				     AMDGPU_FENCE_OWNER_UNDEFINED,
2035				     false);
2036		if (r) {
2037			DRM_ERROR("sync failed (%d).\n", r);
2038			goto error_free;
2039		}
2040	}
2041
2042	for (i = 0; i < num_loops; i++) {
2043		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2044
2045		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2046					dst_offset, cur_size_in_bytes);
2047
2048		src_offset += cur_size_in_bytes;
2049		dst_offset += cur_size_in_bytes;
2050		byte_count -= cur_size_in_bytes;
2051	}
2052
2053	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2054	WARN_ON(job->ibs[0].length_dw > num_dw);
2055	if (direct_submit)
2056		r = amdgpu_job_submit_direct(job, ring, fence);
2057	else
2058		r = amdgpu_job_submit(job, &adev->mman.entity,
2059				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2060	if (r)
2061		goto error_free;
2062
2063	return r;
2064
2065error_free:
2066	amdgpu_job_free(job);
2067	DRM_ERROR("Error scheduling IBs (%d)\n", r);
2068	return r;
2069}
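/*
 * Usage sketch (illustrative only): a synchronous copy on the buffer funcs
 * ring could look like this, waiting on the returned fence before the
 * destination is used:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_copy_buffer(ring, src_addr, dst_addr, size, NULL, &fence,
 *			       false, false);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */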
2070
2071int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2072		       uint32_t src_data,
2073		       struct dma_resv *resv,
2074		       struct dma_fence **fence)
2075{
2076	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2077	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2078	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2079
2080	struct drm_mm_node *mm_node;
2081	unsigned long num_pages;
2082	unsigned int num_loops, num_dw;
2083
2084	struct amdgpu_job *job;
2085	int r;
2086
2087	if (!adev->mman.buffer_funcs_enabled) {
2088		DRM_ERROR("Trying to clear memory with ring turned off.\n");
2089		return -EINVAL;
2090	}
2091
2092	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2093		r = amdgpu_ttm_alloc_gart(&bo->tbo);
2094		if (r)
2095			return r;
2096	}
2097
2098	num_pages = bo->tbo.num_pages;
2099	mm_node = bo->tbo.mem.mm_node;
2100	num_loops = 0;
2101	while (num_pages) {
2102		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2103
2104		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
2105		num_pages -= mm_node->size;
2106		++mm_node;
2107	}
2108	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2109
2110	/* for IB padding */
2111	num_dw += 64;
2112
2113	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2114	if (r)
2115		return r;
2116
2117	if (resv) {
2118		r = amdgpu_sync_resv(adev, &job->sync, resv,
2119				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
2120		if (r) {
2121			DRM_ERROR("sync failed (%d).\n", r);
2122			goto error_free;
2123		}
2124	}
2125
2126	num_pages = bo->tbo.num_pages;
2127	mm_node = bo->tbo.mem.mm_node;
2128
2129	while (num_pages) {
2130		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2131		uint64_t dst_addr;
2132
2133		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2134		while (byte_count) {
2135			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2136							   max_bytes);
2137
2138			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2139						dst_addr, cur_size_in_bytes);
2140
2141			dst_addr += cur_size_in_bytes;
2142			byte_count -= cur_size_in_bytes;
2143		}
2144
2145		num_pages -= mm_node->size;
2146		++mm_node;
2147	}
2148
2149	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2150	WARN_ON(job->ibs[0].length_dw > num_dw);
2151	r = amdgpu_job_submit(job, &adev->mman.entity,
2152			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2153	if (r)
2154		goto error_free;
2155
2156	return 0;
2157
2158error_free:
2159	amdgpu_job_free(job);
2160	return r;
2161}
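/*
 * Usage sketch (illustrative only): clearing a freshly allocated VRAM BO to
 * zero and waiting for the SDMA fill to finish might look like:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_fill_buffer(abo, 0, NULL, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */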
2162
2163#if defined(CONFIG_DEBUG_FS)
2164
2165static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2166{
2167	struct drm_info_node *node = (struct drm_info_node *)m->private;
2168	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
2169	struct drm_device *dev = node->minor->dev;
2170	struct amdgpu_device *adev = dev->dev_private;
2171	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
2172	struct drm_printer p = drm_seq_file_printer(m);
2173
2174	man->func->debug(man, &p);
2175	return 0;
2176}
2177
2178static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2179	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2180	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2181	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2182	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2183	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
2184	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2185#ifdef CONFIG_SWIOTLB
2186	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2187#endif
2188};
2189
2190/**
2191 * amdgpu_ttm_vram_read - Linear read access to VRAM
2192 *
2193 * Accesses VRAM via MMIO for debugging purposes.
2194 */
2195static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2196				    size_t size, loff_t *pos)
2197{
2198	struct amdgpu_device *adev = file_inode(f)->i_private;
2199	ssize_t result = 0;
2200	int r;
2201
2202	if (size & 0x3 || *pos & 0x3)
2203		return -EINVAL;
2204
2205	if (*pos >= adev->gmc.mc_vram_size)
2206		return -ENXIO;
2207
2208	while (size) {
2209		unsigned long flags;
2210		uint32_t value;
2211
2212		if (*pos >= adev->gmc.mc_vram_size)
2213			return result;
2214
2215		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2216		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2217		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2218		value = RREG32_NO_KIQ(mmMM_DATA);
2219		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2220
2221		r = put_user(value, (uint32_t *)buf);
2222		if (r)
2223			return r;
2224
2225		result += 4;
2226		buf += 4;
2227		*pos += 4;
2228		size -= 4;
2229	}
2230
2231	return result;
2232}
2233
2234/**
2235 * amdgpu_ttm_vram_write - Linear write access to VRAM
2236 *
2237 * Accesses VRAM via MMIO for debugging purposes.
2238 */
2239static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2240				    size_t size, loff_t *pos)
2241{
2242	struct amdgpu_device *adev = file_inode(f)->i_private;
2243	ssize_t result = 0;
2244	int r;
2245
2246	if (size & 0x3 || *pos & 0x3)
2247		return -EINVAL;
2248
2249	if (*pos >= adev->gmc.mc_vram_size)
2250		return -ENXIO;
2251
2252	while (size) {
2253		unsigned long flags;
2254		uint32_t value;
2255
2256		if (*pos >= adev->gmc.mc_vram_size)
2257			return result;
2258
2259		r = get_user(value, (uint32_t *)buf);
2260		if (r)
2261			return r;
2262
2263		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2264		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2265		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2266		WREG32_NO_KIQ(mmMM_DATA, value);
2267		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2268
2269		result += 4;
2270		buf += 4;
2271		*pos += 4;
2272		size -= 4;
2273	}
2274
2275	return result;
2276}
2277
2278static const struct file_operations amdgpu_ttm_vram_fops = {
2279	.owner = THIS_MODULE,
2280	.read = amdgpu_ttm_vram_read,
2281	.write = amdgpu_ttm_vram_write,
2282	.llseek = default_llseek,
2283};
2284
2285#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2286
2287/**
2288 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2289 */
2290static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2291				   size_t size, loff_t *pos)
2292{
2293	struct amdgpu_device *adev = file_inode(f)->i_private;
2294	ssize_t result = 0;
2295	int r;
2296
2297	while (size) {
2298		loff_t p = *pos / PAGE_SIZE;
2299		unsigned off = *pos & ~PAGE_MASK;
2300		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2301		struct page *page;
2302		void *ptr;
2303
2304		if (p >= adev->gart.num_cpu_pages)
2305			return result;
2306
2307		page = adev->gart.pages[p];
2308		if (page) {
2309			ptr = kmap(page);
2310			ptr += off;
2311
2312			r = copy_to_user(buf, ptr, cur_size);
2313			kunmap(adev->gart.pages[p]);
2314		} else
2315			r = clear_user(buf, cur_size);
2316
2317		if (r)
2318			return -EFAULT;
2319
2320		result += cur_size;
2321		buf += cur_size;
2322		*pos += cur_size;
2323		size -= cur_size;
2324	}
2325
2326	return result;
2327}
2328
2329static const struct file_operations amdgpu_ttm_gtt_fops = {
2330	.owner = THIS_MODULE,
2331	.read = amdgpu_ttm_gtt_read,
2332	.llseek = default_llseek
2333};
2334
2335#endif
2336
2337/**
2338 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2339 *
2340 * This function is used to read memory that has been mapped to the
2341 * GPU and the known addresses are not physical addresses but instead
2342 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2343 */
2344static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2345				 size_t size, loff_t *pos)
2346{
2347	struct amdgpu_device *adev = file_inode(f)->i_private;
2348	struct iommu_domain *dom;
2349	ssize_t result = 0;
2350	int r;
2351
2352	/* retrieve the IOMMU domain if any for this device */
2353	dom = iommu_get_domain_for_dev(adev->dev);
2354
2355	while (size) {
2356		phys_addr_t addr = *pos & PAGE_MASK;
2357		loff_t off = *pos & ~PAGE_MASK;
2358		size_t bytes = PAGE_SIZE - off;
2359		unsigned long pfn;
2360		struct page *p;
2361		void *ptr;
2362
2363		bytes = bytes < size ? bytes : size;
2364
2365		/* Translate the bus address to a physical address.  If
2366		 * the domain is NULL it means there is no IOMMU active
2367		 * and the address translation is the identity
2368		 */
2369		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2370
2371		pfn = addr >> PAGE_SHIFT;
2372		if (!pfn_valid(pfn))
2373			return -EPERM;
2374
2375		p = pfn_to_page(pfn);
2376		if (p->mapping != adev->mman.bdev.dev_mapping)
2377			return -EPERM;
2378
2379		ptr = kmap(p);
2380		r = copy_to_user(buf, ptr + off, bytes);
2381		kunmap(p);
2382		if (r)
2383			return -EFAULT;
2384
2385		size -= bytes;
2386		*pos += bytes;
2387		result += bytes;
2388	}
2389
2390	return result;
2391}
2392
2393/**
2394 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2395 *
2396 * This function is used to write memory that has been mapped to the
2397 * GPU and the known addresses are not physical addresses but instead
2398 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2399 */
2400static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2401				 size_t size, loff_t *pos)
2402{
2403	struct amdgpu_device *adev = file_inode(f)->i_private;
2404	struct iommu_domain *dom;
2405	ssize_t result = 0;
2406	int r;
2407
2408	dom = iommu_get_domain_for_dev(adev->dev);
2409
2410	while (size) {
2411		phys_addr_t addr = *pos & PAGE_MASK;
2412		loff_t off = *pos & ~PAGE_MASK;
2413		size_t bytes = PAGE_SIZE - off;
2414		unsigned long pfn;
2415		struct page *p;
2416		void *ptr;
2417
2418		bytes = bytes < size ? bytes : size;
2419
2420		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2421
2422		pfn = addr >> PAGE_SHIFT;
2423		if (!pfn_valid(pfn))
2424			return -EPERM;
2425
2426		p = pfn_to_page(pfn);
2427		if (p->mapping != adev->mman.bdev.dev_mapping)
2428			return -EPERM;
2429
2430		ptr = kmap(p);
2431		r = copy_from_user(ptr + off, buf, bytes);
2432		kunmap(p);
2433		if (r)
2434			return -EFAULT;
2435
2436		size -= bytes;
2437		*pos += bytes;
2438		result += bytes;
2439	}
2440
2441	return result;
2442}
2443
2444static const struct file_operations amdgpu_ttm_iomem_fops = {
2445	.owner = THIS_MODULE,
2446	.read = amdgpu_iomem_read,
2447	.write = amdgpu_iomem_write,
2448	.llseek = default_llseek
2449};
2450
2451static const struct {
2452	char *name;
2453	const struct file_operations *fops;
2454	int domain;
2455} ttm_debugfs_entries[] = {
2456	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2457#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2458	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2459#endif
2460	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2461};
2462
2463#endif
2464
2465static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2466{
2467#if defined(CONFIG_DEBUG_FS)
2468	unsigned count;
2469
2470	struct drm_minor *minor = adev->ddev->primary;
2471	struct dentry *ent, *root = minor->debugfs_root;
2472
2473	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2474		ent = debugfs_create_file(
2475				ttm_debugfs_entries[count].name,
2476				S_IFREG | S_IRUGO, root,
2477				adev,
2478				ttm_debugfs_entries[count].fops);
2479		if (IS_ERR(ent))
2480			return PTR_ERR(ent);
2481		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2482			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2483		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2484			i_size_write(ent->d_inode, adev->gmc.gart_size);
2485		adev->mman.debugfs_entries[count] = ent;
2486	}
2487
2488	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2489
2490#ifdef CONFIG_SWIOTLB
2491	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2492		--count;
2493#endif
2494
2495	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2496#else
2497	return 0;
2498#endif
2499}
2500
2501static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
2502{
2503#if defined(CONFIG_DEBUG_FS)
2504	unsigned i;
2505
2506	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
2507		debugfs_remove(adev->mman.debugfs_entries[i]);
2508#endif
2509}