   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30 *    Dave Airlie
  31 */
  32#include <drm/ttm/ttm_bo_api.h>
  33#include <drm/ttm/ttm_bo_driver.h>
  34#include <drm/ttm/ttm_placement.h>
  35#include <drm/ttm/ttm_module.h>
  36#include <drm/ttm/ttm_page_alloc.h>
  37#include <drm/drmP.h>
  38#include <drm/amdgpu_drm.h>
  39#include <linux/seq_file.h>
  40#include <linux/slab.h>
  41#include <linux/swiotlb.h>
  42#include <linux/swap.h>
  43#include <linux/pagemap.h>
  44#include <linux/debugfs.h>
  45#include <linux/iommu.h>
  46#include "amdgpu.h"
  47#include "amdgpu_object.h"
  48#include "amdgpu_trace.h"
  49#include "amdgpu_amdkfd.h"
  50#include "bif/bif_4_1_d.h"
  51
  52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
  53
  54static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
  55			     struct ttm_mem_reg *mem, unsigned num_pages,
  56			     uint64_t offset, unsigned window,
  57			     struct amdgpu_ring *ring,
  58			     uint64_t *addr);
  59
  60static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
  61static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
  62
  63/*
  64 * Global memory.
  65 */
  66static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
  67{
  68	return ttm_mem_global_init(ref->object);
  69}
  70
  71static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
  72{
  73	ttm_mem_global_release(ref->object);
  74}
  75
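/**
 * amdgpu_ttm_global_init - Set up the global TTM state.
 *
 * References the TTM memory accounting and buffer object subsystems and
 * initializes the scheduler entity used for buffer moves.
 */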
  76static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
  77{
  78	struct drm_global_reference *global_ref;
  79	struct amdgpu_ring *ring;
  80	struct drm_sched_rq *rq;
  81	int r;
  82
  83	adev->mman.mem_global_referenced = false;
  84	global_ref = &adev->mman.mem_global_ref;
  85	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
  86	global_ref->size = sizeof(struct ttm_mem_global);
  87	global_ref->init = &amdgpu_ttm_mem_global_init;
  88	global_ref->release = &amdgpu_ttm_mem_global_release;
  89	r = drm_global_item_ref(global_ref);
  90	if (r) {
  91		DRM_ERROR("Failed setting up TTM memory accounting "
  92			  "subsystem.\n");
  93		goto error_mem;
  94	}
  95
  96	adev->mman.bo_global_ref.mem_glob =
  97		adev->mman.mem_global_ref.object;
  98	global_ref = &adev->mman.bo_global_ref.ref;
  99	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 100	global_ref->size = sizeof(struct ttm_bo_global);
 101	global_ref->init = &ttm_bo_global_init;
 102	global_ref->release = &ttm_bo_global_release;
 103	r = drm_global_item_ref(global_ref);
 104	if (r) {
 105		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
 106		goto error_bo;
 107	}
 108
 109	mutex_init(&adev->mman.gtt_window_lock);
 110
 111	ring = adev->mman.buffer_funcs_ring;
 112	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 113	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
 114				  rq, amdgpu_sched_jobs, NULL);
 115	if (r) {
 116		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 117		goto error_entity;
 118	}
 119
 120	adev->mman.mem_global_referenced = true;
 121
 122	return 0;
 123
 124error_entity:
 125	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 126error_bo:
 127	drm_global_item_unref(&adev->mman.mem_global_ref);
 128error_mem:
 129	return r;
 130}
 131
 132static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 133{
 134	if (adev->mman.mem_global_referenced) {
 135		drm_sched_entity_fini(adev->mman.entity.sched,
 136				      &adev->mman.entity);
 137		mutex_destroy(&adev->mman.gtt_window_lock);
 138		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 139		drm_global_item_unref(&adev->mman.mem_global_ref);
 140		adev->mman.mem_global_referenced = false;
 141	}
 142}
 143
 144static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 145{
 146	return 0;
 147}
 148
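/**
 * amdgpu_init_mem_type - Initialize the manager for one memory type
 * (system, GTT, VRAM or GDS/GWS/OA), setting its caching options,
 * flags and GPU offset.
 */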
 149static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 150				struct ttm_mem_type_manager *man)
 151{
 152	struct amdgpu_device *adev;
 153
 154	adev = amdgpu_ttm_adev(bdev);
 155
 156	switch (type) {
 157	case TTM_PL_SYSTEM:
 158		/* System memory */
 159		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 160		man->available_caching = TTM_PL_MASK_CACHING;
 161		man->default_caching = TTM_PL_FLAG_CACHED;
 162		break;
 163	case TTM_PL_TT:
 164		man->func = &amdgpu_gtt_mgr_func;
 165		man->gpu_offset = adev->gmc.gart_start;
 166		man->available_caching = TTM_PL_MASK_CACHING;
 167		man->default_caching = TTM_PL_FLAG_CACHED;
 168		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 169		break;
 170	case TTM_PL_VRAM:
 171		/* "On-card" video ram */
 172		man->func = &amdgpu_vram_mgr_func;
 173		man->gpu_offset = adev->gmc.vram_start;
 174		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 175			     TTM_MEMTYPE_FLAG_MAPPABLE;
 176		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 177		man->default_caching = TTM_PL_FLAG_WC;
 178		break;
 179	case AMDGPU_PL_GDS:
 180	case AMDGPU_PL_GWS:
 181	case AMDGPU_PL_OA:
  182		/* On-chip GDS, GWS and OA memory */
 183		man->func = &ttm_bo_manager_func;
 184		man->gpu_offset = 0;
 185		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
 186		man->available_caching = TTM_PL_FLAG_UNCACHED;
 187		man->default_caching = TTM_PL_FLAG_UNCACHED;
 188		break;
 189	default:
 190		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 191		return -EINVAL;
 192	}
 193	return 0;
 194}
 195
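/**
 * amdgpu_evict_flags - Compute the placements to use when evicting a
 * buffer object, preferring GTT for VRAM BOs and the CPU domain
 * otherwise.
 */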
 196static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 197				struct ttm_placement *placement)
 198{
 199	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 200	struct amdgpu_bo *abo;
 201	static const struct ttm_place placements = {
 202		.fpfn = 0,
 203		.lpfn = 0,
 204		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 205	};
 206
 207	if (bo->type == ttm_bo_type_sg) {
 208		placement->num_placement = 0;
 209		placement->num_busy_placement = 0;
 210		return;
 211	}
 212
 213	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 214		placement->placement = &placements;
 215		placement->busy_placement = &placements;
 216		placement->num_placement = 1;
 217		placement->num_busy_placement = 1;
 218		return;
 219	}
 220	abo = ttm_to_amdgpu_bo(bo);
 221	switch (bo->mem.mem_type) {
 222	case TTM_PL_VRAM:
 223		if (!adev->mman.buffer_funcs_enabled) {
 224			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 225		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 226			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 227			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 228			struct drm_mm_node *node = bo->mem.mm_node;
 229			unsigned long pages_left;
 230
 231			for (pages_left = bo->mem.num_pages;
 232			     pages_left;
 233			     pages_left -= node->size, node++) {
 234				if (node->start < fpfn)
 235					break;
 236			}
 237
 238			if (!pages_left)
 239				goto gtt;
 240
 241			/* Try evicting to the CPU inaccessible part of VRAM
 242			 * first, but only set GTT as busy placement, so this
 243			 * BO will be evicted to GTT rather than causing other
 244			 * BOs to be evicted from VRAM
 245			 */
 246			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 247							 AMDGPU_GEM_DOMAIN_GTT);
 248			abo->placements[0].fpfn = fpfn;
 249			abo->placements[0].lpfn = 0;
 250			abo->placement.busy_placement = &abo->placements[1];
 251			abo->placement.num_busy_placement = 1;
 252		} else {
 253gtt:
 254			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 255		}
 256		break;
 257	case TTM_PL_TT:
 258	default:
 259		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 260	}
 261	*placement = abo->placement;
 262}
 263
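/**
 * amdgpu_verify_access - Check whether the file handle is allowed to
 * map this buffer object; userptr BOs can never be mapped.
 */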
 264static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 265{
 266	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 267
 268	/*
 269	 * Don't verify access for KFD BOs. They don't have a GEM
 270	 * object associated with them.
 271	 */
 272	if (abo->kfd_bo)
 273		return 0;
 274
 275	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 276		return -EPERM;
 277	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
 278					  filp->private_data);
 279}
 280
 281static void amdgpu_move_null(struct ttm_buffer_object *bo,
 282			     struct ttm_mem_reg *new_mem)
 283{
 284	struct ttm_mem_reg *old_mem = &bo->mem;
 285
 286	BUG_ON(old_mem->mm_node != NULL);
 287	*old_mem = *new_mem;
 288	new_mem->mm_node = NULL;
 289}
 290
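/**
 * amdgpu_mm_node_addr - Compute the GPU address of a drm_mm_node within
 * a ttm_mem_reg; zero if the node has no GART address yet.
 */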
 291static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 292				    struct drm_mm_node *mm_node,
 293				    struct ttm_mem_reg *mem)
 294{
 295	uint64_t addr = 0;
 296
 297	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
 298		addr = mm_node->start << PAGE_SHIFT;
 299		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 300	}
 301	return addr;
 302}
 303
  304/**
  305 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
  306 * corresponding to @offset. It also adjusts @offset to fall within
  307 * the returned drm_mm_node.
  308 */
 309static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
 310					       unsigned long *offset)
 311{
 312	struct drm_mm_node *mm_node = mem->mm_node;
 313
 314	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
 315		*offset -= (mm_node->size << PAGE_SHIFT);
 316		++mm_node;
 317	}
 318	return mm_node;
 319}
 320
  321/**
  322 * amdgpu_ttm_copy_mem_to_mem - Helper function for memory copies
  323 *
  324 * The function copies @size bytes from {src->mem + src->offset} to
  325 * {dst->mem + dst->offset}. src->bo and dst->bo may be the same BO for
  326 * a move, or different BOs for a BO-to-BO copy.
  327 *
  328 * @f: Returns the last fence if multiple jobs are submitted.
  329 */
 330int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 331			       struct amdgpu_copy_mem *src,
 332			       struct amdgpu_copy_mem *dst,
 333			       uint64_t size,
 334			       struct reservation_object *resv,
 335			       struct dma_fence **f)
 336{
 337	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 338	struct drm_mm_node *src_mm, *dst_mm;
 339	uint64_t src_node_start, dst_node_start, src_node_size,
 340		 dst_node_size, src_page_offset, dst_page_offset;
 341	struct dma_fence *fence = NULL;
 342	int r = 0;
 343	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
 344					AMDGPU_GPU_PAGE_SIZE);
 345
 346	if (!adev->mman.buffer_funcs_enabled) {
 347		DRM_ERROR("Trying to move memory with ring turned off.\n");
 348		return -EINVAL;
 349	}
 350
 351	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
 352	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
 353					     src->offset;
 354	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
 355	src_page_offset = src_node_start & (PAGE_SIZE - 1);
 356
 357	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
 358	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
 359					     dst->offset;
 360	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
 361	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
 362
 363	mutex_lock(&adev->mman.gtt_window_lock);
 364
 365	while (size) {
 366		unsigned long cur_size;
 367		uint64_t from = src_node_start, to = dst_node_start;
 368		struct dma_fence *next;
 369
 370		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
 371		 * begins at an offset, then adjust the size accordingly
 372		 */
 373		cur_size = min3(min(src_node_size, dst_node_size), size,
 374				GTT_MAX_BYTES);
 375		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
 376		    cur_size + dst_page_offset > GTT_MAX_BYTES)
 377			cur_size -= max(src_page_offset, dst_page_offset);
 378
 379		/* Map only what needs to be accessed. Map src to window 0 and
 380		 * dst to window 1
 381		 */
 382		if (src->mem->mem_type == TTM_PL_TT &&
 383		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
 384			r = amdgpu_map_buffer(src->bo, src->mem,
 385					PFN_UP(cur_size + src_page_offset),
 386					src_node_start, 0, ring,
 387					&from);
 388			if (r)
 389				goto error;
  390			/* Adjust the offset because amdgpu_map_buffer returns
  391			 * the start of the mapped page
  392			 */
 393			from += src_page_offset;
 394		}
 395
 396		if (dst->mem->mem_type == TTM_PL_TT &&
 397		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
 398			r = amdgpu_map_buffer(dst->bo, dst->mem,
 399					PFN_UP(cur_size + dst_page_offset),
 400					dst_node_start, 1, ring,
 401					&to);
 402			if (r)
 403				goto error;
 404			to += dst_page_offset;
 405		}
 406
 407		r = amdgpu_copy_buffer(ring, from, to, cur_size,
 408				       resv, &next, false, true);
 409		if (r)
 410			goto error;
 411
 412		dma_fence_put(fence);
 413		fence = next;
 414
 415		size -= cur_size;
 416		if (!size)
 417			break;
 418
 419		src_node_size -= cur_size;
 420		if (!src_node_size) {
 421			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
 422							     src->mem);
 423			src_node_size = (src_mm->size << PAGE_SHIFT);
 424		} else {
 425			src_node_start += cur_size;
 426			src_page_offset = src_node_start & (PAGE_SIZE - 1);
 427		}
 428		dst_node_size -= cur_size;
 429		if (!dst_node_size) {
 430			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
 431							     dst->mem);
 432			dst_node_size = (dst_mm->size << PAGE_SHIFT);
 433		} else {
 434			dst_node_start += cur_size;
 435			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
 436		}
 437	}
 438error:
 439	mutex_unlock(&adev->mman.gtt_window_lock);
 440	if (f)
 441		*f = dma_fence_get(fence);
 442	dma_fence_put(fence);
 443	return r;
 444}
 445
 446
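/**
 * amdgpu_move_blit - Move a buffer object with the copy engine by
 * wrapping amdgpu_ttm_copy_mem_to_mem() in a pipelined move.
 */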
 447static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 448			    bool evict, bool no_wait_gpu,
 449			    struct ttm_mem_reg *new_mem,
 450			    struct ttm_mem_reg *old_mem)
 451{
 452	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 453	struct amdgpu_copy_mem src, dst;
 454	struct dma_fence *fence = NULL;
 455	int r;
 456
 457	src.bo = bo;
 458	dst.bo = bo;
 459	src.mem = old_mem;
 460	dst.mem = new_mem;
 461	src.offset = 0;
 462	dst.offset = 0;
 463
 464	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 465				       new_mem->num_pages << PAGE_SHIFT,
 466				       bo->resv, &fence);
 467	if (r)
 468		goto error;
 469
 470	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 471	dma_fence_put(fence);
 472	return r;
 473
 474error:
 475	if (fence)
 476		dma_fence_wait(fence, false);
 477	dma_fence_put(fence);
 478	return r;
 479}
 480
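/**
 * amdgpu_move_vram_ram - Move a BO from VRAM to system memory, blitting
 * it into a temporary GTT placement first.
 */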
 481static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 482				struct ttm_operation_ctx *ctx,
 483				struct ttm_mem_reg *new_mem)
 484{
 485	struct amdgpu_device *adev;
 486	struct ttm_mem_reg *old_mem = &bo->mem;
 487	struct ttm_mem_reg tmp_mem;
 488	struct ttm_place placements;
 489	struct ttm_placement placement;
 490	int r;
 491
 492	adev = amdgpu_ttm_adev(bo->bdev);
 493	tmp_mem = *new_mem;
 494	tmp_mem.mm_node = NULL;
 495	placement.num_placement = 1;
 496	placement.placement = &placements;
 497	placement.num_busy_placement = 1;
 498	placement.busy_placement = &placements;
 499	placements.fpfn = 0;
 500	placements.lpfn = 0;
 501	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 502	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 503	if (unlikely(r)) {
 504		return r;
 505	}
 506
 507	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
 508	if (unlikely(r)) {
 509		goto out_cleanup;
 510	}
 511
 512	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
 513	if (unlikely(r)) {
 514		goto out_cleanup;
 515	}
 516	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
 517	if (unlikely(r)) {
 518		goto out_cleanup;
 519	}
 520	r = ttm_bo_move_ttm(bo, ctx, new_mem);
 521out_cleanup:
 522	ttm_bo_mem_put(bo, &tmp_mem);
 523	return r;
 524}
 525
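/**
 * amdgpu_move_ram_vram - Move a BO from system memory to VRAM, going
 * through a temporary GTT placement for the blit.
 */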
 526static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 527				struct ttm_operation_ctx *ctx,
 528				struct ttm_mem_reg *new_mem)
 529{
 530	struct amdgpu_device *adev;
 531	struct ttm_mem_reg *old_mem = &bo->mem;
 532	struct ttm_mem_reg tmp_mem;
 533	struct ttm_placement placement;
 534	struct ttm_place placements;
 535	int r;
 536
 537	adev = amdgpu_ttm_adev(bo->bdev);
 538	tmp_mem = *new_mem;
 539	tmp_mem.mm_node = NULL;
 540	placement.num_placement = 1;
 541	placement.placement = &placements;
 542	placement.num_busy_placement = 1;
 543	placement.busy_placement = &placements;
 544	placements.fpfn = 0;
 545	placements.lpfn = 0;
 546	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 547	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 548	if (unlikely(r)) {
 549		return r;
 550	}
 551	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
 552	if (unlikely(r)) {
 553		goto out_cleanup;
 554	}
 555	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
 556	if (unlikely(r)) {
 557		goto out_cleanup;
 558	}
 559out_cleanup:
 560	ttm_bo_mem_put(bo, &tmp_mem);
 561	return r;
 562}
 563
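/**
 * amdgpu_bo_move - TTM move callback. Picks the cheapest path (null
 * move, GART bind, copy engine blit) and falls back to a CPU memcpy.
 */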
 564static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 565			  struct ttm_operation_ctx *ctx,
 566			  struct ttm_mem_reg *new_mem)
 567{
 568	struct amdgpu_device *adev;
 569	struct amdgpu_bo *abo;
 570	struct ttm_mem_reg *old_mem = &bo->mem;
 571	int r;
 572
 573	/* Can't move a pinned BO */
 574	abo = ttm_to_amdgpu_bo(bo);
 575	if (WARN_ON_ONCE(abo->pin_count > 0))
 576		return -EINVAL;
 577
 578	adev = amdgpu_ttm_adev(bo->bdev);
 579
 580	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 581		amdgpu_move_null(bo, new_mem);
 582		return 0;
 583	}
 584	if ((old_mem->mem_type == TTM_PL_TT &&
 585	     new_mem->mem_type == TTM_PL_SYSTEM) ||
 586	    (old_mem->mem_type == TTM_PL_SYSTEM &&
 587	     new_mem->mem_type == TTM_PL_TT)) {
 588		/* bind is enough */
 589		amdgpu_move_null(bo, new_mem);
 590		return 0;
 591	}
 592
 593	if (!adev->mman.buffer_funcs_enabled)
 594		goto memcpy;
 595
 596	if (old_mem->mem_type == TTM_PL_VRAM &&
 597	    new_mem->mem_type == TTM_PL_SYSTEM) {
 598		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
 599	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 600		   new_mem->mem_type == TTM_PL_VRAM) {
 601		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
 602	} else {
 603		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
 604				     new_mem, old_mem);
 605	}
 606
 607	if (r) {
 608memcpy:
 609		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 610		if (r) {
 611			return r;
 612		}
 613	}
 614
 615	if (bo->type == ttm_bo_type_device &&
 616	    new_mem->mem_type == TTM_PL_VRAM &&
 617	    old_mem->mem_type != TTM_PL_VRAM) {
 618		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
 619		 * accesses the BO after it's moved.
 620		 */
 621		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 622	}
 623
 624	/* update statistics */
 625	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
 626	return 0;
 627}
 628
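/**
 * amdgpu_ttm_io_mem_reserve - Fill in the bus address, offset and size
 * of a memory region so that it can be mapped by the CPU.
 */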
 629static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 630{
 631	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 632	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 633	struct drm_mm_node *mm_node = mem->mm_node;
 634
 635	mem->bus.addr = NULL;
 636	mem->bus.offset = 0;
 637	mem->bus.size = mem->num_pages << PAGE_SHIFT;
 638	mem->bus.base = 0;
 639	mem->bus.is_iomem = false;
 640	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
 641		return -EINVAL;
 642	switch (mem->mem_type) {
 643	case TTM_PL_SYSTEM:
 644		/* system memory */
 645		return 0;
 646	case TTM_PL_TT:
 647		break;
 648	case TTM_PL_VRAM:
 649		mem->bus.offset = mem->start << PAGE_SHIFT;
 650		/* check if it's visible */
 651		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
 652			return -EINVAL;
 653		/* Only physically contiguous buffers apply. In a contiguous
 654		 * buffer, size of the first mm_node would match the number of
 655		 * pages in ttm_mem_reg.
 656		 */
 657		if (adev->mman.aper_base_kaddr &&
 658		    (mm_node->size == mem->num_pages))
 659			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 660					mem->bus.offset;
 661
 662		mem->bus.base = adev->gmc.aper_base;
 663		mem->bus.is_iomem = true;
 664		break;
 665	default:
 666		return -EINVAL;
 667	}
 668	return 0;
 669}
 670
 671static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 672{
 673}
 674
 675static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 676					   unsigned long page_offset)
 677{
 678	struct drm_mm_node *mm;
 679	unsigned long offset = (page_offset << PAGE_SHIFT);
 680
 681	mm = amdgpu_find_mm_node(&bo->mem, &offset);
 682	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
 683		(offset >> PAGE_SHIFT);
 684}
 685
 686/*
 687 * TTM backend functions.
 688 */
 689struct amdgpu_ttm_gup_task_list {
 690	struct list_head	list;
 691	struct task_struct	*task;
 692};
 693
 694struct amdgpu_ttm_tt {
 695	struct ttm_dma_tt	ttm;
 696	u64			offset;
 697	uint64_t		userptr;
 698	struct mm_struct	*usermm;
 699	uint32_t		userflags;
 700	spinlock_t              guptasklock;
 701	struct list_head        guptasks;
 702	atomic_t		mmu_invalidations;
 703	uint32_t		last_set_pages;
 704};
 705
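/**
 * amdgpu_ttm_tt_get_user_pages - Pin the pages backing a userptr BO
 * with get_user_pages(), rejecting file-backed VMAs when the BO was
 * created with AMDGPU_GEM_USERPTR_ANONONLY.
 */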
 706int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 707{
 708	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 709	unsigned int flags = 0;
 710	unsigned pinned = 0;
 711	int r;
 712
 713	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
 714		flags |= FOLL_WRITE;
 715
 716	down_read(&current->mm->mmap_sem);
 717
 718	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 719		/* check that we only use anonymous memory
 720		   to prevent problems with writeback */
 721		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
 722		struct vm_area_struct *vma;
 723
 724		vma = find_vma(gtt->usermm, gtt->userptr);
 725		if (!vma || vma->vm_file || vma->vm_end < end) {
 726			up_read(&current->mm->mmap_sem);
 727			return -EPERM;
 728		}
 729	}
 730
 731	do {
 732		unsigned num_pages = ttm->num_pages - pinned;
 733		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 734		struct page **p = pages + pinned;
 735		struct amdgpu_ttm_gup_task_list guptask;
 736
 737		guptask.task = current;
 738		spin_lock(&gtt->guptasklock);
 739		list_add(&guptask.list, &gtt->guptasks);
 740		spin_unlock(&gtt->guptasklock);
 741
 742		r = get_user_pages(userptr, num_pages, flags, p, NULL);
 743
 744		spin_lock(&gtt->guptasklock);
 745		list_del(&guptask.list);
 746		spin_unlock(&gtt->guptasklock);
 747
 748		if (r < 0)
 749			goto release_pages;
 750
 751		pinned += r;
 752
 753	} while (pinned < ttm->num_pages);
 754
 755	up_read(&current->mm->mmap_sem);
 756	return 0;
 757
 758release_pages:
 759	release_pages(pages, pinned);
 760	up_read(&current->mm->mmap_sem);
 761	return r;
 762}
 763
 764void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 765{
 766	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 767	unsigned i;
 768
 769	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
 770	for (i = 0; i < ttm->num_pages; ++i) {
 771		if (ttm->pages[i])
 772			put_page(ttm->pages[i]);
 773
 774		ttm->pages[i] = pages ? pages[i] : NULL;
 775	}
 776}
 777
 778void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 779{
 780	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 781	unsigned i;
 782
 783	for (i = 0; i < ttm->num_pages; ++i) {
 784		struct page *page = ttm->pages[i];
 785
 786		if (!page)
 787			continue;
 788
 789		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
 790			set_page_dirty(page);
 791
 792		mark_page_accessed(page);
 793	}
 794}
 795
 796/* prepare the sg table with the user pages */
 797static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 798{
 799	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 800	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 801	unsigned nents;
 802	int r;
 803
 804	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 805	enum dma_data_direction direction = write ?
 806		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 807
 808	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 809				      ttm->num_pages << PAGE_SHIFT,
 810				      GFP_KERNEL);
 811	if (r)
 812		goto release_sg;
 813
 814	r = -ENOMEM;
 815	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 816	if (nents != ttm->sg->nents)
 817		goto release_sg;
 818
 819	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 820					 gtt->ttm.dma_address, ttm->num_pages);
 821
 822	return 0;
 823
 824release_sg:
 825	kfree(ttm->sg);
 826	return r;
 827}
 828
 829static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 830{
 831	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 832	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 833
 834	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 835	enum dma_data_direction direction = write ?
 836		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 837
 838	/* double check that we don't free the table twice */
 839	if (!ttm->sg->sgl)
 840		return;
 841
 842	/* free the sg table and pages again */
 843	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 844
 845	amdgpu_ttm_tt_mark_user_pages(ttm);
 846
 847	sg_free_table(ttm->sg);
 848}
 849
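/**
 * amdgpu_ttm_backend_bind - Bind a ttm_tt into the GART, pinning
 * userptr pages first when necessary.
 */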
 850static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 851				   struct ttm_mem_reg *bo_mem)
 852{
 853	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 854	struct amdgpu_ttm_tt *gtt = (void*)ttm;
 855	uint64_t flags;
 856	int r = 0;
 857
 858	if (gtt->userptr) {
 859		r = amdgpu_ttm_tt_pin_userptr(ttm);
 860		if (r) {
 861			DRM_ERROR("failed to pin userptr\n");
 862			return r;
 863		}
 864	}
 865	if (!ttm->num_pages) {
 866		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 867		     ttm->num_pages, bo_mem, ttm);
 868	}
 869
 870	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
 871	    bo_mem->mem_type == AMDGPU_PL_GWS ||
 872	    bo_mem->mem_type == AMDGPU_PL_OA)
 873		return -EINVAL;
 874
 875	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
 876		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 877		return 0;
 878	}
 879
 880	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 881	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 882	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 883		ttm->pages, gtt->ttm.dma_address, flags);
 884
 885	if (r)
 886		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 887			  ttm->num_pages, gtt->offset);
 888	return r;
 889}
 890
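/**
 * amdgpu_ttm_alloc_gart - Make sure a TT buffer object has a valid GART
 * address, allocating GART space for it on demand.
 */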
 891int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 892{
 893	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 894	struct ttm_operation_ctx ctx = { false, false };
 895	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
 896	struct ttm_mem_reg tmp;
 897	struct ttm_placement placement;
 898	struct ttm_place placements;
 899	uint64_t flags;
 900	int r;
 901
 902	if (bo->mem.mem_type != TTM_PL_TT ||
 903	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 904		return 0;
 905
 906	tmp = bo->mem;
 907	tmp.mm_node = NULL;
 908	placement.num_placement = 1;
 909	placement.placement = &placements;
 910	placement.num_busy_placement = 1;
 911	placement.busy_placement = &placements;
 912	placements.fpfn = 0;
 913	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 914	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
 915		TTM_PL_FLAG_TT;
 916
 917	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 918	if (unlikely(r))
 919		return r;
 920
 921	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
 922	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
 923	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
 924			     bo->ttm->pages, gtt->ttm.dma_address, flags);
 925	if (unlikely(r)) {
 926		ttm_bo_mem_put(bo, &tmp);
 927		return r;
 928	}
 929
 930	ttm_bo_mem_put(bo, &bo->mem);
 931	bo->mem = tmp;
 932	bo->offset = (bo->mem.start << PAGE_SHIFT) +
 933		bo->bdev->man[bo->mem.mem_type].gpu_offset;
 934
 935	return 0;
 936}
 937
 938int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 939{
 940	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 941	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 942	uint64_t flags;
 943	int r;
 944
 945	if (!gtt)
 946		return 0;
 947
 948	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
 949	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
 950			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
 951	if (r)
 952		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 953			  gtt->ttm.ttm.num_pages, gtt->offset);
 954	return r;
 955}
 956
 957static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 958{
 959	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 960	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 961	int r;
 962
 963	if (gtt->userptr)
 964		amdgpu_ttm_tt_unpin_userptr(ttm);
 965
 966	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 967		return 0;
 968
 969	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
 970	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
 971	if (r)
 972		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 973			  gtt->ttm.ttm.num_pages, gtt->offset);
 974	return r;
 975}
 976
 977static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
 978{
 979	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 980
 981	ttm_dma_tt_fini(&gtt->ttm);
 982	kfree(gtt);
 983}
 984
 985static struct ttm_backend_func amdgpu_backend_func = {
 986	.bind = &amdgpu_ttm_backend_bind,
 987	.unbind = &amdgpu_ttm_backend_unbind,
 988	.destroy = &amdgpu_ttm_backend_destroy,
 989};
 990
 991static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 992					   uint32_t page_flags)
 993{
 994	struct amdgpu_device *adev;
 995	struct amdgpu_ttm_tt *gtt;
 996
 997	adev = amdgpu_ttm_adev(bo->bdev);
 998
 999	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1000	if (gtt == NULL) {
1001		return NULL;
1002	}
1003	gtt->ttm.ttm.func = &amdgpu_backend_func;
1004	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1005		kfree(gtt);
1006		return NULL;
1007	}
1008	return &gtt->ttm.ttm;
1009}
1010
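/**
 * amdgpu_ttm_tt_populate - Allocate and map the backing pages of a
 * ttm_tt; userptr and imported (SG) buffers are handled specially.
 */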
1011static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1012			struct ttm_operation_ctx *ctx)
1013{
1014	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1015	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1016	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1017
1018	if (gtt && gtt->userptr) {
1019		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1020		if (!ttm->sg)
1021			return -ENOMEM;
1022
1023		ttm->page_flags |= TTM_PAGE_FLAG_SG;
1024		ttm->state = tt_unbound;
1025		return 0;
1026	}
1027
1028	if (slave && ttm->sg) {
1029		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1030						 gtt->ttm.dma_address,
1031						 ttm->num_pages);
1032		ttm->state = tt_unbound;
1033		return 0;
1034	}
1035
1036#ifdef CONFIG_SWIOTLB
1037	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1038		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1039	}
1040#endif
1041
1042	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1043}
1044
1045static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1046{
1047	struct amdgpu_device *adev;
1048	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1049	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1050
1051	if (gtt && gtt->userptr) {
1052		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1053		kfree(ttm->sg);
1054		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1055		return;
1056	}
1057
1058	if (slave)
1059		return;
1060
1061	adev = amdgpu_ttm_adev(ttm->bdev);
1062
1063#ifdef CONFIG_SWIOTLB
1064	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1065		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1066		return;
1067	}
1068#endif
1069
1070	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1071}
1072
1073int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1074			      uint32_t flags)
1075{
1076	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1077
1078	if (gtt == NULL)
1079		return -EINVAL;
1080
1081	gtt->userptr = addr;
1082	gtt->usermm = current->mm;
1083	gtt->userflags = flags;
1084	spin_lock_init(&gtt->guptasklock);
1085	INIT_LIST_HEAD(&gtt->guptasks);
1086	atomic_set(&gtt->mmu_invalidations, 0);
1087	gtt->last_set_pages = 0;
1088
1089	return 0;
1090}
1091
1092struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1093{
1094	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1095
1096	if (gtt == NULL)
1097		return NULL;
1098
1099	return gtt->usermm;
1100}
1101
1102bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1103				  unsigned long end)
1104{
1105	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1106	struct amdgpu_ttm_gup_task_list *entry;
1107	unsigned long size;
1108
1109	if (gtt == NULL || !gtt->userptr)
1110		return false;
1111
1112	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1113	if (gtt->userptr > end || gtt->userptr + size <= start)
1114		return false;
1115
1116	spin_lock(&gtt->guptasklock);
1117	list_for_each_entry(entry, &gtt->guptasks, list) {
1118		if (entry->task == current) {
1119			spin_unlock(&gtt->guptasklock);
1120			return false;
1121		}
1122	}
1123	spin_unlock(&gtt->guptasklock);
1124
1125	atomic_inc(&gtt->mmu_invalidations);
1126
1127	return true;
1128}
1129
1130bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1131				       int *last_invalidated)
1132{
1133	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1134	int prev_invalidated = *last_invalidated;
1135
1136	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
1137	return prev_invalidated != *last_invalidated;
1138}
1139
1140bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
1141{
1142	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1143
1144	if (gtt == NULL || !gtt->userptr)
1145		return false;
1146
1147	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
1148}
1149
1150bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1151{
1152	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1153
1154	if (gtt == NULL)
1155		return false;
1156
1157	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1158}
1159
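/**
 * amdgpu_ttm_tt_pte_flags - Compute the GART PTE flags (valid, system,
 * snooped, readable, writeable) for a given placement.
 */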
1160uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1161				 struct ttm_mem_reg *mem)
1162{
1163	uint64_t flags = 0;
1164
1165	if (mem && mem->mem_type != TTM_PL_SYSTEM)
1166		flags |= AMDGPU_PTE_VALID;
1167
1168	if (mem && mem->mem_type == TTM_PL_TT) {
1169		flags |= AMDGPU_PTE_SYSTEM;
1170
1171		if (ttm->caching_state == tt_cached)
1172			flags |= AMDGPU_PTE_SNOOPED;
1173	}
1174
1175	flags |= adev->gart.gart_pte_flags;
1176	flags |= AMDGPU_PTE_READABLE;
1177
1178	if (!amdgpu_ttm_tt_is_readonly(ttm))
1179		flags |= AMDGPU_PTE_WRITEABLE;
1180
1181	return flags;
1182}
1183
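/**
 * amdgpu_ttm_bo_eviction_valuable - Decide whether evicting this BO is
 * worthwhile; KFD BOs of the current process are never evicted.
 */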
1184static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1185					    const struct ttm_place *place)
1186{
1187	unsigned long num_pages = bo->mem.num_pages;
1188	struct drm_mm_node *node = bo->mem.mm_node;
1189	struct reservation_object_list *flist;
1190	struct dma_fence *f;
1191	int i;
1192
1193	/* If bo is a KFD BO, check if the bo belongs to the current process.
1194	 * If true, then return false as any KFD process needs all its BOs to
1195	 * be resident to run successfully
1196	 */
1197	flist = reservation_object_get_list(bo->resv);
1198	if (flist) {
1199		for (i = 0; i < flist->shared_count; ++i) {
1200			f = rcu_dereference_protected(flist->shared[i],
1201				reservation_object_held(bo->resv));
1202			if (amdkfd_fence_check_mm(f, current->mm))
1203				return false;
1204		}
1205	}
1206
1207	switch (bo->mem.mem_type) {
1208	case TTM_PL_TT:
1209		return true;
1210
1211	case TTM_PL_VRAM:
1212		/* Check each drm MM node individually */
1213		while (num_pages) {
1214			if (place->fpfn < (node->start + node->size) &&
1215			    !(place->lpfn && place->lpfn <= node->start))
1216				return true;
1217
1218			num_pages -= node->size;
1219			++node;
1220		}
1221		return false;
1222
1223	default:
1224		break;
1225	}
1226
1227	return ttm_bo_eviction_valuable(bo, place);
1228}
1229
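/**
 * amdgpu_ttm_access_memory - Read or write a VRAM BO through the
 * MM_INDEX/MM_DATA registers, e.g. for ptrace access.
 */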
1230static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1231				    unsigned long offset,
1232				    void *buf, int len, int write)
1233{
1234	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1235	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1236	struct drm_mm_node *nodes;
1237	uint32_t value = 0;
1238	int ret = 0;
1239	uint64_t pos;
1240	unsigned long flags;
1241
1242	if (bo->mem.mem_type != TTM_PL_VRAM)
1243		return -EIO;
1244
1245	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1246	pos = (nodes->start << PAGE_SHIFT) + offset;
1247
1248	while (len && pos < adev->gmc.mc_vram_size) {
1249		uint64_t aligned_pos = pos & ~(uint64_t)3;
1250		uint32_t bytes = 4 - (pos & 3);
1251		uint32_t shift = (pos & 3) * 8;
1252		uint32_t mask = 0xffffffff << shift;
1253
1254		if (len < bytes) {
1255			mask &= 0xffffffff >> (bytes - len) * 8;
1256			bytes = len;
1257		}
1258
1259		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1260		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1261		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1262		if (!write || mask != 0xffffffff)
1263			value = RREG32_NO_KIQ(mmMM_DATA);
1264		if (write) {
1265			value &= ~mask;
1266			value |= (*(uint32_t *)buf << shift) & mask;
1267			WREG32_NO_KIQ(mmMM_DATA, value);
1268		}
1269		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1270		if (!write) {
1271			value = (value & mask) >> shift;
1272			memcpy(buf, &value, bytes);
1273		}
1274
1275		ret += bytes;
1276		buf = (uint8_t *)buf + bytes;
1277		pos += bytes;
1278		len -= bytes;
1279		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1280			++nodes;
1281			pos = (nodes->start << PAGE_SHIFT);
1282		}
1283	}
1284
1285	return ret;
1286}
1287
1288static struct ttm_bo_driver amdgpu_bo_driver = {
1289	.ttm_tt_create = &amdgpu_ttm_tt_create,
1290	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
1291	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1292	.invalidate_caches = &amdgpu_invalidate_caches,
1293	.init_mem_type = &amdgpu_init_mem_type,
1294	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1295	.evict_flags = &amdgpu_evict_flags,
1296	.move = &amdgpu_bo_move,
1297	.verify_access = &amdgpu_verify_access,
1298	.move_notify = &amdgpu_bo_move_notify,
1299	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1300	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1301	.io_mem_free = &amdgpu_ttm_io_mem_free,
1302	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1303	.access_memory = &amdgpu_ttm_access_memory
1304};
1305
1306/*
1307 * Firmware Reservation functions
1308 */
1309/**
1310 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1311 *
1312 * @adev: amdgpu_device pointer
1313 *
 1314 * Free the firmware-reserved VRAM, if it has been reserved.
1315 */
1316static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1317{
1318	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1319		NULL, &adev->fw_vram_usage.va);
1320}
1321
1322/**
1323 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1324 *
1325 * @adev: amdgpu_device pointer
1326 *
 1327 * Create a BO covering the VRAM region requested by the firmware.
1328 */
1329static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1330{
1331	struct ttm_operation_ctx ctx = { false, false };
1332	int r = 0;
1333	int i;
1334	u64 vram_size = adev->gmc.visible_vram_size;
1335	u64 offset = adev->fw_vram_usage.start_offset;
1336	u64 size = adev->fw_vram_usage.size;
1337	struct amdgpu_bo *bo;
1338
1339	adev->fw_vram_usage.va = NULL;
1340	adev->fw_vram_usage.reserved_bo = NULL;
1341
1342	if (adev->fw_vram_usage.size > 0 &&
1343		adev->fw_vram_usage.size <= vram_size) {
1344
1345		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
1346				     AMDGPU_GEM_DOMAIN_VRAM,
1347				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1348				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1349				     ttm_bo_type_kernel, NULL,
1350				     &adev->fw_vram_usage.reserved_bo);
1351		if (r)
1352			goto error_create;
1353
1354		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1355		if (r)
1356			goto error_reserve;
1357
 1358		/* remove the original mem node and create a new one at the
 1359		 * requested position
 1360		 */
1361		bo = adev->fw_vram_usage.reserved_bo;
1362		offset = ALIGN(offset, PAGE_SIZE);
1363		for (i = 0; i < bo->placement.num_placement; ++i) {
1364			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1365			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1366		}
1367
1368		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1369		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1370				     &bo->tbo.mem, &ctx);
1371		if (r)
1372			goto error_pin;
1373
1374		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1375			AMDGPU_GEM_DOMAIN_VRAM,
1376			adev->fw_vram_usage.start_offset,
1377			(adev->fw_vram_usage.start_offset +
1378			adev->fw_vram_usage.size), NULL);
1379		if (r)
1380			goto error_pin;
1381		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1382			&adev->fw_vram_usage.va);
1383		if (r)
1384			goto error_kmap;
1385
1386		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1387	}
1388	return r;
1389
1390error_kmap:
1391	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1392error_pin:
1393	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1394error_reserve:
1395	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1396error_create:
1397	adev->fw_vram_usage.va = NULL;
1398	adev->fw_vram_usage.reserved_bo = NULL;
1399	return r;
1400}
1401
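/**
 * amdgpu_ttm_init - Initialize the TTM device and create the VRAM, GTT
 * and GDS/GWS/OA memory managers.
 */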
1402int amdgpu_ttm_init(struct amdgpu_device *adev)
1403{
1404	uint64_t gtt_size;
1405	int r;
1406	u64 vis_vram_limit;
1407
1408	r = amdgpu_ttm_global_init(adev);
1409	if (r) {
1410		return r;
1411	}
 1412	/* No other users of the address space, so set it to 0 */
1413	r = ttm_bo_device_init(&adev->mman.bdev,
1414			       adev->mman.bo_global_ref.ref.object,
1415			       &amdgpu_bo_driver,
1416			       adev->ddev->anon_inode->i_mapping,
1417			       DRM_FILE_PAGE_OFFSET,
1418			       adev->need_dma32);
1419	if (r) {
1420		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1421		return r;
1422	}
1423	adev->mman.initialized = true;
1424
 1425	/* We opt to avoid OOM on system page allocations */
1426	adev->mman.bdev.no_retry = true;
1427
1428	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1429				adev->gmc.real_vram_size >> PAGE_SHIFT);
1430	if (r) {
1431		DRM_ERROR("Failed initializing VRAM heap.\n");
1432		return r;
1433	}
1434
1435	/* Reduce size of CPU-visible VRAM if requested */
1436	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1437	if (amdgpu_vis_vram_limit > 0 &&
1438	    vis_vram_limit <= adev->gmc.visible_vram_size)
1439		adev->gmc.visible_vram_size = vis_vram_limit;
1440
1441	/* Change the size here instead of the init above so only lpfn is affected */
1442	amdgpu_ttm_set_buffer_funcs_status(adev, false);
1443#ifdef CONFIG_64BIT
1444	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1445						adev->gmc.visible_vram_size);
1446#endif
1447
 1448	/*
 1449	 * The reserved vram for firmware must be pinned to the specified
 1450	 * place on the VRAM, so reserve it early.
 1451	 */
1452	r = amdgpu_ttm_fw_reserve_vram_init(adev);
1453	if (r) {
1454		return r;
1455	}
1456
1457	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1458				    AMDGPU_GEM_DOMAIN_VRAM,
1459				    &adev->stolen_vga_memory,
1460				    NULL, NULL);
1461	if (r)
1462		return r;
1463	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1464		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1465
1466	if (amdgpu_gtt_size == -1) {
1467		struct sysinfo si;
1468
1469		si_meminfo(&si);
1470		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1471			       adev->gmc.mc_vram_size),
1472			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
1473	}
1474	else
1475		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1476	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1477	if (r) {
1478		DRM_ERROR("Failed initializing GTT heap.\n");
1479		return r;
1480	}
1481	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1482		 (unsigned)(gtt_size / (1024 * 1024)));
1483
1484	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1485	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1486	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1487	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1488	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1489	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1490	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1491	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1492	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1493	/* GDS Memory */
1494	if (adev->gds.mem.total_size) {
1495		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1496				   adev->gds.mem.total_size >> PAGE_SHIFT);
1497		if (r) {
1498			DRM_ERROR("Failed initializing GDS heap.\n");
1499			return r;
1500		}
1501	}
1502
1503	/* GWS */
1504	if (adev->gds.gws.total_size) {
1505		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1506				   adev->gds.gws.total_size >> PAGE_SHIFT);
1507		if (r) {
1508			DRM_ERROR("Failed initializing gws heap.\n");
1509			return r;
1510		}
1511	}
1512
1513	/* OA */
1514	if (adev->gds.oa.total_size) {
1515		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1516				   adev->gds.oa.total_size >> PAGE_SHIFT);
1517		if (r) {
1518			DRM_ERROR("Failed initializing oa heap.\n");
1519			return r;
1520		}
1521	}
1522
1523	r = amdgpu_ttm_debugfs_init(adev);
1524	if (r) {
1525		DRM_ERROR("Failed to init debugfs\n");
1526		return r;
1527	}
1528	return 0;
1529}
1530
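/**
 * amdgpu_ttm_fini - Tear down everything set up by amdgpu_ttm_init().
 */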
1531void amdgpu_ttm_fini(struct amdgpu_device *adev)
1532{
1533	if (!adev->mman.initialized)
1534		return;
1535
1536	amdgpu_ttm_debugfs_fini(adev);
1537	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1538	amdgpu_ttm_fw_reserve_vram_fini(adev);
1539	if (adev->mman.aper_base_kaddr)
1540		iounmap(adev->mman.aper_base_kaddr);
1541	adev->mman.aper_base_kaddr = NULL;
1542
1543	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1544	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1545	if (adev->gds.mem.total_size)
1546		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1547	if (adev->gds.gws.total_size)
1548		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1549	if (adev->gds.oa.total_size)
1550		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1551	ttm_bo_device_release(&adev->mman.bdev);
1552	amdgpu_ttm_global_fini(adev);
1553	adev->mman.initialized = false;
1554	DRM_INFO("amdgpu: ttm finalized\n");
1555}
1556
1557/**
1558 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1559 *
1560 * @adev: amdgpu_device pointer
1561 * @enable: true when we can use buffer functions.
1562 *
1563 * Enable/disable use of buffer functions during suspend/resume. This should
1564 * only be called at bootup or when userspace isn't running.
1565 */
1566void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1567{
1568	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1569	uint64_t size;
1570
1571	if (!adev->mman.initialized || adev->in_gpu_reset)
1572		return;
1573
 1574	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
1575	if (enable)
1576		size = adev->gmc.real_vram_size;
1577	else
1578		size = adev->gmc.visible_vram_size;
1579	man->size = size >> PAGE_SHIFT;
1580	adev->mman.buffer_funcs_enabled = enable;
1581}
1582
1583int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1584{
1585	struct drm_file *file_priv;
1586	struct amdgpu_device *adev;
1587
1588	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1589		return -EINVAL;
1590
1591	file_priv = filp->private_data;
1592	adev = file_priv->minor->dev->dev_private;
1593	if (adev == NULL)
1594		return -EINVAL;
1595
1596	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1597}
1598
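/**
 * amdgpu_map_buffer - Map pages of a buffer object into one of the GTT
 * copy windows so the copy engine can reach pages that have no GART
 * address yet; the window's GPU address is returned in @addr.
 */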
1599static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1600			     struct ttm_mem_reg *mem, unsigned num_pages,
1601			     uint64_t offset, unsigned window,
1602			     struct amdgpu_ring *ring,
1603			     uint64_t *addr)
1604{
1605	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1606	struct amdgpu_device *adev = ring->adev;
1607	struct ttm_tt *ttm = bo->ttm;
1608	struct amdgpu_job *job;
1609	unsigned num_dw, num_bytes;
1610	dma_addr_t *dma_address;
1611	struct dma_fence *fence;
1612	uint64_t src_addr, dst_addr;
1613	uint64_t flags;
1614	int r;
1615
1616	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1617	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1618
1619	*addr = adev->gmc.gart_start;
1620	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1621		AMDGPU_GPU_PAGE_SIZE;
1622
1623	num_dw = adev->mman.buffer_funcs->copy_num_dw;
1624	while (num_dw & 0x7)
1625		num_dw++;
1626
1627	num_bytes = num_pages * 8;
1628
1629	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
1630	if (r)
1631		return r;
1632
1633	src_addr = num_dw * 4;
1634	src_addr += job->ibs[0].gpu_addr;
1635
1636	dst_addr = adev->gart.table_addr;
1637	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
1638	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
1639				dst_addr, num_bytes);
1640
1641	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1642	WARN_ON(job->ibs[0].length_dw > num_dw);
1643
1644	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
1645	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
1646	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
1647			    &job->ibs[0].ptr[num_dw]);
1648	if (r)
1649		goto error_free;
1650
1651	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1652			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1653	if (r)
1654		goto error_free;
1655
1656	dma_fence_put(fence);
1657
1658	return r;
1659
1660error_free:
1661	amdgpu_job_free(job);
1662	return r;
1663}
1664
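/**
 * amdgpu_copy_buffer - Schedule a copy of @byte_count bytes on the
 * given ring, either submitted directly or through the scheduler
 * entity, optionally synchronizing to @resv.
 */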
1665int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1666		       uint64_t dst_offset, uint32_t byte_count,
1667		       struct reservation_object *resv,
1668		       struct dma_fence **fence, bool direct_submit,
1669		       bool vm_needs_flush)
1670{
1671	struct amdgpu_device *adev = ring->adev;
1672	struct amdgpu_job *job;
1673
1674	uint32_t max_bytes;
1675	unsigned num_loops, num_dw;
1676	unsigned i;
1677	int r;
1678
1679	if (direct_submit && !ring->ready) {
1680		DRM_ERROR("Trying to move memory with ring turned off.\n");
1681		return -EINVAL;
1682	}
1683
1684	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1685	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1686	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
1687
1688	/* for IB padding */
1689	while (num_dw & 0x7)
1690		num_dw++;
1691
1692	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1693	if (r)
1694		return r;
1695
1696	job->vm_needs_flush = vm_needs_flush;
1697	if (resv) {
1698		r = amdgpu_sync_resv(adev, &job->sync, resv,
1699				     AMDGPU_FENCE_OWNER_UNDEFINED,
1700				     false);
1701		if (r) {
1702			DRM_ERROR("sync failed (%d).\n", r);
1703			goto error_free;
1704		}
1705	}
1706
1707	for (i = 0; i < num_loops; i++) {
1708		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1709
1710		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1711					dst_offset, cur_size_in_bytes);
1712
1713		src_offset += cur_size_in_bytes;
1714		dst_offset += cur_size_in_bytes;
1715		byte_count -= cur_size_in_bytes;
1716	}
1717
1718	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1719	WARN_ON(job->ibs[0].length_dw > num_dw);
1720	if (direct_submit) {
1721		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
1722				       NULL, fence);
1723		job->fence = dma_fence_get(*fence);
1724		if (r)
1725			DRM_ERROR("Error scheduling IBs (%d)\n", r);
1726		amdgpu_job_free(job);
1727	} else {
1728		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1729				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1730		if (r)
1731			goto error_free;
1732	}
1733
1734	return r;
1735
1736error_free:
1737	amdgpu_job_free(job);
1738	return r;
1739}
1740
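/**
 * amdgpu_fill_buffer - Fill a buffer object with @src_data using the
 * fill command of the copy engine, walking the BO's drm_mm nodes.
 */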
1741int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1742		       uint32_t src_data,
1743		       struct reservation_object *resv,
1744		       struct dma_fence **fence)
1745{
1746	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1747	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1748	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1749
1750	struct drm_mm_node *mm_node;
1751	unsigned long num_pages;
1752	unsigned int num_loops, num_dw;
1753
1754	struct amdgpu_job *job;
1755	int r;
1756
1757	if (!adev->mman.buffer_funcs_enabled) {
1758		DRM_ERROR("Trying to clear memory with ring turned off.\n");
1759		return -EINVAL;
1760	}
1761
1762	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
1763		r = amdgpu_ttm_alloc_gart(&bo->tbo);
1764		if (r)
1765			return r;
1766	}
1767
1768	num_pages = bo->tbo.num_pages;
1769	mm_node = bo->tbo.mem.mm_node;
1770	num_loops = 0;
1771	while (num_pages) {
1772		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1773
1774		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
1775		num_pages -= mm_node->size;
1776		++mm_node;
1777	}
1778	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1779
1780	/* for IB padding */
1781	num_dw += 64;
1782
1783	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1784	if (r)
1785		return r;
1786
1787	if (resv) {
1788		r = amdgpu_sync_resv(adev, &job->sync, resv,
1789				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
1790		if (r) {
1791			DRM_ERROR("sync failed (%d).\n", r);
1792			goto error_free;
1793		}
1794	}
1795
1796	num_pages = bo->tbo.num_pages;
1797	mm_node = bo->tbo.mem.mm_node;
1798
1799	while (num_pages) {
1800		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1801		uint64_t dst_addr;
1802
1803		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
1804		while (byte_count) {
1805			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1806
1807			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
1808						dst_addr, cur_size_in_bytes);
1809
1810			dst_addr += cur_size_in_bytes;
1811			byte_count -= cur_size_in_bytes;
1812		}
1813
1814		num_pages -= mm_node->size;
1815		++mm_node;
1816	}
1817
1818	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1819	WARN_ON(job->ibs[0].length_dw > num_dw);
1820	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1821			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1822	if (r)
1823		goto error_free;
1824
1825	return 0;
1826
1827error_free:
1828	amdgpu_job_free(job);
1829	return r;
1830}
1831
1832#if defined(CONFIG_DEBUG_FS)
1833
1834static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1835{
1836	struct drm_info_node *node = (struct drm_info_node *)m->private;
1837	unsigned ttm_pl = *(int *)node->info_ent->data;
1838	struct drm_device *dev = node->minor->dev;
1839	struct amdgpu_device *adev = dev->dev_private;
1840	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
1841	struct drm_printer p = drm_seq_file_printer(m);
1842
1843	man->func->debug(man, &p);
1844	return 0;
1845}
1846
1847static int ttm_pl_vram = TTM_PL_VRAM;
1848static int ttm_pl_tt = TTM_PL_TT;
1849
1850static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
1851	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
1852	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
1853	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
1854#ifdef CONFIG_SWIOTLB
1855	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
1856#endif
1857};
1858
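/*
 * amdgpu_ttm_vram_read - debugfs read callback for the amdgpu_vram file
 *
 * Reads VRAM one dword at a time through the indirect
 * MM_INDEX/MM_DATA register window, so both size and offset must be
 * 4-byte aligned.
 */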
1859static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
1860				    size_t size, loff_t *pos)
1861{
1862	struct amdgpu_device *adev = file_inode(f)->i_private;
1863	ssize_t result = 0;
1864	int r;
1865
1866	if (size & 0x3 || *pos & 0x3)
1867		return -EINVAL;
1868
1869	if (*pos >= adev->gmc.mc_vram_size)
1870		return -ENXIO;
1871
1872	while (size) {
1873		unsigned long flags;
1874		uint32_t value;
1875
1876		if (*pos >= adev->gmc.mc_vram_size)
1877			return result;
1878
1879		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1880		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1881		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
1882		value = RREG32_NO_KIQ(mmMM_DATA);
1883		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1884
1885		r = put_user(value, (uint32_t *)buf);
1886		if (r)
1887			return r;
1888
1889		result += 4;
1890		buf += 4;
1891		*pos += 4;
1892		size -= 4;
1893	}
1894
1895	return result;
1896}
1897
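/*
 * amdgpu_ttm_vram_write - debugfs write callback for the amdgpu_vram file
 *
 * Mirror of amdgpu_ttm_vram_read: copies dwords from user space
 * into VRAM through the MM_INDEX/MM_DATA window.
 */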
1898static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
1899				    size_t size, loff_t *pos)
1900{
1901	struct amdgpu_device *adev = file_inode(f)->i_private;
1902	ssize_t result = 0;
1903	int r;
1904
1905	if (size & 0x3 || *pos & 0x3)
1906		return -EINVAL;
1907
1908	if (*pos >= adev->gmc.mc_vram_size)
1909		return -ENXIO;
1910
1911	while (size) {
1912		unsigned long flags;
1913		uint32_t value;
1914
1915		if (*pos >= adev->gmc.mc_vram_size)
1916			return result;
1917
1918		r = get_user(value, (uint32_t *)buf);
1919		if (r)
1920			return r;
1921
1922		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1923		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1924		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
1925		WREG32_NO_KIQ(mmMM_DATA, value);
1926		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1927
1928		result += 4;
1929		buf += 4;
1930		*pos += 4;
1931		size -= 4;
1932	}
1933
1934	return result;
1935}
1936
1937static const struct file_operations amdgpu_ttm_vram_fops = {
1938	.owner = THIS_MODULE,
1939	.read = amdgpu_ttm_vram_read,
1940	.write = amdgpu_ttm_vram_write,
1941	.llseek = default_llseek,
1942};
1943
1944#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1945
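/*
 * amdgpu_ttm_gtt_read - debugfs read callback for the amdgpu_gtt file
 *
 * Walks the GART page array and copies the backing pages to user
 * space; unpopulated GART entries read back as zeroes.
 */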
1946static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
1947				   size_t size, loff_t *pos)
1948{
1949	struct amdgpu_device *adev = file_inode(f)->i_private;
1950	ssize_t result = 0;
1951	int r;
1952
1953	while (size) {
1954		loff_t p = *pos / PAGE_SIZE;
1955		unsigned off = *pos & ~PAGE_MASK;
1956		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
1957		struct page *page;
1958		void *ptr;
1959
1960		if (p >= adev->gart.num_cpu_pages)
1961			return result;
1962
1963		page = adev->gart.pages[p];
1964		if (page) {
1965			ptr = kmap(page);
1966			ptr += off;
1967
1968			r = copy_to_user(buf, ptr, cur_size);
1969			kunmap(adev->gart.pages[p]);
1970		} else
1971			r = clear_user(buf, cur_size);
1972
1973		if (r)
1974			return -EFAULT;
1975
1976		result += cur_size;
1977		buf += cur_size;
1978		*pos += cur_size;
1979		size -= cur_size;
1980	}
1981
1982	return result;
1983}
1984
1985static const struct file_operations amdgpu_ttm_gtt_fops = {
1986	.owner = THIS_MODULE,
1987	.read = amdgpu_ttm_gtt_read,
1988	.llseek = default_llseek
1989};
1990
1991#endif
1992
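/*
 * amdgpu_iomem_read - debugfs read callback for the amdgpu_iomem file
 *
 * Translates the given IOVA back to a physical page, through the
 * IOMMU when one is active, and copies the page contents to user
 * space.  Pages that don't belong to this device are rejected.
 */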
1993static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
1994				 size_t size, loff_t *pos)
1995{
1996	struct amdgpu_device *adev = file_inode(f)->i_private;
1997	struct iommu_domain *dom;
1998	ssize_t result = 0;
1999	int r;
2000
2001	dom = iommu_get_domain_for_dev(adev->dev);
2002
2003	while (size) {
2004		phys_addr_t addr = *pos & PAGE_MASK;
2005		loff_t off = *pos & ~PAGE_MASK;
2006		size_t bytes = PAGE_SIZE - off;
2007		unsigned long pfn;
2008		struct page *p;
2009		void *ptr;
2010
2011		bytes = bytes < size ? bytes : size;
2012
2013		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2014
2015		pfn = addr >> PAGE_SHIFT;
2016		if (!pfn_valid(pfn))
2017			return -EPERM;
2018
2019		p = pfn_to_page(pfn);
2020		if (p->mapping != adev->mman.bdev.dev_mapping)
2021			return -EPERM;
2022
2023		ptr = kmap(p);
2024		r = copy_to_user(buf, ptr + off, bytes);
2025		kunmap(p);
2026		if (r)
2027			return -EFAULT;
2028
2029		size -= bytes;
2030		*pos += bytes;
2031		result += bytes;
2032	}
2033
2034	return result;
2035}
2036
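/*
 * amdgpu_iomem_write - debugfs write callback for the amdgpu_iomem file
 *
 * Counterpart of amdgpu_iomem_read: resolves the IOVA the same way
 * and copies user data into the backing page.
 */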
2037static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2038				 size_t size, loff_t *pos)
2039{
2040	struct amdgpu_device *adev = file_inode(f)->i_private;
2041	struct iommu_domain *dom;
2042	ssize_t result = 0;
2043	int r;
2044
2045	dom = iommu_get_domain_for_dev(adev->dev);
2046
2047	while (size) {
2048		phys_addr_t addr = *pos & PAGE_MASK;
2049		loff_t off = *pos & ~PAGE_MASK;
2050		size_t bytes = PAGE_SIZE - off;
2051		unsigned long pfn;
2052		struct page *p;
2053		void *ptr;
2054
2055		bytes = bytes < size ? bytes : size;
2056
2057		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2058
2059		pfn = addr >> PAGE_SHIFT;
2060		if (!pfn_valid(pfn))
2061			return -EPERM;
2062
2063		p = pfn_to_page(pfn);
2064		if (p->mapping != adev->mman.bdev.dev_mapping)
2065			return -EPERM;
2066
2067		ptr = kmap(p);
2068		r = copy_from_user(ptr + off, buf, bytes);
2069		kunmap(p);
2070		if (r)
2071			return -EFAULT;
2072
2073		size -= bytes;
2074		*pos += bytes;
2075		result += bytes;
2076	}
2077
2078	return result;
2079}
2080
2081static const struct file_operations amdgpu_ttm_iomem_fops = {
2082	.owner = THIS_MODULE,
2083	.read = amdgpu_iomem_read,
2084	.write = amdgpu_iomem_write,
2085	.llseek = default_llseek
2086};
2087
2088static const struct {
2089	char *name;
2090	const struct file_operations *fops;
2091	int domain;
2092} ttm_debugfs_entries[] = {
2093	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2094#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2095	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2096#endif
2097	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2098};
2099
2100#endif
2101
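/*
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files
 *
 * Creates the amdgpu_vram/amdgpu_gtt/amdgpu_iomem files, sized to
 * match their apertures, plus the memory manager dump tables; the
 * SWIOTLB page pool entry is skipped when SWIOTLB is unused.
 */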
2102static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2103{
2104#if defined(CONFIG_DEBUG_FS)
2105	unsigned count;
2106
2107	struct drm_minor *minor = adev->ddev->primary;
2108	struct dentry *ent, *root = minor->debugfs_root;
2109
2110	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2111		ent = debugfs_create_file(
2112				ttm_debugfs_entries[count].name,
2113				S_IFREG | S_IRUGO, root,
2114				adev,
2115				ttm_debugfs_entries[count].fops);
2116		if (IS_ERR(ent))
2117			return PTR_ERR(ent);
2118		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2119			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2120		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2121			i_size_write(ent->d_inode, adev->gmc.gart_size);
2122		adev->mman.debugfs_entries[count] = ent;
2123	}
2124
2125	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2126
2127#ifdef CONFIG_SWIOTLB
2128	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2129		--count;
2130#endif
2131
2132	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2133#else
2134	return 0;
2135#endif
2136}
2137
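/*
 * amdgpu_ttm_debugfs_fini - remove the TTM debugfs files again
 */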
2138static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
2139{
2140#if defined(CONFIG_DEBUG_FS)
2141	unsigned i;
2142
2143	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
2144		debugfs_remove(adev->mman.debugfs_entries[i]);
2145#endif
2146}