   1/*
   2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 */
  22#include <linux/dma-buf.h>
  23#include <linux/list.h>
  24#include <linux/pagemap.h>
  25#include <linux/sched/mm.h>
  26#include <linux/sched/task.h>
  27
  28#include "amdgpu_object.h"
  29#include "amdgpu_gem.h"
  30#include "amdgpu_vm.h"
  31#include "amdgpu_amdkfd.h"
  32#include "amdgpu_dma_buf.h"
  33#include <uapi/linux/kfd_ioctl.h>
  34#include "amdgpu_xgmi.h"
  35
  36/* Userptr restore delay, just long enough to allow consecutive VM
  37 * changes to accumulate
  38 */
  39#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
  40
  41/* Impose limit on how much memory KFD can use */
  42static struct {
  43	uint64_t max_system_mem_limit;
  44	uint64_t max_ttm_mem_limit;
  45	int64_t system_mem_used;
  46	int64_t ttm_mem_used;
  47	spinlock_t mem_limit_lock;
  48} kfd_mem_limit;
  49
  50static const char * const domain_bit_to_string[] = {
  51		"CPU",
  52		"GTT",
  53		"VRAM",
  54		"GDS",
  55		"GWS",
  56		"OA"
  57};
  58
  59#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
  60
  61static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
  62
  63
  64static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
  65{
  66	return (struct amdgpu_device *)kgd;
  67}
  68
  69static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
  70		struct kgd_mem *mem)
  71{
  72	struct kfd_mem_attachment *entry;
  73
  74	list_for_each_entry(entry, &mem->attachments, list)
  75		if (entry->bo_va->base.vm == avm)
  76			return true;
  77
  78	return false;
  79}
  80
  81/* Set memory usage limits. Currently, limits are
  82 *  System (TTM + userptr) memory - 15/16th System RAM
  83 *  TTM memory - 3/8th System RAM
  84 */
  85void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
  86{
  87	struct sysinfo si;
  88	uint64_t mem;
  89
  90	si_meminfo(&si);
  91	mem = si.freeram - si.freehigh;
  92	mem *= si.mem_unit;
  93
  94	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
  95	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
  96	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
  97	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
  98		(kfd_mem_limit.max_system_mem_limit >> 20),
  99		(kfd_mem_limit.max_ttm_mem_limit >> 20));
 100}
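/* For illustration (numbers not from the original source): on a machine
 * where si_meminfo() reports roughly 16 GiB of usable memory, the limits
 * computed above work out to 16 - 16/16 = 15 GiB for system (TTM +
 * userptr) memory and 16/2 - 16/8 = 6 GiB, i.e. 3/8 of RAM, for TTM
 * memory.
 */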
 101
 102void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
 103{
 104	kfd_mem_limit.system_mem_used += size;
 105}
 106
 107/* Estimate page table size needed to represent a given memory size
 108 *
 109 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 110 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 111 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 112 * for 2MB pages for TLB efficiency. However, small allocations and
 113 * fragmented system memory still need some 4KB pages. We choose a
 114 * compromise that should work in most cases without reserving too
 115 * much memory for page tables unnecessarily (factor 16K, >> 14).
 116 */
 117#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
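/* For illustration: a 64 GiB allocation reserves 64 GiB >> 14 = 4 MiB for
 * page tables, between the 128 MiB that pure 4 KiB mappings would need
 * (>> 9) and the 256 KiB that pure 2 MiB mappings would need (>> 18).
 */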
 118
 119static size_t amdgpu_amdkfd_acc_size(uint64_t size)
 120{
 121	size >>= PAGE_SHIFT;
 122	size *= sizeof(dma_addr_t) + sizeof(void *);
 123
 124	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
 125		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
 126		PAGE_ALIGN(size);
 127}
 128
 129static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 130		uint64_t size, u32 domain, bool sg)
 131{
 132	uint64_t reserved_for_pt =
 133		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
 134	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
 135	int ret = 0;
 136
 137	acc_size = amdgpu_amdkfd_acc_size(size);
 138
 139	vram_needed = 0;
 140	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 141		/* TTM GTT memory */
 142		system_mem_needed = acc_size + size;
 143		ttm_mem_needed = acc_size + size;
 144	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
 145		/* Userptr */
 146		system_mem_needed = acc_size + size;
 147		ttm_mem_needed = acc_size;
 148	} else {
 149		/* VRAM and SG */
 150		system_mem_needed = acc_size;
 151		ttm_mem_needed = acc_size;
 152		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 153			vram_needed = size;
 154	}
 155
 156	spin_lock(&kfd_mem_limit.mem_limit_lock);
 157
 158	if (kfd_mem_limit.system_mem_used + system_mem_needed >
 159	    kfd_mem_limit.max_system_mem_limit)
 160		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
 161
 162	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
 163	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
 164	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
 165	     kfd_mem_limit.max_ttm_mem_limit) ||
 166	    (adev->kfd.vram_used + vram_needed >
 167	     adev->gmc.real_vram_size - reserved_for_pt)) {
 168		ret = -ENOMEM;
 169	} else {
 170		kfd_mem_limit.system_mem_used += system_mem_needed;
 171		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
 172		adev->kfd.vram_used += vram_needed;
 173	}
 174
 175	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 176	return ret;
 177}
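/* Accounting summary: GTT BOs charge acc_size + size against both the
 * system and TTM limits; userptr BOs charge acc_size + size against the
 * system limit but only acc_size against TTM, since their pages come from
 * user memory; VRAM and SG BOs charge only acc_size against both limits,
 * and VRAM BOs additionally charge size against adev->kfd.vram_used.
 */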
 178
 179static void unreserve_mem_limit(struct amdgpu_device *adev,
 180		uint64_t size, u32 domain, bool sg)
 181{
 182	size_t acc_size;
 183
 184	acc_size = amdgpu_amdkfd_acc_size(size);
 185
 186	spin_lock(&kfd_mem_limit.mem_limit_lock);
 187	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 188		kfd_mem_limit.system_mem_used -= (acc_size + size);
 189		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
 190	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
 191		kfd_mem_limit.system_mem_used -= (acc_size + size);
 192		kfd_mem_limit.ttm_mem_used -= acc_size;
 193	} else {
 194		kfd_mem_limit.system_mem_used -= acc_size;
 195		kfd_mem_limit.ttm_mem_used -= acc_size;
 196		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 197			adev->kfd.vram_used -= size;
 198			WARN_ONCE(adev->kfd.vram_used < 0,
 199				  "kfd VRAM memory accounting unbalanced");
 200		}
 201	}
 202	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 203		  "kfd system memory accounting unbalanced");
 204	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
 205		  "kfd TTM memory accounting unbalanced");
 206
 207	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 208}
 209
 210void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 211{
 212	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 213	u32 domain = bo->preferred_domains;
 214	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
 215
 216	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
 217		domain = AMDGPU_GEM_DOMAIN_CPU;
 218		sg = false;
 219	}
 220
 221	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
 222}
 223
 224
 225/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 226 *  reservation object.
 227 *
 228 * @bo: [IN] Remove eviction fence(s) from this BO
 229 * @ef: [IN] This eviction fence is removed if it
 230 *  is present in the shared list.
 231 *
 232 * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv held.
 233 */
 234static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 235					struct amdgpu_amdkfd_fence *ef)
 236{
 237	struct dma_resv *resv = bo->tbo.base.resv;
 238	struct dma_resv_list *old, *new;
 239	unsigned int i, j, k;
 240
 241	if (!ef)
 242		return -EINVAL;
 243
 244	old = dma_resv_shared_list(resv);
 245	if (!old)
 246		return 0;
 247
 248	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
 249	if (!new)
 250		return -ENOMEM;
 251
 252	/* Go through all the shared fences in the reservation object and sort
 253	 * the interesting ones to the end of the list.
 254	 */
 255	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
 256		struct dma_fence *f;
 257
 258		f = rcu_dereference_protected(old->shared[i],
 259					      dma_resv_held(resv));
 260
 261		if (f->context == ef->base.context)
 262			RCU_INIT_POINTER(new->shared[--j], f);
 263		else
 264			RCU_INIT_POINTER(new->shared[k++], f);
 265	}
 266	new->shared_max = old->shared_max;
 267	new->shared_count = k;
 268
 269	/* Install the new fence list, seqcount provides the barriers */
 270	write_seqcount_begin(&resv->seq);
 271	RCU_INIT_POINTER(resv->fence, new);
 272	write_seqcount_end(&resv->seq);
 273
 274	/* Drop the references to the removed fences or move them to ef_list */
 275	for (i = j; i < old->shared_count; ++i) {
 276		struct dma_fence *f;
 277
 278		f = rcu_dereference_protected(new->shared[i],
 279					      dma_resv_held(resv));
 280		dma_fence_put(f);
 281	}
 282	kfree_rcu(old, rcu);
 283
 284	return 0;
 285}
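/* For illustration: with shared fences [A, E1, B, E2], where E1 and E2
 * belong to the eviction fence's context, the sort above yields
 * new->shared = [A, B, E2, E1] with shared_count = 2, and the final loop
 * drops the references to E2 and E1.
 */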
 286
 287int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
 288{
 289	struct amdgpu_bo *root = bo;
 290	struct amdgpu_vm_bo_base *vm_bo;
 291	struct amdgpu_vm *vm;
 292	struct amdkfd_process_info *info;
 293	struct amdgpu_amdkfd_fence *ef;
 294	int ret;
 295
 296	/* We can always get the vm_bo from the root PD BO. */
 297	while (root->parent)
 298		root = root->parent;
 299
 300	vm_bo = root->vm_bo;
 301	if (!vm_bo)
 302		return 0;
 303
 304	vm = vm_bo->vm;
 305	if (!vm)
 306		return 0;
 307
 308	info = vm->process_info;
 309	if (!info || !info->eviction_fence)
 310		return 0;
 311
 312	ef = container_of(dma_fence_get(&info->eviction_fence->base),
 313			struct amdgpu_amdkfd_fence, base);
 314
 315	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
 316	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
 317	dma_resv_unlock(bo->tbo.base.resv);
 318
 319	dma_fence_put(&ef->base);
 320	return ret;
 321}
 322
 323static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 324				     bool wait)
 325{
 326	struct ttm_operation_ctx ctx = { false, false };
 327	int ret;
 328
 329	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
 330		 "Called with userptr BO"))
 331		return -EINVAL;
 332
 333	amdgpu_bo_placement_from_domain(bo, domain);
 334
 335	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 336	if (ret)
 337		goto validate_fail;
 338	if (wait)
 339		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
 340
 341validate_fail:
 342	return ret;
 343}
 344
 345static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 346{
 347	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 348}
 349
 350/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 351 *
 352 * Page directories are not updated here because huge page handling
 353 * during page table updates can invalidate page directory entries
 354 * again. Page directories are only updated after updating page
 355 * tables.
 356 */
 357static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 358{
 359	struct amdgpu_bo *pd = vm->root.bo;
 360	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 361	int ret;
 362
 363	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
 364	if (ret) {
 365		pr_err("failed to validate PT BOs\n");
 366		return ret;
 367	}
 368
 369	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
 370	if (ret) {
 371		pr_err("failed to validate PD\n");
 372		return ret;
 373	}
 374
 375	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 376
 377	if (vm->use_cpu_for_update) {
 378		ret = amdgpu_bo_kmap(pd, NULL);
 379		if (ret) {
 380			pr_err("failed to kmap PD, ret=%d\n", ret);
 381			return ret;
 382		}
 383	}
 384
 385	return 0;
 386}
 387
 388static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 389{
 390	struct amdgpu_bo *pd = vm->root.bo;
 391	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 392	int ret;
 393
 394	ret = amdgpu_vm_update_pdes(adev, vm, false);
 395	if (ret)
 396		return ret;
 397
 398	return amdgpu_sync_fence(sync, vm->last_update);
 399}
 400
 401static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 402{
 403	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
 404	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
 405	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
 406	uint32_t mapping_flags;
 407	uint64_t pte_flags;
 408	bool snoop = false;
 409
 410	mapping_flags = AMDGPU_VM_PAGE_READABLE;
 411	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
 412		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
 413	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
 414		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
 415
 416	switch (adev->asic_type) {
 417	case CHIP_ARCTURUS:
 418		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 419			if (bo_adev == adev)
 420				mapping_flags |= coherent ?
 421					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
 422			else
 423				mapping_flags |= coherent ?
 424					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 425		} else {
 426			mapping_flags |= coherent ?
 427				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 428		}
 429		break;
 430	case CHIP_ALDEBARAN:
 431		if (coherent && uncached) {
 432			if (adev->gmc.xgmi.connected_to_cpu ||
 433				!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
 434				snoop = true;
 435			mapping_flags |= AMDGPU_VM_MTYPE_UC;
 436		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 437			if (bo_adev == adev) {
 438				mapping_flags |= coherent ?
 439					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
 440				if (adev->gmc.xgmi.connected_to_cpu)
 441					snoop = true;
 442			} else {
 443				mapping_flags |= coherent ?
 444					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 445				if (amdgpu_xgmi_same_hive(adev, bo_adev))
 446					snoop = true;
 447			}
 448		} else {
 449			snoop = true;
 450			mapping_flags |= coherent ?
 451				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 452		}
 453		break;
 454	default:
 455		mapping_flags |= coherent ?
 456			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 457	}
 458
 459	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
 460	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
 461
 462	return pte_flags;
 463}
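/* MTYPE selection above, summarized: Arcturus maps local VRAM as CC
 * (coherent) or RW and everything else as UC (coherent) or NC. Aldebaran
 * maps coherent+uncached memory as UC (snooped unless it is VRAM on a GPU
 * not connected to the CPU), local VRAM as CC or RW (snooped when
 * connected to the CPU), remote VRAM as UC or NC (snooped within the same
 * XGMI hive), and system memory as UC or NC, always snooped. All other
 * ASICs use UC (coherent) or NC.
 */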
 464
 465static int
 466kfd_mem_dmamap_userptr(struct kgd_mem *mem,
 467		       struct kfd_mem_attachment *attachment)
 468{
 469	enum dma_data_direction direction =
 470		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
 471		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 472	struct ttm_operation_ctx ctx = {.interruptible = true};
 473	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
 474	struct amdgpu_device *adev = attachment->adev;
 475	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
 476	struct ttm_tt *ttm = bo->tbo.ttm;
 477	int ret;
 478
 479	/* Check the page count first so the early return cannot leak ttm->sg */
 480	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
 481		return -EINVAL;
 482
 483	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
 484	if (unlikely(!ttm->sg))
 485		return -ENOMEM;
 485
 486	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
 487	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
 488					ttm->num_pages, 0,
 489					(u64)ttm->num_pages << PAGE_SHIFT,
 490					GFP_KERNEL);
 491	if (unlikely(ret))
 492		goto free_sg;
 493
 494	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
 495	if (unlikely(ret))
 496		goto release_sg;
 497
 498	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
 499				       ttm->num_pages);
 500
 501	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 502	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 503	if (ret)
 504		goto unmap_sg;
 505
 506	return 0;
 507
 508unmap_sg:
 509	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 510release_sg:
 511	pr_err("DMA map userptr failed: %d\n", ret);
 512	sg_free_table(ttm->sg);
 513free_sg:
 514	kfree(ttm->sg);
 515	ttm->sg = NULL;
 516	return ret;
 517}
 518
 519static int
 520kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
 521{
 522	struct ttm_operation_ctx ctx = {.interruptible = true};
 523	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
 524
 525	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 526	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 527}
 528
 529static int
 530kfd_mem_dmamap_attachment(struct kgd_mem *mem,
 531			  struct kfd_mem_attachment *attachment)
 532{
 533	switch (attachment->type) {
 534	case KFD_MEM_ATT_SHARED:
 535		return 0;
 536	case KFD_MEM_ATT_USERPTR:
 537		return kfd_mem_dmamap_userptr(mem, attachment);
 538	case KFD_MEM_ATT_DMABUF:
 539		return kfd_mem_dmamap_dmabuf(attachment);
 540	default:
 541		WARN_ON_ONCE(1);
 542	}
 543	return -EINVAL;
 544}
 545
 546static void
 547kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
 548			 struct kfd_mem_attachment *attachment)
 549{
 550	enum dma_data_direction direction =
 551		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
 552		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 553	struct ttm_operation_ctx ctx = {.interruptible = false};
 554	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
 555	struct amdgpu_device *adev = attachment->adev;
 556	struct ttm_tt *ttm = bo->tbo.ttm;
 557
 558	if (unlikely(!ttm->sg))
 559		return;
 560
 561	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 562	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 563
 564	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 565	sg_free_table(ttm->sg);
 566	kfree(ttm->sg);
 567	ttm->sg = NULL;
 568}
 569
 570static void
 571kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
 572{
 573	struct ttm_operation_ctx ctx = {.interruptible = true};
 574	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
 575
 576	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 577	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 578}
 579
 580static void
 581kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
 582			    struct kfd_mem_attachment *attachment)
 583{
 584	switch (attachment->type) {
 585	case KFD_MEM_ATT_SHARED:
 586		break;
 587	case KFD_MEM_ATT_USERPTR:
 588		kfd_mem_dmaunmap_userptr(mem, attachment);
 589		break;
 590	case KFD_MEM_ATT_DMABUF:
 591		kfd_mem_dmaunmap_dmabuf(attachment);
 592		break;
 593	default:
 594		WARN_ON_ONCE(1);
 595	}
 596}
 597
 598static int
 599kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
 600		       struct amdgpu_bo **bo)
 601{
 602	unsigned long bo_size = mem->bo->tbo.base.size;
 603	struct drm_gem_object *gobj;
 604	int ret;
 605
 606	ret = amdgpu_bo_reserve(mem->bo, false);
 607	if (ret)
 608		return ret;
 609
 610	ret = amdgpu_gem_object_create(adev, bo_size, 1,
 611				       AMDGPU_GEM_DOMAIN_CPU,
 612				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
 613				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
 614				       &gobj);
 615	amdgpu_bo_unreserve(mem->bo);
 616	if (ret)
 617		return ret;
 618
 619	*bo = gem_to_amdgpu_bo(gobj);
 620	(*bo)->parent = amdgpu_bo_ref(mem->bo);
 621
 622	return 0;
 623}
 624
 625static int
 626kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
 627		      struct amdgpu_bo **bo)
 628{
 629	struct drm_gem_object *gobj;
 630	int ret;
 631
 632	if (!mem->dmabuf) {
 633		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
 634			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
 635				DRM_RDWR : 0);
 636		if (IS_ERR(mem->dmabuf)) {
 637			ret = PTR_ERR(mem->dmabuf);
 638			mem->dmabuf = NULL;
 639			return ret;
 640		}
 641	}
 642
 643	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
 644	if (IS_ERR(gobj))
 645		return PTR_ERR(gobj);
 646
 647	/* Import takes an extra reference on the dmabuf. Drop it now to
 648	 * avoid leaking it. We only need the one reference in
 649	 * kgd_mem->dmabuf.
 650	 */
 651	dma_buf_put(mem->dmabuf);
 652
 653	*bo = gem_to_amdgpu_bo(gobj);
 654	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
 655	(*bo)->parent = amdgpu_bo_ref(mem->bo);
 656
 657	return 0;
 658}
 659
 660/* kfd_mem_attach - Add a BO to a VM
 661 *
 662 * Everything that needs to be done only once when a BO is first added
 663 * to a VM. It can later be mapped and unmapped many times without
 664 * repeating these steps.
 665 *
 666 * 0. Create BO for DMA mapping, if needed
 667 * 1. Allocate and initialize BO VA entry data structure
 668 * 2. Add BO to the VM
 669 * 3. Determine ASIC-specific PTE flags
 670 * 4. Alloc page tables and directories if needed
 671 * 4a.  Validate new page tables and directories
 672 */
 673static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 674		struct amdgpu_vm *vm, bool is_aql)
 675{
 676	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
 677	unsigned long bo_size = mem->bo->tbo.base.size;
 678	uint64_t va = mem->va;
 679	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
 680	struct amdgpu_bo *bo[2] = {NULL, NULL};
 681	int i, ret;
 682
 683	if (!va) {
 684		pr_err("Invalid VA when adding BO to VM\n");
 685		return -EINVAL;
 686	}
 687
 688	for (i = 0; i <= is_aql; i++) {
 689		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
 690		if (unlikely(!attachment[i])) {
 691			ret = -ENOMEM;
 692			goto unwind;
 693		}
 694
 695		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
 696			 va + bo_size, vm);
 697
 698		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
 699					amdgpu_xgmi_same_hive(adev, bo_adev))) {
 700			/* Mappings on the local GPU and VRAM mappings in the
 701			 * local hive share the original BO
 702			 */
 703			attachment[i]->type = KFD_MEM_ATT_SHARED;
 704			bo[i] = mem->bo;
 705			drm_gem_object_get(&bo[i]->tbo.base);
 706		} else if (i > 0) {
 707			/* Multiple mappings on the same GPU share the BO */
 708			attachment[i]->type = KFD_MEM_ATT_SHARED;
 709			bo[i] = bo[0];
 710			drm_gem_object_get(&bo[i]->tbo.base);
 711		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
 712			/* Create an SG BO to DMA-map userptrs on other GPUs */
 713			attachment[i]->type = KFD_MEM_ATT_USERPTR;
 714			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
 715			if (ret)
 716				goto unwind;
 717		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
 718			   mem->bo->tbo.type != ttm_bo_type_sg) {
 719			/* GTT BOs use DMA-mapping ability of dynamic-attach
 720			 * DMA bufs. TODO: The same should work for VRAM on
 721			 * large-BAR GPUs.
 722			 */
 723			attachment[i]->type = KFD_MEM_ATT_DMABUF;
 724			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
 725			if (ret)
 726				goto unwind;
 727		} else {
 728			/* FIXME: Need to DMA-map other BO types:
 729			 * large-BAR VRAM, doorbells, MMIO remap
 730			 */
 731			attachment[i]->type = KFD_MEM_ATT_SHARED;
 732			bo[i] = mem->bo;
 733			drm_gem_object_get(&bo[i]->tbo.base);
 734		}
 735
 736		/* Add BO to VM internal data structures */
 737		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
 738		if (unlikely(!attachment[i]->bo_va)) {
 739			ret = -ENOMEM;
 740			pr_err("Failed to add BO object to VM. ret == %d\n",
 741			       ret);
 742			goto unwind;
 743		}
 744
 745		attachment[i]->va = va;
 746		attachment[i]->pte_flags = get_pte_flags(adev, mem);
 747		attachment[i]->adev = adev;
 748		list_add(&attachment[i]->list, &mem->attachments);
 749
 750		va += bo_size;
 751	}
 752
 753	return 0;
 754
 755unwind:
 756	for (; i >= 0; i--) {
 757		if (!attachment[i])
 758			continue;
 759		if (attachment[i]->bo_va) {
 760			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
 761			list_del(&attachment[i]->list);
 762		}
 763		if (bo[i])
 764			drm_gem_object_put(&bo[i]->tbo.base);
 765		kfree(attachment[i]);
 766	}
 767	return ret;
 768}
 769
 770static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 771{
 772	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
 773
 774	pr_debug("\t remove VA 0x%llx in entry %p\n",
 775			attachment->va, attachment);
 776	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
 777	drm_gem_object_put(&bo->tbo.base);
 778	list_del(&attachment->list);
 779	kfree(attachment);
 780}
 781
 782static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
 783				struct amdkfd_process_info *process_info,
 784				bool userptr)
 785{
 786	struct ttm_validate_buffer *entry = &mem->validate_list;
 787	struct amdgpu_bo *bo = mem->bo;
 788
 789	INIT_LIST_HEAD(&entry->head);
 790	entry->num_shared = 1;
 791	entry->bo = &bo->tbo;
 792	mutex_lock(&process_info->lock);
 793	if (userptr)
 794		list_add_tail(&entry->head, &process_info->userptr_valid_list);
 795	else
 796		list_add_tail(&entry->head, &process_info->kfd_bo_list);
 797	mutex_unlock(&process_info->lock);
 798}
 799
 800static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
 801		struct amdkfd_process_info *process_info)
 802{
 803	struct ttm_validate_buffer *bo_list_entry;
 804
 805	bo_list_entry = &mem->validate_list;
 806	mutex_lock(&process_info->lock);
 807	list_del(&bo_list_entry->head);
 808	mutex_unlock(&process_info->lock);
 809}
 810
 811/* Initializes user pages. It registers the MMU notifier and validates
 812 * the userptr BO in the GTT domain.
 813 *
 814 * The BO must already be on the userptr_valid_list. Otherwise an
 815 * eviction and restore may happen that leaves the new BO unmapped
 816 * with the user mode queues running.
 817 *
 818 * Takes the process_info->lock to protect against concurrent restore
 819 * workers.
 820 *
 821 * Returns 0 for success, negative errno for errors.
 822 */
 823static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
 824{
 825	struct amdkfd_process_info *process_info = mem->process_info;
 826	struct amdgpu_bo *bo = mem->bo;
 827	struct ttm_operation_ctx ctx = { true, false };
 828	int ret = 0;
 829
 830	mutex_lock(&process_info->lock);
 831
 832	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
 833	if (ret) {
 834		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
 835		goto out;
 836	}
 837
 838	ret = amdgpu_mn_register(bo, user_addr);
 839	if (ret) {
 840		pr_err("%s: Failed to register MMU notifier: %d\n",
 841		       __func__, ret);
 842		goto out;
 843	}
 844
 845	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 846	if (ret) {
 847		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
 848		goto unregister_out;
 849	}
 850
 851	ret = amdgpu_bo_reserve(bo, true);
 852	if (ret) {
 853		pr_err("%s: Failed to reserve BO\n", __func__);
 854		goto release_out;
 855	}
 856	amdgpu_bo_placement_from_domain(bo, mem->domain);
 857	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 858	if (ret)
 859		pr_err("%s: failed to validate BO\n", __func__);
 860	amdgpu_bo_unreserve(bo);
 861
 862release_out:
 863	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 864unregister_out:
 865	if (ret)
 866		amdgpu_mn_unregister(bo);
 867out:
 868	mutex_unlock(&process_info->lock);
 869	return ret;
 870}
 871
 872/* Reserving a BO and its page table BOs must happen atomically to
 873 * avoid deadlocks. Some operations update multiple VMs at once. Track
 874 * all the reservation info in a context structure. Optionally a sync
 875 * object can track VM updates.
 876 */
 877struct bo_vm_reservation_context {
 878	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
 879	unsigned int n_vms;		    /* Number of VMs reserved	    */
 880	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
 881	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
 882	struct list_head list, duplicates;  /* BO lists			    */
 883	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
 884	bool reserved;			    /* Whether BOs are reserved	    */
 885};
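/* Typical flow (illustrative): a caller fills a bo_vm_reservation_context
 * via reserve_bo_and_vm() or reserve_bo_and_cond_vms(), performs its VM
 * updates while the BOs are reserved, and finally calls
 * unreserve_bo_and_vms(), optionally waiting on ctx->sync for pending
 * page-table updates.
 */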
 886
 887enum bo_vm_match {
 888	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
 889	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
 890	BO_VM_ALL,		/* Match all VMs a BO was added to    */
 891};
 892
 893/**
 894 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 895 * @mem: KFD BO structure.
 896 * @vm: the VM to reserve.
 897 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 898 */
 899static int reserve_bo_and_vm(struct kgd_mem *mem,
 900			      struct amdgpu_vm *vm,
 901			      struct bo_vm_reservation_context *ctx)
 902{
 903	struct amdgpu_bo *bo = mem->bo;
 904	int ret;
 905
 906	WARN_ON(!vm);
 907
 908	ctx->reserved = false;
 909	ctx->n_vms = 1;
 910	ctx->sync = &mem->sync;
 911
 912	INIT_LIST_HEAD(&ctx->list);
 913	INIT_LIST_HEAD(&ctx->duplicates);
 914
 915	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
 916	if (!ctx->vm_pd)
 917		return -ENOMEM;
 918
 919	ctx->kfd_bo.priority = 0;
 920	ctx->kfd_bo.tv.bo = &bo->tbo;
 921	ctx->kfd_bo.tv.num_shared = 1;
 922	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 923
 924	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 925
 926	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
 927				     false, &ctx->duplicates);
 928	if (ret) {
 929		pr_err("Failed to reserve buffers in ttm.\n");
 930		kfree(ctx->vm_pd);
 931		ctx->vm_pd = NULL;
 932		return ret;
 933	}
 934
 935	ctx->reserved = true;
 936	return 0;
 937}
 938
 939/**
 940 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 941 * @mem: KFD BO structure.
 942 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 943 * are used. Otherwise, only the given VM is used.
 944 * @map_type: the mapping status that will be used to filter the VMs.
 945 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 946 *
 947 * Returns 0 for success, negative for failure.
 948 */
 949static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 950				struct amdgpu_vm *vm, enum bo_vm_match map_type,
 951				struct bo_vm_reservation_context *ctx)
 952{
 953	struct amdgpu_bo *bo = mem->bo;
 954	struct kfd_mem_attachment *entry;
 955	unsigned int i;
 956	int ret;
 957
 958	ctx->reserved = false;
 959	ctx->n_vms = 0;
 960	ctx->vm_pd = NULL;
 961	ctx->sync = &mem->sync;
 962
 963	INIT_LIST_HEAD(&ctx->list);
 964	INIT_LIST_HEAD(&ctx->duplicates);
 965
 966	list_for_each_entry(entry, &mem->attachments, list) {
 967		if ((vm && vm != entry->bo_va->base.vm) ||
 968			(entry->is_mapped != map_type
 969			&& map_type != BO_VM_ALL))
 970			continue;
 971
 972		ctx->n_vms++;
 973	}
 974
 975	if (ctx->n_vms != 0) {
 976		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
 977				     GFP_KERNEL);
 978		if (!ctx->vm_pd)
 979			return -ENOMEM;
 980	}
 981
 982	ctx->kfd_bo.priority = 0;
 983	ctx->kfd_bo.tv.bo = &bo->tbo;
 984	ctx->kfd_bo.tv.num_shared = 1;
 985	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 986
 987	i = 0;
 988	list_for_each_entry(entry, &mem->attachments, list) {
 989		if ((vm && vm != entry->bo_va->base.vm) ||
 990			(entry->is_mapped != map_type
 991			&& map_type != BO_VM_ALL))
 992			continue;
 993
 994		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
 995				&ctx->vm_pd[i]);
 996		i++;
 997	}
 998
 999	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1000				     false, &ctx->duplicates);
1001	if (ret) {
1002		pr_err("Failed to reserve buffers in ttm.\n");
1003		kfree(ctx->vm_pd);
1004		ctx->vm_pd = NULL;
1005		return ret;
1006	}
1007
1008	ctx->reserved = true;
1009	return 0;
1010}
1011
1012/**
1013 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1014 * @ctx: Reservation context to unreserve
1015 * @wait: Optionally wait for a sync object representing pending VM updates
1016 * @intr: Whether the wait is interruptible
1017 *
1018 * Also frees any resources allocated in
1019 * reserve_bo_and_(cond_)vm(s). Returns the status from
1020 * amdgpu_sync_wait.
1021 */
1022static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1023				 bool wait, bool intr)
1024{
1025	int ret = 0;
1026
1027	if (wait)
1028		ret = amdgpu_sync_wait(ctx->sync, intr);
1029
1030	if (ctx->reserved)
1031		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1032	kfree(ctx->vm_pd);
1033
1034	ctx->sync = NULL;
1035
1036	ctx->reserved = false;
1037	ctx->vm_pd = NULL;
1038
1039	return ret;
1040}
1041
1042static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1043				struct kfd_mem_attachment *entry,
1044				struct amdgpu_sync *sync)
1045{
1046	struct amdgpu_bo_va *bo_va = entry->bo_va;
1047	struct amdgpu_device *adev = entry->adev;
1048	struct amdgpu_vm *vm = bo_va->base.vm;
1049
1050	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1051
1052	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1053
1054	amdgpu_sync_fence(sync, bo_va->last_pt_update);
1055
1056	kfd_mem_dmaunmap_attachment(mem, entry);
1057}
1058
1059static int update_gpuvm_pte(struct kgd_mem *mem,
1060			    struct kfd_mem_attachment *entry,
1061			    struct amdgpu_sync *sync)
1062{
1063	struct amdgpu_bo_va *bo_va = entry->bo_va;
1064	struct amdgpu_device *adev = entry->adev;
1065	int ret;
1066
1067	ret = kfd_mem_dmamap_attachment(mem, entry);
1068	if (ret)
1069		return ret;
1070
1071	/* Update the page tables  */
1072	ret = amdgpu_vm_bo_update(adev, bo_va, false);
1073	if (ret) {
1074		pr_err("amdgpu_vm_bo_update failed\n");
1075		return ret;
1076	}
1077
1078	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1079}
1080
1081static int map_bo_to_gpuvm(struct kgd_mem *mem,
1082			   struct kfd_mem_attachment *entry,
1083			   struct amdgpu_sync *sync,
1084			   bool no_update_pte)
1085{
1086	int ret;
1087
1088	/* Set virtual address for the allocation */
1089	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1090			       amdgpu_bo_size(entry->bo_va->base.bo),
1091			       entry->pte_flags);
1092	if (ret) {
1093		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1094				entry->va, ret);
1095		return ret;
1096	}
1097
1098	if (no_update_pte)
1099		return 0;
1100
1101	ret = update_gpuvm_pte(mem, entry, sync);
1102	if (ret) {
1103		pr_err("update_gpuvm_pte() failed\n");
1104		goto update_gpuvm_pte_failed;
1105	}
1106
1107	return 0;
1108
1109update_gpuvm_pte_failed:
1110	unmap_bo_from_gpuvm(mem, entry, sync);
1111	return ret;
1112}
1113
1114static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1115{
1116	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1117
1118	if (!sg)
1119		return NULL;
1120	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1121		kfree(sg);
1122		return NULL;
1123	}
1124	sg->sgl->dma_address = addr;
1125	sg->sgl->length = size;
1126#ifdef CONFIG_NEED_SG_DMA_LENGTH
1127	sg->sgl->dma_length = size;
1128#endif
1129	return sg;
1130}
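/* The single-entry sg table built above carries the doorbell or MMIO bus
 * address directly in dma_address; it is consumed by the
 * KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL / _MMIO_REMAP path in
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() below.
 */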
1131
1132static int process_validate_vms(struct amdkfd_process_info *process_info)
1133{
1134	struct amdgpu_vm *peer_vm;
1135	int ret;
1136
1137	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1138			    vm_list_node) {
1139		ret = vm_validate_pt_pd_bos(peer_vm);
1140		if (ret)
1141			return ret;
1142	}
1143
1144	return 0;
1145}
1146
1147static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1148				 struct amdgpu_sync *sync)
1149{
1150	struct amdgpu_vm *peer_vm;
1151	int ret;
1152
1153	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1154			    vm_list_node) {
1155		struct amdgpu_bo *pd = peer_vm->root.bo;
1156
1157		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1158				       AMDGPU_SYNC_NE_OWNER,
1159				       AMDGPU_FENCE_OWNER_KFD);
1160		if (ret)
1161			return ret;
1162	}
1163
1164	return 0;
1165}
1166
1167static int process_update_pds(struct amdkfd_process_info *process_info,
1168			      struct amdgpu_sync *sync)
1169{
1170	struct amdgpu_vm *peer_vm;
1171	int ret;
1172
1173	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1174			    vm_list_node) {
1175		ret = vm_update_pds(peer_vm, sync);
1176		if (ret)
1177			return ret;
1178	}
1179
1180	return 0;
1181}
1182
1183static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1184		       struct dma_fence **ef)
1185{
1186	struct amdkfd_process_info *info = NULL;
1187	int ret;
1188
1189	if (!*process_info) {
1190		info = kzalloc(sizeof(*info), GFP_KERNEL);
1191		if (!info)
1192			return -ENOMEM;
1193
1194		mutex_init(&info->lock);
1195		INIT_LIST_HEAD(&info->vm_list_head);
1196		INIT_LIST_HEAD(&info->kfd_bo_list);
1197		INIT_LIST_HEAD(&info->userptr_valid_list);
1198		INIT_LIST_HEAD(&info->userptr_inval_list);
1199
1200		info->eviction_fence =
1201			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1202						   current->mm,
1203						   NULL);
1204		if (!info->eviction_fence) {
1205			pr_err("Failed to create eviction fence\n");
1206			ret = -ENOMEM;
1207			goto create_evict_fence_fail;
1208		}
1209
1210		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1211		atomic_set(&info->evicted_bos, 0);
1212		INIT_DELAYED_WORK(&info->restore_userptr_work,
1213				  amdgpu_amdkfd_restore_userptr_worker);
1214
1215		*process_info = info;
1216		*ef = dma_fence_get(&info->eviction_fence->base);
1217	}
1218
1219	vm->process_info = *process_info;
1220
1221	/* Validate page directory and attach eviction fence */
1222	ret = amdgpu_bo_reserve(vm->root.bo, true);
1223	if (ret)
1224		goto reserve_pd_fail;
1225	ret = vm_validate_pt_pd_bos(vm);
1226	if (ret) {
1227		pr_err("validate_pt_pd_bos() failed\n");
1228		goto validate_pd_fail;
1229	}
1230	ret = amdgpu_bo_sync_wait(vm->root.bo,
1231				  AMDGPU_FENCE_OWNER_KFD, false);
1232	if (ret)
1233		goto wait_pd_fail;
1234	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1235	if (ret)
1236		goto reserve_shared_fail;
1237	amdgpu_bo_fence(vm->root.bo,
1238			&vm->process_info->eviction_fence->base, true);
1239	amdgpu_bo_unreserve(vm->root.bo);
1240
1241	/* Update process info */
1242	mutex_lock(&vm->process_info->lock);
1243	list_add_tail(&vm->vm_list_node,
1244			&(vm->process_info->vm_list_head));
1245	vm->process_info->n_vms++;
1246	mutex_unlock(&vm->process_info->lock);
1247
1248	return 0;
1249
1250reserve_shared_fail:
1251wait_pd_fail:
1252validate_pd_fail:
1253	amdgpu_bo_unreserve(vm->root.bo);
1254reserve_pd_fail:
1255	vm->process_info = NULL;
1256	if (info) {
1257		/* Two fence references: one in info and one in *ef */
1258		dma_fence_put(&info->eviction_fence->base);
1259		dma_fence_put(*ef);
1260		*ef = NULL;
1261		*process_info = NULL;
1262		put_pid(info->pid);
1263create_evict_fence_fail:
1264		mutex_destroy(&info->lock);
1265		kfree(info);
1266	}
1267	return ret;
1268}
1269
1270int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1271					   struct file *filp, u32 pasid,
1272					   void **process_info,
1273					   struct dma_fence **ef)
1274{
1275	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1276	struct amdgpu_fpriv *drv_priv;
1277	struct amdgpu_vm *avm;
1278	int ret;
1279
1280	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1281	if (ret)
1282		return ret;
1283	avm = &drv_priv->vm;
1284
1285	/* Already a compute VM? */
1286	if (avm->process_info)
1287		return -EINVAL;
1288
1289	/* Convert VM into a compute VM */
1290	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1291	if (ret)
1292		return ret;
1293
1294	/* Initialize KFD part of the VM and process info */
1295	ret = init_kfd_vm(avm, process_info, ef);
1296	if (ret)
1297		return ret;
1298
1299	amdgpu_vm_set_task_info(avm);
1300
1301	return 0;
1302}
1303
1304void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1305				    struct amdgpu_vm *vm)
1306{
1307	struct amdkfd_process_info *process_info = vm->process_info;
1308	struct amdgpu_bo *pd = vm->root.bo;
1309
1310	if (!process_info)
1311		return;
1312
1313	/* Release eviction fence from PD */
1314	amdgpu_bo_reserve(pd, false);
1315	amdgpu_bo_fence(pd, NULL, false);
1316	amdgpu_bo_unreserve(pd);
1317
1318	/* Update process info */
1319	mutex_lock(&process_info->lock);
1320	process_info->n_vms--;
1321	list_del(&vm->vm_list_node);
1322	mutex_unlock(&process_info->lock);
1323
1324	vm->process_info = NULL;
1325
1326	/* Release per-process resources when last compute VM is destroyed */
1327	if (!process_info->n_vms) {
1328		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1329		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1330		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1331
1332		dma_fence_put(&process_info->eviction_fence->base);
1333		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1334		put_pid(process_info->pid);
1335		mutex_destroy(&process_info->lock);
1336		kfree(process_info);
1337	}
1338}
1339
1340void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1341{
1342	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1343	struct amdgpu_vm *avm;
1344
1345	if (WARN_ON(!kgd || !drm_priv))
1346		return;
1347
1348	avm = drm_priv_to_vm(drm_priv);
1349
1350	pr_debug("Releasing process vm %p\n", avm);
1351
1352	/* The original pasid of the amdgpu vm was already released
1353	 * when the vm was converted to a compute vm. The current
1354	 * pasid is managed by KFD and will be released on KFD
1355	 * process destroy. Set the amdgpu pasid to 0 to avoid a
1356	 * duplicate release.
1357	 */
1358	amdgpu_vm_release_compute(adev, avm);
1359}
1360
1361uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1362{
1363	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1364	struct amdgpu_bo *pd = avm->root.bo;
1365	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1366
1367	if (adev->asic_type < CHIP_VEGA10)
1368		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1369	return avm->pd_phys_addr;
1370}
1371
1372int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1373		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1374		void *drm_priv, struct kgd_mem **mem,
1375		uint64_t *offset, uint32_t flags)
1376{
1377	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1378	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1379	enum ttm_bo_type bo_type = ttm_bo_type_device;
1380	struct sg_table *sg = NULL;
1381	uint64_t user_addr = 0;
1382	struct amdgpu_bo *bo;
1383	struct drm_gem_object *gobj;
1384	u32 domain, alloc_domain;
1385	u64 alloc_flags;
1386	int ret;
1387
1388	/*
1389	 * Check on which domain to allocate BO
1390	 */
1391	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1392		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1393		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1394		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1395			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1396	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1397		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1398		alloc_flags = 0;
1399	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1400		domain = AMDGPU_GEM_DOMAIN_GTT;
1401		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1402		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1403		if (!offset || !*offset)
1404			return -EINVAL;
1405		user_addr = untagged_addr(*offset);
1406	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1407			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1408		domain = AMDGPU_GEM_DOMAIN_GTT;
1409		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1410		bo_type = ttm_bo_type_sg;
1411		alloc_flags = 0;
1412		if (size > UINT_MAX)
1413			return -EINVAL;
1414		sg = create_doorbell_sg(*offset, size);
1415		if (!sg)
1416			return -ENOMEM;
1417	} else {
1418		return -EINVAL;
1419	}
1420
1421	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1422	if (!*mem) {
1423		ret = -ENOMEM;
1424		goto err;
1425	}
1426	INIT_LIST_HEAD(&(*mem)->attachments);
1427	mutex_init(&(*mem)->lock);
1428	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1429
1430	/* Workaround for AQL queue wraparound bug. Map the same
1431	 * memory twice. That means we only actually allocate half
1432	 * the memory.
1433	 */
1434	if ((*mem)->aql_queue)
1435		size = size >> 1;
1436
1437	(*mem)->alloc_flags = flags;
1438
1439	amdgpu_sync_create(&(*mem)->sync);
1440
1441	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1442	if (ret) {
1443		pr_debug("Insufficient memory\n");
1444		goto err_reserve_limit;
1445	}
1446
1447	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1448			va, size, domain_string(alloc_domain));
1449
1450	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1451				       bo_type, NULL, &gobj);
1452	if (ret) {
1453		pr_debug("Failed to create BO on domain %s. ret %d\n",
1454			 domain_string(alloc_domain), ret);
1455		goto err_bo_create;
1456	}
1457	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1458	if (ret) {
1459		pr_debug("Failed to allow vma node access. ret %d\n", ret);
1460		goto err_node_allow;
1461	}
1462	bo = gem_to_amdgpu_bo(gobj);
1463	if (bo_type == ttm_bo_type_sg) {
1464		bo->tbo.sg = sg;
1465		bo->tbo.ttm->sg = sg;
1466	}
1467	bo->kfd_bo = *mem;
1468	(*mem)->bo = bo;
1469	if (user_addr)
1470		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1471
1472	(*mem)->va = va;
1473	(*mem)->domain = domain;
1474	(*mem)->mapped_to_gpu_memory = 0;
1475	(*mem)->process_info = avm->process_info;
1476	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1477
1478	if (user_addr) {
1479		ret = init_user_pages(*mem, user_addr);
1480		if (ret)
1481			goto allocate_init_user_pages_failed;
1482	}
1483
1484	if (offset)
1485		*offset = amdgpu_bo_mmap_offset(bo);
1486
1487	return 0;
1488
1489allocate_init_user_pages_failed:
1490	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1491	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1492err_node_allow:
1493	amdgpu_bo_unref(&bo);
1494	/* Don't unreserve system mem limit twice */
1495	goto err_reserve_limit;
1496err_bo_create:
1497	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1498err_reserve_limit:
1499	mutex_destroy(&(*mem)->lock);
1500	kfree(*mem);
1501err:
1502	if (sg) {
1503		sg_free_table(sg);
1504		kfree(sg);
1505	}
1506	return ret;
1507}
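/* Illustrative call sequence (local variable names are hypothetical): a
 * KFD caller typically allocates and maps in two steps, e.g.
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset = 0;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, drm_priv,
 *			&mem, &offset,
 *			KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
 *
 * Teardown goes through amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() and
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu().
 */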
1508
1509int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1510		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1511		uint64_t *size)
1512{
1513	struct amdkfd_process_info *process_info = mem->process_info;
1514	unsigned long bo_size = mem->bo->tbo.base.size;
1515	struct kfd_mem_attachment *entry, *tmp;
1516	struct bo_vm_reservation_context ctx;
1517	struct ttm_validate_buffer *bo_list_entry;
1518	unsigned int mapped_to_gpu_memory;
1519	int ret;
1520	bool is_imported = false;
1521
1522	mutex_lock(&mem->lock);
1523	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1524	is_imported = mem->is_imported;
1525	mutex_unlock(&mem->lock);
1526	/* lock is not needed after this, since mem is unused and will
1527	 * be freed anyway
1528	 */
1529
1530	if (mapped_to_gpu_memory > 0) {
1531		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1532				mem->va, bo_size);
1533		return -EBUSY;
1534	}
1535
1536	/* Make sure restore workers don't access the BO any more */
1537	bo_list_entry = &mem->validate_list;
1538	mutex_lock(&process_info->lock);
1539	list_del(&bo_list_entry->head);
1540	mutex_unlock(&process_info->lock);
1541
1542	/* No more MMU notifiers */
1543	amdgpu_mn_unregister(mem->bo);
1544
1545	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1546	if (unlikely(ret))
1547		return ret;
1548
1549	/* The eviction fence should be removed by the last unmap.
1550	 * TODO: Log an error condition if the bo still has the eviction fence
1551	 * attached
1552	 */
1553	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1554					process_info->eviction_fence);
1555	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1556		mem->va + bo_size * (1 + mem->aql_queue));
1557
1558	ret = unreserve_bo_and_vms(&ctx, false, false);
1559
1560	/* Remove from VM internal data structures */
1561	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1562		kfd_mem_detach(entry);
1563
1564	/* Free the sync object */
1565	amdgpu_sync_free(&mem->sync);
1566
1567	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1568	 * remap BO. We need to free it.
1569	 */
1570	if (mem->bo->tbo.sg) {
1571		sg_free_table(mem->bo->tbo.sg);
1572		kfree(mem->bo->tbo.sg);
1573	}
1574
1575	/* Update the size of the BO being freed if it was allocated from
1576	 * VRAM and is not imported.
1577	 */
1578	if (size) {
1579		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1580		    (!is_imported))
1581			*size = bo_size;
1582		else
1583			*size = 0;
1584	}
1585
1586	/* Free the BO*/
1587	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1588	if (mem->dmabuf)
1589		dma_buf_put(mem->dmabuf);
1590	drm_gem_object_put(&mem->bo->tbo.base);
1591	mutex_destroy(&mem->lock);
1592	kfree(mem);
1593
1594	return ret;
1595}
1596
1597int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1598		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1599{
1600	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1601	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1602	int ret;
1603	struct amdgpu_bo *bo;
1604	uint32_t domain;
1605	struct kfd_mem_attachment *entry;
1606	struct bo_vm_reservation_context ctx;
1607	unsigned long bo_size;
1608	bool is_invalid_userptr = false;
1609
1610	bo = mem->bo;
1611	if (!bo) {
1612		pr_err("Invalid BO when mapping memory to GPU\n");
1613		return -EINVAL;
1614	}
1615
1616	/* Make sure restore is not running concurrently. Since we
1617	 * don't map invalid userptr BOs, we rely on the next restore
1618	 * worker to do the mapping
1619	 */
1620	mutex_lock(&mem->process_info->lock);
1621
1622	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1623	 * sure that the MMU notifier is no longer running
1624	 * concurrently and the queues are actually stopped
1625	 */
1626	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1627		mmap_write_lock(current->mm);
1628		is_invalid_userptr = atomic_read(&mem->invalid);
1629		mmap_write_unlock(current->mm);
1630	}
1631
1632	mutex_lock(&mem->lock);
1633
1634	domain = mem->domain;
1635	bo_size = bo->tbo.base.size;
1636
1637	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1638			mem->va,
1639			mem->va + bo_size * (1 + mem->aql_queue),
1640			avm, domain_string(domain));
1641
1642	if (!kfd_mem_is_attached(avm, mem)) {
1643		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1644		if (ret)
1645			goto out;
1646	}
1647
1648	ret = reserve_bo_and_vm(mem, avm, &ctx);
1649	if (unlikely(ret))
1650		goto out;
1651
1652	/* Userptr can be marked as "not invalid", but not actually be
1653	 * validated yet (still in the system domain). In that case
1654	 * the queues are still stopped and we can leave mapping for
1655	 * the next restore worker
1656	 */
1657	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1658	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1659		is_invalid_userptr = true;
1660
1661	ret = vm_validate_pt_pd_bos(avm);
1662	if (unlikely(ret))
1663		goto out_unreserve;
1664
1665	if (mem->mapped_to_gpu_memory == 0 &&
1666	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1667		/* Validate BO only once. The eviction fence gets added to BO
1668		 * the first time it is mapped. Validate will wait for all
1669		 * background evictions to complete.
1670		 */
1671		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1672		if (ret) {
1673			pr_debug("Validate failed\n");
1674			goto out_unreserve;
1675		}
1676	}
1677
1678	list_for_each_entry(entry, &mem->attachments, list) {
1679		if (entry->bo_va->base.vm != avm || entry->is_mapped)
1680			continue;
1681
1682		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1683			 entry->va, entry->va + bo_size, entry);
1684
1685		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1686				      is_invalid_userptr);
1687		if (ret) {
1688			pr_err("Failed to map bo to gpuvm\n");
1689			goto out_unreserve;
1690		}
1691
1692		ret = vm_update_pds(avm, ctx.sync);
1693		if (ret) {
1694			pr_err("Failed to update page directories\n");
1695			goto out_unreserve;
1696		}
1697
1698		entry->is_mapped = true;
1699		mem->mapped_to_gpu_memory++;
1700		pr_debug("\t INC mapping count %d\n",
1701			 mem->mapped_to_gpu_memory);
1702	}
1703
1704	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1705		amdgpu_bo_fence(bo,
1706				&avm->process_info->eviction_fence->base,
1707				true);
1708	ret = unreserve_bo_and_vms(&ctx, false, false);
1709
1710	goto out;
1711
1712out_unreserve:
1713	unreserve_bo_and_vms(&ctx, false, false);
1714out:
1715	mutex_unlock(&mem->process_info->lock);
1716	mutex_unlock(&mem->lock);
1717	return ret;
1718}
1719
1720int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1721		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1722{
1723	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1724	struct amdkfd_process_info *process_info = avm->process_info;
1725	unsigned long bo_size = mem->bo->tbo.base.size;
1726	struct kfd_mem_attachment *entry;
1727	struct bo_vm_reservation_context ctx;
1728	int ret;
1729
1730	mutex_lock(&mem->lock);
1731
1732	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1733	if (unlikely(ret))
1734		goto out;
1735	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1736	if (ctx.n_vms == 0) {
1737		ret = -EINVAL;
1738		goto unreserve_out;
1739	}
1740
1741	ret = vm_validate_pt_pd_bos(avm);
1742	if (unlikely(ret))
1743		goto unreserve_out;
1744
1745	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1746		mem->va,
1747		mem->va + bo_size * (1 + mem->aql_queue),
1748		avm);
1749
1750	list_for_each_entry(entry, &mem->attachments, list) {
1751		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1752			continue;
1753
1754		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1755			 entry->va, entry->va + bo_size, entry);
1756
1757		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1758		entry->is_mapped = false;
1759
1760		mem->mapped_to_gpu_memory--;
1761		pr_debug("\t DEC mapping count %d\n",
1762			 mem->mapped_to_gpu_memory);
1763	}
1764
1765	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1766	 * required.
1767	 */
1768	if (mem->mapped_to_gpu_memory == 0 &&
1769	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1770	    !mem->bo->tbo.pin_count)
1771		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1772						process_info->eviction_fence);
1773
1774unreserve_out:
1775	unreserve_bo_and_vms(&ctx, false, false);
1776out:
1777	mutex_unlock(&mem->lock);
1778	return ret;
1779}
1780
1781int amdgpu_amdkfd_gpuvm_sync_memory(
1782		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1783{
1784	struct amdgpu_sync sync;
1785	int ret;
1786
1787	amdgpu_sync_create(&sync);
1788
1789	mutex_lock(&mem->lock);
1790	amdgpu_sync_clone(&mem->sync, &sync);
1791	mutex_unlock(&mem->lock);
1792
1793	ret = amdgpu_sync_wait(&sync, intr);
1794	amdgpu_sync_free(&sync);
1795	return ret;
1796}
1797
1798int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1799		struct kgd_mem *mem, void **kptr, uint64_t *size)
1800{
1801	int ret;
1802	struct amdgpu_bo *bo = mem->bo;
1803
1804	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1805		pr_err("userptr can't be mapped to kernel\n");
1806		return -EINVAL;
1807	}
1808
1809	/* Delete kgd_mem from kfd_bo_list to avoid re-validating
1810	 * this BO during its restore after eviction.
1811	 */
1812	mutex_lock(&mem->process_info->lock);
1813
1814	ret = amdgpu_bo_reserve(bo, true);
1815	if (ret) {
1816		pr_err("Failed to reserve bo. ret %d\n", ret);
1817		goto bo_reserve_failed;
1818	}
1819
1820	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1821	if (ret) {
1822		pr_err("Failed to pin bo. ret %d\n", ret);
1823		goto pin_failed;
1824	}
1825
1826	ret = amdgpu_bo_kmap(bo, kptr);
1827	if (ret) {
1828		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1829		goto kmap_failed;
1830	}
1831
1832	amdgpu_amdkfd_remove_eviction_fence(
1833		bo, mem->process_info->eviction_fence);
1834	list_del_init(&mem->validate_list.head);
1835
1836	if (size)
1837		*size = amdgpu_bo_size(bo);
1838
1839	amdgpu_bo_unreserve(bo);
1840
1841	mutex_unlock(&mem->process_info->lock);
1842	return 0;
1843
1844kmap_failed:
1845	amdgpu_bo_unpin(bo);
1846pin_failed:
1847	amdgpu_bo_unreserve(bo);
1848bo_reserve_failed:
1849	mutex_unlock(&mem->process_info->lock);
1850
1851	return ret;
1852}
1853
1854int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1855					      struct kfd_vm_fault_info *mem)
1856{
1857	struct amdgpu_device *adev;
1858
1859	adev = (struct amdgpu_device *)kgd;
1860	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1861		*mem = *adev->gmc.vm_fault_info;
1862		mb();
1863		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1864	}
1865	return 0;
1866}
1867
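/* Import an amdgpu-exported DMA-buf as a kgd_mem so KFD can map it.
 *
 * Only buffers exported by the same device and backed by VRAM or GTT are
 * accepted; anything else is rejected with -EINVAL.
 */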
1868int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1869				      struct dma_buf *dma_buf,
1870				      uint64_t va, void *drm_priv,
1871				      struct kgd_mem **mem, uint64_t *size,
1872				      uint64_t *mmap_offset)
1873{
1874	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1875	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1876	struct drm_gem_object *obj;
1877	struct amdgpu_bo *bo;
1878	int ret;
1879
1880	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1881		/* Can't handle non-graphics buffers */
1882		return -EINVAL;
1883
1884	obj = dma_buf->priv;
1885	if (drm_to_adev(obj->dev) != adev)
1886		/* Can't handle buffers from other devices */
1887		return -EINVAL;
1888
1889	bo = gem_to_amdgpu_bo(obj);
1890	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1891				    AMDGPU_GEM_DOMAIN_GTT)))
1892		/* Only VRAM and GTT BOs are supported */
1893		return -EINVAL;
1894
1895	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1896	if (!*mem)
1897		return -ENOMEM;
1898
1899	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1900	if (ret) {
1901		kfree(*mem);
1902		return ret;
1903	}
1904
1905	if (size)
1906		*size = amdgpu_bo_size(bo);
1907
1908	if (mmap_offset)
1909		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1910
1911	INIT_LIST_HEAD(&(*mem)->attachments);
1912	mutex_init(&(*mem)->lock);
1913
1914	(*mem)->alloc_flags =
1915		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1916		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1917		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1918		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1919
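	/* Hold a GEM reference on the BO for the lifetime of this kgd_mem */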
1920	drm_gem_object_get(&bo->tbo.base);
1921	(*mem)->bo = bo;
1922	(*mem)->va = va;
1923	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1924		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1925	(*mem)->mapped_to_gpu_memory = 0;
1926	(*mem)->process_info = avm->process_info;
1927	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1928	amdgpu_sync_create(&(*mem)->sync);
1929	(*mem)->is_imported = true;
1930
1931	return 0;
1932}
1933
1934/* Evict a userptr BO by stopping the queues if necessary
1935 *
1936 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1937 * cannot do any memory allocations, and cannot take any locks that
1938 * are held elsewhere while allocating memory. Therefore this is as
1939 * simple as possible, using atomic counters.
1940 *
1941 * It doesn't do anything to the BO itself. The real work happens in
1942 * restore, where we get updated page addresses. This function only
1943 * ensures that GPU access to the BO is stopped.
1944 */
1945int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1946				struct mm_struct *mm)
1947{
1948	struct amdkfd_process_info *process_info = mem->process_info;
1949	int evicted_bos;
1950	int r = 0;
1951
1952	atomic_inc(&mem->invalid);
1953	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1954	if (evicted_bos == 1) {
1955		/* First eviction, stop the queues */
1956		r = kgd2kfd_quiesce_mm(mm);
1957		if (r)
1958			pr_err("Failed to quiesce KFD\n");
1959		schedule_delayed_work(&process_info->restore_userptr_work,
1960			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1961	}
1962
1963	return r;
1964}
1965
1966/* Update invalid userptr BOs
1967 *
1968 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1969 * userptr_inval_list and updates user pages for all BOs that have
1970 * been invalidated since their last update.
1971 */
1972static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1973				     struct mm_struct *mm)
1974{
1975	struct kgd_mem *mem, *tmp_mem;
1976	struct amdgpu_bo *bo;
1977	struct ttm_operation_ctx ctx = { false, false };
1978	int invalid, ret;
1979
1980	/* Move all invalidated BOs to the userptr_inval_list and
1981	 * release their user pages by migration to the CPU domain
1982	 */
1983	list_for_each_entry_safe(mem, tmp_mem,
1984				 &process_info->userptr_valid_list,
1985				 validate_list.head) {
1986		if (!atomic_read(&mem->invalid))
1987			continue; /* BO is still valid */
1988
1989		bo = mem->bo;
1990
1991		if (amdgpu_bo_reserve(bo, true))
1992			return -EAGAIN;
1993		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1994		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1995		amdgpu_bo_unreserve(bo);
1996		if (ret) {
1997			pr_err("%s: Failed to invalidate userptr BO\n",
1998			       __func__);
1999			return -EAGAIN;
2000		}
2001
2002		list_move_tail(&mem->validate_list.head,
2003			       &process_info->userptr_inval_list);
2004	}
2005
2006	if (list_empty(&process_info->userptr_inval_list))
2007		return 0; /* All evicted userptr BOs were freed */
2008
2009	/* Go through userptr_inval_list and update any invalid user_pages */
2010	list_for_each_entry(mem, &process_info->userptr_inval_list,
2011			    validate_list.head) {
2012		invalid = atomic_read(&mem->invalid);
2013		if (!invalid)
2014			/* BO hasn't been invalidated since the last
2015			 * revalidation attempt. Keep it on the inval list.
2016			 */
2017			continue;
2018
2019		bo = mem->bo;
2020
2021		/* Get updated user pages */
2022		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2023		if (ret) {
2024			pr_debug("%s: Failed to get user pages: %d\n",
2025				__func__, ret);
2026
2027			/* Return error -EBUSY or -ENOMEM, retry restore */
2028			return ret;
2029		}
2030
2031		/*
2032		 * FIXME: Cannot ignore the return code, must hold
2033		 * notifier_lock
2034		 */
2035		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2036
2037		/* Mark the BO as valid unless it was invalidated
2038		 * again concurrently.
2039		 */
2040		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2041			return -EAGAIN;
2042	}
2043
2044	return 0;
2045}
2046
2047/* Validate invalid userptr BOs
2048 *
2049 * Validates BOs on the userptr_inval_list, and moves them back to the
2050 * userptr_valid_list. Also updates GPUVM page tables with new page
2051 * addresses and waits for the page table updates to complete.
2052 */
2053static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2054{
2055	struct amdgpu_bo_list_entry *pd_bo_list_entries;
2056	struct list_head resv_list, duplicates;
2057	struct ww_acquire_ctx ticket;
2058	struct amdgpu_sync sync;
2059
2060	struct amdgpu_vm *peer_vm;
2061	struct kgd_mem *mem, *tmp_mem;
2062	struct amdgpu_bo *bo;
2063	struct ttm_operation_ctx ctx = { false, false };
2064	int i, ret;
2065
2066	pd_bo_list_entries = kcalloc(process_info->n_vms,
2067				     sizeof(struct amdgpu_bo_list_entry),
2068				     GFP_KERNEL);
2069	if (!pd_bo_list_entries) {
2070		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2071		ret = -ENOMEM;
2072		goto out_no_mem;
2073	}
2074
2075	INIT_LIST_HEAD(&resv_list);
2076	INIT_LIST_HEAD(&duplicates);
2077
2078	/* Get all the page directory BOs that need to be reserved */
2079	i = 0;
2080	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2081			    vm_list_node)
2082		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2083				    &pd_bo_list_entries[i++]);
2084	/* Add the userptr_inval_list entries to resv_list */
2085	list_for_each_entry(mem, &process_info->userptr_inval_list,
2086			    validate_list.head) {
2087		list_add_tail(&mem->resv_list.head, &resv_list);
2088		mem->resv_list.bo = mem->validate_list.bo;
2089		mem->resv_list.num_shared = mem->validate_list.num_shared;
2090	}
2091
2092	/* Reserve all BOs and page tables for validation */
2093	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2094	WARN(!list_empty(&duplicates), "Duplicates should be empty");
2095	if (ret)
2096		goto out_free;
2097
2098	amdgpu_sync_create(&sync);
2099
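	/* Make sure the page directory and page table BOs of all VMs are valid */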
2100	ret = process_validate_vms(process_info);
2101	if (ret)
2102		goto unreserve_out;
2103
2104	/* Validate BOs and update GPUVM page tables */
2105	list_for_each_entry_safe(mem, tmp_mem,
2106				 &process_info->userptr_inval_list,
2107				 validate_list.head) {
2108		struct kfd_mem_attachment *attachment;
2109
2110		bo = mem->bo;
2111
2112		/* Validate the BO if we got user pages */
2113		if (bo->tbo.ttm->pages[0]) {
2114			amdgpu_bo_placement_from_domain(bo, mem->domain);
2115			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2116			if (ret) {
2117				pr_err("%s: failed to validate BO\n", __func__);
2118				goto unreserve_out;
2119			}
2120		}
2121
2122		list_move_tail(&mem->validate_list.head,
2123			       &process_info->userptr_valid_list);
2124
2125		/* Update mapping. If the BO was not validated
2126		 * (because we couldn't get user pages), this will
2127		 * clear the page table entries, which will result in
2128		 * VM faults if the GPU tries to access the invalid
2129		 * memory.
2130		 */
2131		list_for_each_entry(attachment, &mem->attachments, list) {
2132			if (!attachment->is_mapped)
2133				continue;
2134
2135			kfd_mem_dmaunmap_attachment(mem, attachment);
2136			ret = update_gpuvm_pte(mem, attachment, &sync);
2137			if (ret) {
2138				pr_err("%s: update PTE failed\n", __func__);
2139				/* make sure this gets validated again */
2140				atomic_inc(&mem->invalid);
2141				goto unreserve_out;
2142			}
2143		}
2144	}
2145
2146	/* Update page directories */
2147	ret = process_update_pds(process_info, &sync);
2148
2149unreserve_out:
2150	ttm_eu_backoff_reservation(&ticket, &resv_list);
2151	amdgpu_sync_wait(&sync, false);
2152	amdgpu_sync_free(&sync);
2153out_free:
2154	kfree(pd_bo_list_entries);
2155out_no_mem:
2156
2157	return ret;
2158}
2159
2160/* Worker callback to restore evicted userptr BOs
2161 *
2162 * Tries to update and validate all userptr BOs. If successful and no
2163 * concurrent evictions happened, the queues are restarted. Otherwise,
2164 * reschedule for another attempt later.
2165 */
2166static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2167{
2168	struct delayed_work *dwork = to_delayed_work(work);
2169	struct amdkfd_process_info *process_info =
2170		container_of(dwork, struct amdkfd_process_info,
2171			     restore_userptr_work);
2172	struct task_struct *usertask;
2173	struct mm_struct *mm;
2174	int evicted_bos;
2175
2176	evicted_bos = atomic_read(&process_info->evicted_bos);
2177	if (!evicted_bos)
2178		return;
2179
2180	/* Reference task and mm in case of concurrent process termination */
2181	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2182	if (!usertask)
2183		return;
2184	mm = get_task_mm(usertask);
2185	if (!mm) {
2186		put_task_struct(usertask);
2187		return;
2188	}
2189
2190	mutex_lock(&process_info->lock);
2191
2192	if (update_invalid_user_pages(process_info, mm))
2193		goto unlock_out;
2194	/* userptr_inval_list can be empty if all evicted userptr BOs
2195	 * have been freed. In that case there is nothing to validate
2196	 * and we can just restart the queues.
2197	 */
2198	if (!list_empty(&process_info->userptr_inval_list)) {
2199		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2200			goto unlock_out; /* Concurrent eviction, try again */
2201
2202		if (validate_invalid_user_pages(process_info))
2203			goto unlock_out;
2204	}
2205	/* Final check for concurrent eviction and atomic update. If
2206	 * another eviction happens after the successful update, it will
2207	 * count as a first eviction and call quiesce_mm. The eviction
2208	 * reference counting inside KFD will handle this case.
2209	 */
2210	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2211	    evicted_bos)
2212		goto unlock_out;
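	/* Restore succeeded; clear the local count so no retry is scheduled below */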
2213	evicted_bos = 0;
2214	if (kgd2kfd_resume_mm(mm)) {
2215		pr_err("%s: Failed to resume KFD\n", __func__);
2216		/* No recovery from this failure. Probably the CP is
2217		 * hanging. No point trying again.
2218		 */
2219	}
2220
2221unlock_out:
2222	mutex_unlock(&process_info->lock);
2223	mmput(mm);
2224	put_task_struct(usertask);
2225
2226	/* If validation failed, reschedule another attempt */
2227	if (evicted_bos)
2228		schedule_delayed_work(&process_info->restore_userptr_work,
2229			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2230}
2231
2232/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2233 *   KFD process identified by process_info
2234 *
2235 * @process_info: amdkfd_process_info of the KFD process
2236 *
2237 * After memory eviction, the restore thread calls this function. It must be
2238 * called while the process is still valid. BO restore involves:
2239 *
2240 * 1.  Release old eviction fence and create new one
2241 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2242 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2243 *     BOs that need to be reserved.
2244 * 4.  Reserve all the BOs
2245 * 5.  Validate the PD and PT BOs.
2246 * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2247 * 7.  Add fence to all PD and PT BOs.
2248 * 8.  Unreserve all BOs
2249 */
2250int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2251{
2252	struct amdgpu_bo_list_entry *pd_bo_list;
2253	struct amdkfd_process_info *process_info = info;
2254	struct amdgpu_vm *peer_vm;
2255	struct kgd_mem *mem;
2256	struct bo_vm_reservation_context ctx;
2257	struct amdgpu_amdkfd_fence *new_fence;
2258	int ret = 0, i;
2259	struct list_head duplicate_save;
2260	struct amdgpu_sync sync_obj;
2261	unsigned long failed_size = 0;
2262	unsigned long total_size = 0;
2263
2264	INIT_LIST_HEAD(&duplicate_save);
2265	INIT_LIST_HEAD(&ctx.list);
2266	INIT_LIST_HEAD(&ctx.duplicates);
2267
2268	pd_bo_list = kcalloc(process_info->n_vms,
2269			     sizeof(struct amdgpu_bo_list_entry),
2270			     GFP_KERNEL);
2271	if (!pd_bo_list)
2272		return -ENOMEM;
2273
2274	i = 0;
2275	mutex_lock(&process_info->lock);
2276	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2277			vm_list_node)
2278		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2279
2280	/* Reserve all BOs and page tables/directory. Add all BOs from
2281	 * kfd_bo_list to ctx.list
2282	 */
2283	list_for_each_entry(mem, &process_info->kfd_bo_list,
2284			    validate_list.head) {
2285
2286		list_add_tail(&mem->resv_list.head, &ctx.list);
2287		mem->resv_list.bo = mem->validate_list.bo;
2288		mem->resv_list.num_shared = mem->validate_list.num_shared;
2289	}
2290
2291	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2292				     false, &duplicate_save);
2293	if (ret) {
2294		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2295		goto ttm_reserve_fail;
2296	}
2297
2298	amdgpu_sync_create(&sync_obj);
2299
2300	/* Validate PDs and PTs */
2301	ret = process_validate_vms(process_info);
2302	if (ret)
2303		goto validate_map_fail;
2304
2305	ret = process_sync_pds_resv(process_info, &sync_obj);
2306	if (ret) {
2307		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2308		goto validate_map_fail;
2309	}
2310
2311	/* Validate BOs and map them to GPUVM (update VM page tables). */
2312	list_for_each_entry(mem, &process_info->kfd_bo_list,
2313			    validate_list.head) {
2314
2315		struct amdgpu_bo *bo = mem->bo;
2316		uint32_t domain = mem->domain;
2317		struct kfd_mem_attachment *attachment;
2318
2319		total_size += amdgpu_bo_size(bo);
2320
2321		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2322		if (ret) {
2323			pr_debug("Memory eviction: Validate BOs failed\n");
2324			failed_size += amdgpu_bo_size(bo);
2325			ret = amdgpu_amdkfd_bo_validate(bo,
2326						AMDGPU_GEM_DOMAIN_GTT, false);
2327			if (ret) {
2328				pr_debug("Memory eviction: Try again\n");
2329				goto validate_map_fail;
2330			}
2331		}
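		/* Track the BO's move fence so the final sync wait covers the move */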
2332		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2333		if (ret) {
2334			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2335			goto validate_map_fail;
2336		}
2337		list_for_each_entry(attachment, &mem->attachments, list) {
2338			if (!attachment->is_mapped)
2339				continue;
2340
2341			kfd_mem_dmaunmap_attachment(mem, attachment);
2342			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2343			if (ret) {
2344				pr_debug("Memory eviction: update PTE failed. Try again\n");
2345				goto validate_map_fail;
2346			}
2347		}
2348	}
2349
2350	if (failed_size)
2351		pr_debug("0x%lx/0x%lx bytes fell back to GTT (system)\n", failed_size, total_size);
2352
2353	/* Update page directories */
2354	ret = process_update_pds(process_info, &sync_obj);
2355	if (ret) {
2356		pr_debug("Memory eviction: update PDs failed. Try again\n");
2357		goto validate_map_fail;
2358	}
2359
2360	/* Wait for validate and PT updates to finish */
2361	amdgpu_sync_wait(&sync_obj, false);
2362
2363	/* Release the old eviction fence and create a new one, because a
2364	 * fence only goes from unsignaled to signaled and cannot be reused.
2365	 * Use the context and mm from the old fence.
2366	 */
2367	new_fence = amdgpu_amdkfd_fence_create(
2368				process_info->eviction_fence->base.context,
2369				process_info->eviction_fence->mm,
2370				NULL);
2371	if (!new_fence) {
2372		pr_err("Failed to create eviction fence\n");
2373		ret = -ENOMEM;
2374		goto validate_map_fail;
2375	}
2376	dma_fence_put(&process_info->eviction_fence->base);
2377	process_info->eviction_fence = new_fence;
2378	*ef = dma_fence_get(&new_fence->base);
2379
2380	/* Attach new eviction fence to all BOs */
2381	list_for_each_entry(mem, &process_info->kfd_bo_list,
2382		validate_list.head)
2383		amdgpu_bo_fence(mem->bo,
2384			&process_info->eviction_fence->base, true);
2385
2386	/* Attach eviction fence to PD / PT BOs */
2387	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2388			    vm_list_node) {
2389		struct amdgpu_bo *bo = peer_vm->root.bo;
2390
2391		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2392	}
2393
2394validate_map_fail:
2395	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2396	amdgpu_sync_free(&sync_obj);
2397ttm_reserve_fail:
2398	mutex_unlock(&process_info->lock);
2399	kfree(pd_bo_list);
2400	return ret;
2401}
2402
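/* Wrap the amdgpu GWS BO in a kgd_mem and attach it to the KFD process.
 *
 * The BO is validated in the GWS domain and fenced with the process eviction
 * fence so that amdgpu and amdkfd can evict each other's use of it.
 */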
2403int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2404{
2405	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2406	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2407	int ret;
2408
2409	if (!info || !gws)
2410		return -EINVAL;
2411
2412	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2413	if (!*mem)
2414		return -ENOMEM;
2415
2416	mutex_init(&(*mem)->lock);
2417	INIT_LIST_HEAD(&(*mem)->attachments);
2418	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2419	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2420	(*mem)->process_info = process_info;
2421	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2422	amdgpu_sync_create(&(*mem)->sync);
2423
2424
2425	/* Validate the GWS BO the first time it is added to the process */
2426	mutex_lock(&(*mem)->process_info->lock);
2427	ret = amdgpu_bo_reserve(gws_bo, false);
2428	if (unlikely(ret)) {
2429		pr_err("Reserve gws bo failed %d\n", ret);
2430		goto bo_reservation_failure;
2431	}
2432
2433	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2434	if (ret) {
2435		pr_err("GWS BO validate failed %d\n", ret);
2436		goto bo_validation_failure;
2437	}
2438	/* The GWS resource is shared between amdgpu and amdkfd.
2439	 * Add the process eviction fence to the BO so they can
2440	 * evict each other.
2441	 */
2442	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2443	if (ret)
2444		goto reserve_shared_fail;
2445	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2446	amdgpu_bo_unreserve(gws_bo);
2447	mutex_unlock(&(*mem)->process_info->lock);
2448
2449	return ret;
2450
2451reserve_shared_fail:
2452bo_validation_failure:
2453	amdgpu_bo_unreserve(gws_bo);
2454bo_reservation_failure:
2455	mutex_unlock(&(*mem)->process_info->lock);
2456	amdgpu_sync_free(&(*mem)->sync);
2457	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2458	amdgpu_bo_unref(&gws_bo);
2459	mutex_destroy(&(*mem)->lock);
2460	kfree(*mem);
2461	*mem = NULL;
2462	return ret;
2463}
2464
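/* Detach the GWS kgd_mem from the KFD process and drop its references */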
2465int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2466{
2467	int ret;
2468	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2469	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2470	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2471
2472	/* Remove BO from process's validate list so restore worker won't touch
2473	 * it anymore
2474	 */
2475	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2476
2477	ret = amdgpu_bo_reserve(gws_bo, false);
2478	if (unlikely(ret)) {
2479		pr_err("Reserve gws bo failed %d\n", ret);
2480		//TODO add BO back to validate_list?
2481		return ret;
2482	}
2483	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2484			process_info->eviction_fence);
2485	amdgpu_bo_unreserve(gws_bo);
2486	amdgpu_sync_free(&kgd_mem->sync);
2487	amdgpu_bo_unref(&gws_bo);
2488	mutex_destroy(&kgd_mem->lock);
2489	kfree(mem);
2490	return 0;
2491}
2492
2493/* Returns GPU-specific tiling mode information */
2494int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2495				struct tile_config *config)
2496{
2497	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2498
2499	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2500	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2501	config->num_tile_configs =
2502			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2503	config->macro_tile_config_ptr =
2504			adev->gfx.config.macrotile_mode_array;
2505	config->num_macro_tile_configs =
2506			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2507
2508	/* Those values are not set from GFX9 onwards */
2509	config->num_banks = adev->gfx.config.num_banks;
2510	config->num_ranks = adev->gfx.config.num_ranks;
2511
2512	return 0;
2513}