// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched/task.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
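
/* With this override, messages printed via the dev_<level> helpers in this
 * file carry the prefix "kfd_svm: <function>: "; e.g. the dev_err() in
 * svm_range_dma_map_dev() shows up as
 * "kfd_svm: svm_range_dma_map_dev: failed <r> dma_map_page".
 */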

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes in after the svm range is
 * restored and the page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)

/* A giant svm range is split into smaller ranges based on this value. It is
 * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
 * 2MB and 1GB and rounded down to a power of two.
 */
static uint64_t max_svm_range_pages;

struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};

/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}

static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
				     prange->start << PAGE_SHIFT,
				     prange->npages << PAGE_SHIFT,
				     &svm_range_mn_ops);
}

/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to the svms interval tree and linked list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_move_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}

static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}

static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev =
					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.dev->pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}
	return 0;
}
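
/* Note the two kinds of entries the dma_addr array can hold after this: for
 * system memory, a real DMA address returned by dma_map_page(); for device
 * memory, the GPU-relative VRAM offset derived from the hmm pfn, tagged with
 * SVM_RANGE_VRAM_DOMAIN so that svm_is_valid_dma_mapping_addr() never feeds
 * it to dma_unmap_page().
 */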

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}

void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;
		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}

void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->adev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}

static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
{
	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	svm_range_free_dma_mappings(prange);

	if (update_mem_usage && !p->xnack_enabled) {
		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
	}
	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
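
/* The default granularity of 9 means migration and GPU mapping decisions are
 * made in aligned blocks of 2^9 = 512 pages, i.e. 2MB with 4KB base pages;
 * see svm_range_split_by_granularity().
 */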

static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last, bool update_mem_usage)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;

	p = container_of(svms, struct kfd_process, svms);
	if (!p->xnack_enabled && update_mem_usage &&
	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
					    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
		kfree(prange);
		return NULL;
	}
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}

static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
		/* We're not in the eviction worker.
		 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
		dma_fence_signal(&svm_bo->eviction_fence->base);
		cancel_work_sync(&svm_bo->eviction_work);
	}
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

static void svm_range_bo_wq_release(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}
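
/* Two release paths: svm_range_bo_unref() frees the BO synchronously and may
 * sleep in cancel_work_sync(), while svm_range_bo_unref_async() defers the
 * final svm_range_bo_release() to a worker, for callers that must not block
 * on the pending eviction work.
 */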

static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
{
	struct amdgpu_device *bo_adev;

	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
		if (bo_adev != adev) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}

	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list))
		;

	return false;
}

static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}

int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(adev, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	if (clear) {
		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
		if (r) {
			pr_debug("failed %d to sync bo\n", r);
			amdgpu_bo_unreserve(bo);
			goto reserve_bo_failed;
		}
	}

	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return r;
}

void svm_range_vram_node_free(struct svm_range *prange)
{
	svm_range_bo_unref(prange->svm_bo);
	prange->ttm_res = NULL;
}

struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int32_t gpu_idx;

	p = container_of(prange->svms, struct kfd_process, svms);

	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
	if (gpu_idx < 0) {
		pr_debug("failed to get device by id 0x%x\n", gpu_id);
		return NULL;
	}
	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
	if (!pdd) {
		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
		return NULL;
	}

	return pdd->dev->adev;
}

struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
{
	struct kfd_process *p;
	int32_t gpu_idx, gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
	if (r) {
		pr_debug("failed to get device id by adev %p\n", adev);
		return NULL;
	}

	return kfd_process_device_from_gpuidx(p, gpu_idx);
}

static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}

static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
		      bool *update_mapping)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			*update_mapping = true;
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			*update_mapping = true;
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			*update_mapping = true;
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = attrs[i].value;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}

static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (prange->preferred_loc != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			/* Prefetch should always trigger a migration even
			 * if the value of the attribute didn't change.
			 */
			return false;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				if (test_bit(gpuidx, prange->bitmap_access) ||
				    test_bit(gpuidx, prange->bitmap_aip))
					return false;
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				if (!test_bit(gpuidx, prange->bitmap_access))
					return false;
			} else {
				if (!test_bit(gpuidx, prange->bitmap_aip))
					return false;
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			if ((prange->flags & attrs[i].value) != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			if ((prange->flags & attrs[i].value) != 0)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			if (prange->granularity != attrs[i].value)
				return false;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}

	return true;
}

/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * Debug output of svm range start, end and actual location, from both the
 * svms interval tree and the linked list.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}

static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	new = kvmalloc_array(new_n, size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	d = (new_start - old_start) * size;
	memcpy(new, pold + d, new_n * size);

	old = kvmalloc_array(old_n, size, GFP_KERNEL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}

	d = (new_start == old_start) ? new_n * size : 0;
	memcpy(old, pold + d, old_n * size);

	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}
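
/* Example: splitting a per-page array that starts at page 0x1000 at
 * new_start 0x1100, with old_n = new_n = 0x100, copies entries
 * [0x100..0x1ff] into the new array and reallocates the old array to keep
 * entries [0x0..0xff]; when new_start == old_start the new array takes the
 * head and the old array keeps the tail instead.
 */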

static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages);
		if (r)
			return r;
	}

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}

/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the start address in pages the old range is adjusted to
 * @last: the last address in pages the old range is adjusted to
 *
 * Copy the system memory dma_addr or the vram ttm_res of the old range into
 * the new range, from new->start for new->npages pages; the remaining old
 * range then runs from start to last.
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}

/**
 * svm_range_split - split a range into two ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the resulting new range
 *
 * Two cases only:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last, false);
	else
		*new = svm_range_new(svms, old_start, start - 1, false);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new, false);
		*new = NULL;
	}

	return r;
}
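
/* Example: for prange [0x1000 0x1fff], svm_range_split(prange, 0x1000,
 * 0x17ff, &new) shrinks prange to [0x1000 0x17ff] and returns
 * new = [0x1800 0x1fff] (case 1), while svm_range_split(prange, 0x1800,
 * 0x1fff, &new) shrinks prange to [0x1800 0x1fff] and returns
 * new = [0x1000 0x17ff] (case 2).
 */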

static int
svm_range_split_tail(struct svm_range *prange,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->list, insert_list);
	return r;
}

static int
svm_range_split_head(struct svm_range *prange,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->list, insert_list);
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align the split range start and size to the granularity size, then a
	 * single PTE will be used for the whole range. This reduces the number
	 * of PTEs updated and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}
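
/* Example: with granularity 9 (512-page blocks), a fault at page 0x1234 in
 * prange [0x1000 0x1fff] yields start = 0x1200 and last = 0x13ff, so the
 * head [0x1000 0x11ff] and tail [0x1400 0x1fff] become children and prange
 * is trimmed to the single aligned block [0x1200 0x13ff] handled for this
 * fault.
 */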

static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
			int domain)
{
	struct amdgpu_device *bo_adev;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

	switch (KFD_GC_VERSION(adev->kfd.dev)) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
	return pte_flags;
}
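
/* Summary of the GC 9.4.x cases above: VRAM local to the mapping GPU is
 * cached (MTYPE_CC if coherent, MTYPE_RW otherwise); remote VRAM and system
 * memory are MTYPE_UC or MTYPE_NC, with snooping enabled for system memory
 * and for XGMI peers in the same hive. Other IP versions fall back to
 * MTYPE_UC/MTYPE_NC.
 */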

static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
				      last, init_pte_value, 0, 0, NULL, NULL,
				      fence);
}

static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last, uint32_t trigger)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	if (!prange->mapped_to_gpu) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
			 prange, prange->start, prange->last);
		return 0;
	}

	if (prange->start == start && prange->last == last) {
		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
		prange->mapped_to_gpu = false;
	}

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
					     start, last, trigger);

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}

static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
		     unsigned long offset, unsigned long npages, bool readonly,
		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
		     struct dma_fence **fence, bool flush_tlb)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");

		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
		if (readonly)
			pte_flags &= ~AMDGPU_PTE_WRITEABLE;

		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
			 prange->svms, last_start, prange->start + i,
			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
			 pte_flags);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
					   last_start, prange->start + i,
					   pte_flags,
					   (last_start - prange->start) << PAGE_SHIFT,
					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
					   NULL, dma_addr, &vm->last_update);

		for (j = last_start - prange->start; j <= i; j++)
			dma_addr[j] |= last_domain;

		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

out:
	return r;
}

static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
		      unsigned long npages, bool readonly,
		      unsigned long *bitmap, bool wait, bool flush_tlb)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
	else
		bo_adev = NULL;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && pdd->dev->adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
					 prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL,
					 flush_tlb);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}

		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	return r;
}

struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};

static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
					      drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}

static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}

/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
static int svm_range_validate_and_map(struct mm_struct *mm,
				      struct svm_range *prange, int32_t gpuidx,
				      bool intr, bool wait, bool flush_tlb)
{
	struct svm_validate_context ctx;
	unsigned long start, end, addr;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx.process = container_of(prange->svms, struct kfd_process, svms);
	ctx.prange = prange;
	ctx.intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx.bitmap, gpuidx, 1);
	} else if (ctx.process->xnack_enabled) {
		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
							prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					 prange->actual_loc);
				return -EINVAL;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx.bitmap, gpuidx, 1);
		}
	} else {
		bitmap_or(ctx.bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
		if (!prange->mapped_to_gpu)
			return 0;

		bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
	}

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN_ONCE(1, "VRAM BO missing during validation\n");
		return -EINVAL;
	}

	r = svm_range_reserve_bos(&ctx);
	if (r)
		return r;

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
						MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;
	for (addr = start; addr < end && !r; ) {
		struct hmm_range *hmm_range;
		struct vm_area_struct *vma;
		unsigned long next;
		unsigned long offset;
		unsigned long npages;
		bool readonly;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			r = -EFAULT;
			goto unreserve_out;
		}
		readonly = !(vma->vm_flags & VM_WRITE);

		next = min(vma->vm_end, end);
		npages = (next - addr) >> PAGE_SHIFT;
		WRITE_ONCE(p->svms.faulting_task, current);
		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
					       readonly, owner, NULL,
					       &hmm_range);
		WRITE_ONCE(p->svms.faulting_task, NULL);
		if (r) {
			pr_debug("failed %d to get svm range pages\n", r);
			goto unreserve_out;
		}

		offset = (addr - start) >> PAGE_SHIFT;
		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
				      hmm_range->hmm_pfns);
		if (r) {
			pr_debug("failed %d to dma map range\n", r);
			goto unreserve_out;
		}

		svm_range_lock(prange);
		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
			pr_debug("hmm update the range, need validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}
		if (!list_empty(&prange->child_list)) {
			pr_debug("range split by unmap in parallel, validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}

		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
					  ctx.bitmap, wait, flush_tlb);

unlock_out:
		svm_range_unlock(prange);

		addr = next;
	}

	if (addr == end) {
		prange->validated_once = true;
		prange->mapped_to_gpu = true;
	}

unreserve_out:
	svm_range_unreserve_bos(&ctx);

	if (!r)
		prange->validate_timestamp = ktime_get_boottime();

	return r;
}

/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}
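
/* The re-check under the mmap write lock is needed because the MMU notifier
 * can queue new deferred work between flush_work() and mmap_write_lock();
 * dropping the lock and flushing again guarantees the deferred list is empty
 * while the lock is held.
 */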

static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	p = container_of(svms, struct kfd_process, svms);
	process_info = p->kgd_process_info;

	/* Keep the mm reference while validating and mapping ranges */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p process mm gone\n", svms);
		return;
	}

	mutex_lock(&process_info->lock);
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/*
		 * If the range is migrating, wait until the migration is done.
		 */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       false, true, false);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);
	}
	mmput(mm);
}

/**
 * svm_range_evict - evict svm range
 * @prange: svm range structure
 * @mm: current process mm_struct
 * @start: first page of the range being invalidated
 * @last: last page of the range being invalidated
 * @event: mmu notifier event type
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let the CPU evict the buffer and proceed with the CPU pagetable
 * update.
 *
 * No lock is needed to sync the CPU pagetable invalidation with GPU execution.
 * If an invalidation happens while the restore work is running, the restore
 * work will restart to ensure it gets the latest CPU pages mapped to the GPU,
 * and then starts the queues.
 */
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
		unsigned long start, unsigned long last,
		enum mmu_notifier_event event)
{
	struct svm_range_list *svms = prange->svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	int r = 0;

	p = container_of(svms, struct kfd_process, svms);

	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 svms, prange->start, prange->last, start, last);

	if (!p->xnack_enabled ||
	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
		int evicted_ranges;
		bool mapped = prange->mapped_to_gpu;

		list_for_each_entry(pchild, &prange->child_list, child_list) {
			if (!pchild->mapped_to_gpu)
				continue;
			mapped = true;
			mutex_lock_nested(&pchild->lock, 1);
			if (pchild->start <= last && pchild->last >= start) {
				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
					 pchild->start, pchild->last);
				atomic_inc(&pchild->invalid);
			}
			mutex_unlock(&pchild->lock);
		}

		if (!mapped)
			return r;

		if (prange->start <= last && prange->last >= start)
			atomic_inc(&prange->invalid);

		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
		if (evicted_ranges != 1)
			return r;

		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
			 prange->svms, prange->start, prange->last);

		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
		if (r)
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;
		uint32_t trigger;

		if (event == MMU_NOTIFY_MIGRATE)
			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
		else
			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;

		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
			 prange->svms, start, last);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			s = max(start, pchild->start);
			l = min(last, pchild->last);
			if (l >= s)
				svm_range_unmap_from_gpus(pchild, s, l, trigger);
			mutex_unlock(&pchild->lock);
		}
		s = max(start, prange->start);
		l = min(last, prange->last);
		if (l >= s)
			svm_range_unmap_from_gpus(prange, s, l, trigger);
	}

	return r;
}
1875
1876static struct svm_range *svm_range_clone(struct svm_range *old)
1877{
1878	struct svm_range *new;
1879
1880	new = svm_range_new(old->svms, old->start, old->last, false);
1881	if (!new)
1882		return NULL;
1883
1884	if (old->svm_bo) {
1885		new->ttm_res = old->ttm_res;
1886		new->offset = old->offset;
1887		new->svm_bo = svm_range_bo_ref(old->svm_bo);
1888		spin_lock(&new->svm_bo->list_lock);
1889		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1890		spin_unlock(&new->svm_bo->list_lock);
1891	}
1892	new->flags = old->flags;
1893	new->preferred_loc = old->preferred_loc;
1894	new->prefetch_loc = old->prefetch_loc;
1895	new->actual_loc = old->actual_loc;
1896	new->granularity = old->granularity;
1897	new->mapped_to_gpu = old->mapped_to_gpu;
1898	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1899	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1900
1901	return new;
1902}
1903
1904void svm_range_set_max_pages(struct amdgpu_device *adev)
1905{
1906	uint64_t max_pages;
1907	uint64_t pages, _pages;
1908
1909	/* 1/32 VRAM size in pages */
1910	pages = adev->gmc.real_vram_size >> 17;
1911	pages = clamp(pages, 1ULL << 9, 1ULL << 18);
1912	pages = rounddown_pow_of_two(pages);
1913	do {
1914		max_pages = READ_ONCE(max_svm_range_pages);
1915		_pages = min_not_zero(max_pages, pages);
1916	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
1917}
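
/*
 * Worked example (editor's note, assuming 4 KiB pages so that
 * real_vram_size >> 17 equals 1/32 of VRAM in pages):
 *
 *	16 GiB VRAM: (1ULL << 34) >> 17 = 131072 pages (512 MiB),
 *	             already a power of two -> max range 512 MiB
 *	64 GiB VRAM: (1ULL << 36) >> 17 = 524288 pages, clamped to
 *	             1ULL << 18 pages -> max range 1 GiB
 *	 6 GiB VRAM: 49152 pages (192 MiB), rounded down to a power of
 *	             two -> 32768 pages, max range 128 MiB
 *
 * The cmpxchg loop publishes min_not_zero() of all GPUs' values, so a
 * mixed system is limited by its smallest-VRAM device.
 */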
1918
1919static int
1920svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
1921		    uint64_t max_pages, struct list_head *insert_list,
1922		    struct list_head *update_list)
1923{
1924	struct svm_range *prange;
1925	uint64_t l;
1926
1927	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
1928		 max_pages, start, last);
1929
1930	while (last >= start) {
1931		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
1932
1933		prange = svm_range_new(svms, start, l, true);
1934		if (!prange)
1935			return -ENOMEM;
1936		list_add(&prange->list, insert_list);
1937		list_add(&prange->update_list, update_list);
1938
1939		start = l + 1;
1940	}
1941	return 0;
1942}
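
/*
 * Worked example (editor's note): with max_pages = 0x200 (512 pages, i.e.
 * 2 MiB of 4 KiB pages), splitting [0x300 0x8ff] produces
 *
 *	[0x300 0x3ff] [0x400 0x5ff] [0x600 0x7ff] [0x800 0x8ff]
 *
 * because l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1)
 * snaps each chunk's end to the next max_pages boundary, so every chunk
 * after the unaligned head starts and ends aligned.
 */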
1943
1944/**
1945 * svm_range_add - add svm range and handle overlap
1946 * @p: the process to add the range to
1947 * @start: range start address, in pages
1948 * @size: range size, in pages
1949 * @nattr: number of attributes
1950 * @attrs: array of attributes
1951 * @update_list: output, the ranges that need validation and GPU mapping update
1952 * @insert_list: output, the ranges that need to be inserted into svms
1953 * @remove_list: output, the replaced ranges that need to be removed from svms
1954 *
1955 * Check if the virtual address range has overlap with any existing ranges,
1956 * split partly overlapping ranges and add new ranges in the gaps. All changes
1957 * should be applied to the range_list and interval tree transactionally. If
1958 * any range split or allocation fails, the entire update fails. Therefore any
1959 * existing overlapping svm_ranges are cloned and the original svm_ranges are
1960 * left unchanged.
1961 *
1962 * If the transaction succeeds, the caller can update and insert clones and
1963 * new ranges, then free the originals.
1964 *
1965 * Otherwise the caller can free the clones and new ranges, while the old
1966 * svm_ranges remain unchanged.
1967 *
1968 * Context: Process context, caller must hold svms->lock
1969 *
1970 * Return:
1971 * 0 - OK, otherwise error code
1972 */
1973static int
1974svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
1975	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
1976	      struct list_head *update_list, struct list_head *insert_list,
1977	      struct list_head *remove_list)
1978{
1979	unsigned long last = start + size - 1UL;
1980	struct svm_range_list *svms = &p->svms;
1981	struct interval_tree_node *node;
1982	struct svm_range *prange;
1983	struct svm_range *tmp;
1984	struct list_head new_list;
1985	int r = 0;
1986
1987	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
1988
1989	INIT_LIST_HEAD(update_list);
1990	INIT_LIST_HEAD(insert_list);
1991	INIT_LIST_HEAD(remove_list);
1992	INIT_LIST_HEAD(&new_list);
1993
1994	node = interval_tree_iter_first(&svms->objects, start, last);
1995	while (node) {
1996		struct interval_tree_node *next;
1997		unsigned long next_start;
1998
1999		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2000			 node->last);
2001
2002		prange = container_of(node, struct svm_range, it_node);
2003		next = interval_tree_iter_next(node, start, last);
2004		next_start = min(node->last, last) + 1;
2005
2006		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
2007			/* nothing to do */
2008		} else if (node->start < start || node->last > last) {
2009			/* node intersects the update range and its attributes
2010			 * will change. Clone and split it, apply updates only
2011			 * to the overlapping part
2012			 */
2013			struct svm_range *old = prange;
2014
2015			prange = svm_range_clone(old);
2016			if (!prange) {
2017				r = -ENOMEM;
2018				goto out;
2019			}
2020
2021			list_add(&old->update_list, remove_list);
2022			list_add(&prange->list, insert_list);
2023			list_add(&prange->update_list, update_list);
2024
2025			if (node->start < start) {
2026				pr_debug("change old range start\n");
2027				r = svm_range_split_head(prange, start,
2028							 insert_list);
2029				if (r)
2030					goto out;
2031			}
2032			if (node->last > last) {
2033				pr_debug("change old range last\n");
2034				r = svm_range_split_tail(prange, last,
2035							 insert_list);
2036				if (r)
2037					goto out;
2038			}
2039		} else {
2040			/* The node is contained within start..last,
2041			 * just update it
2042			 */
2043			list_add(&prange->update_list, update_list);
2044		}
2045
2046		/* insert a new node if needed */
2047		if (node->start > start) {
2048			r = svm_range_split_new(svms, start, node->start - 1,
2049						READ_ONCE(max_svm_range_pages),
2050						&new_list, update_list);
2051			if (r)
2052				goto out;
2053		}
2054
2055		node = next;
2056		start = next_start;
2057	}
2058
2059	/* add a final range at the end if needed */
2060	if (start <= last)
2061		r = svm_range_split_new(svms, start, last,
2062					READ_ONCE(max_svm_range_pages),
2063					&new_list, update_list);
2064
2065out:
2066	if (r) {
2067		list_for_each_entry_safe(prange, tmp, insert_list, list)
2068			svm_range_free(prange, false);
2069		list_for_each_entry_safe(prange, tmp, &new_list, list)
2070			svm_range_free(prange, true);
2071	} else {
2072		list_splice(&new_list, insert_list);
2073	}
2074
2075	return r;
2076}
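
/*
 * Usage sketch (editor's note): svm_range_set_attr() below consumes the
 * three output lists as one transaction:
 *
 *	r = svm_range_add(p, start, size, nattr, attrs,
 *			  &update_list, &insert_list, &remove_list);
 *	if (!r) {
 *		list_for_each_entry_safe(prange, next, &insert_list, list) {
 *			svm_range_add_to_svms(prange);
 *			svm_range_add_notifier_locked(mm, prange);
 *		}
 *	}
 *
 * then applies the attributes to update_list and unlinks and frees the
 * ranges on remove_list. On failure svm_range_add() has already freed the
 * clones and new ranges, so the pre-existing ranges remain untouched.
 */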
2077
2078static void
2079svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2080					    struct svm_range *prange)
2081{
2082	unsigned long start;
2083	unsigned long last;
2084
2085	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2086	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2087
2088	if (prange->start == start && prange->last == last)
2089		return;
2090
2091	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2092		  prange->svms, prange, start, last, prange->start,
2093		  prange->last);
2094
2095	if (start != 0 && last != 0) {
2096		interval_tree_remove(&prange->it_node, &prange->svms->objects);
2097		svm_range_remove_notifier(prange);
2098	}
2099	prange->it_node.start = prange->start;
2100	prange->it_node.last = prange->last;
2101
2102	interval_tree_insert(&prange->it_node, &prange->svms->objects);
2103	svm_range_add_notifier_locked(mm, prange);
2104}
2105
2106static void
2107svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2108			 struct mm_struct *mm)
2109{
2110	switch (prange->work_item.op) {
2111	case SVM_OP_NULL:
2112		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2113			 svms, prange, prange->start, prange->last);
2114		break;
2115	case SVM_OP_UNMAP_RANGE:
2116		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2117			 svms, prange, prange->start, prange->last);
2118		svm_range_unlink(prange);
2119		svm_range_remove_notifier(prange);
2120		svm_range_free(prange, true);
2121		break;
2122	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2123		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2124			 svms, prange, prange->start, prange->last);
2125		svm_range_update_notifier_and_interval_tree(mm, prange);
2126		break;
2127	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2128		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2129			 svms, prange, prange->start, prange->last);
2130		svm_range_update_notifier_and_interval_tree(mm, prange);
2131		/* TODO: implement deferred validation and mapping */
2132		break;
2133	case SVM_OP_ADD_RANGE:
2134		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2135			 prange->start, prange->last);
2136		svm_range_add_to_svms(prange);
2137		svm_range_add_notifier_locked(mm, prange);
2138		break;
2139	case SVM_OP_ADD_RANGE_AND_MAP:
2140		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2141			 prange, prange->start, prange->last);
2142		svm_range_add_to_svms(prange);
2143		svm_range_add_notifier_locked(mm, prange);
2144		/* TODO: implement deferred validation and mapping */
2145		break;
2146	default:
2147		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2148			 prange->work_item.op);
2149	}
2150}
2151
2152static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2153{
2154	struct kfd_process_device *pdd;
2155	struct kfd_process *p;
2156	int drain;
2157	uint32_t i;
2158
2159	p = container_of(svms, struct kfd_process, svms);
2160
2161restart:
2162	drain = atomic_read(&svms->drain_pagefaults);
2163	if (!drain)
2164		return;
2165
2166	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2167		pdd = p->pdds[i];
2168		if (!pdd)
2169			continue;
2170
2171		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2172
2173		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2174						     &pdd->dev->adev->irq.ih1);
2175		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2176	}
2177	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2178		goto restart;
2179}
2180
2181static void svm_range_deferred_list_work(struct work_struct *work)
2182{
2183	struct svm_range_list *svms;
2184	struct svm_range *prange;
2185	struct mm_struct *mm;
2186
2187	svms = container_of(work, struct svm_range_list, deferred_list_work);
2188	pr_debug("enter svms 0x%p\n", svms);
2189
2190	spin_lock(&svms->deferred_list_lock);
2191	while (!list_empty(&svms->deferred_range_list)) {
2192		prange = list_first_entry(&svms->deferred_range_list,
2193					  struct svm_range, deferred_list);
2194		spin_unlock(&svms->deferred_list_lock);
2195
2196		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2197			 prange->start, prange->last, prange->work_item.op);
2198
2199		mm = prange->work_item.mm;
2200retry:
2201		mmap_write_lock(mm);
2202
2203		/* Checking for the need to drain retry faults must be inside
2204		 * mmap write lock to serialize with munmap notifiers.
2205		 */
2206		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2207			mmap_write_unlock(mm);
2208			svm_range_drain_retry_fault(svms);
2209			goto retry;
2210		}
2211
2212		/* Remove from deferred_list must be inside mmap write lock, for
2213		 * two race cases:
2214		 * 1. unmap_from_cpu may change work_item.op and add the range
2215		 *    to deferred_list again, causing a use-after-free bug.
2216		 * 2. svm_range_list_lock_and_flush_work may hold mmap write
2217		 *    lock and continue because deferred_list is empty, but
2218		 *    deferred_list work is actually waiting for mmap lock.
2219		 */
2220		spin_lock(&svms->deferred_list_lock);
2221		list_del_init(&prange->deferred_list);
2222		spin_unlock(&svms->deferred_list_lock);
2223
2224		mutex_lock(&svms->lock);
2225		mutex_lock(&prange->migrate_mutex);
2226		while (!list_empty(&prange->child_list)) {
2227			struct svm_range *pchild;
2228
2229			pchild = list_first_entry(&prange->child_list,
2230						struct svm_range, child_list);
2231			pr_debug("child prange 0x%p op %d\n", pchild,
2232				 pchild->work_item.op);
2233			list_del_init(&pchild->child_list);
2234			svm_range_handle_list_op(svms, pchild, mm);
2235		}
2236		mutex_unlock(&prange->migrate_mutex);
2237
2238		svm_range_handle_list_op(svms, prange, mm);
2239		mutex_unlock(&svms->lock);
2240		mmap_write_unlock(mm);
2241
2242		/* Pairs with mmget in svm_range_add_list_work */
2243		mmput(mm);
2244
2245		spin_lock(&svms->deferred_list_lock);
2246	}
2247	spin_unlock(&svms->deferred_list_lock);
2248	pr_debug("exit svms 0x%p\n", svms);
2249}
2250
2251void
2252svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2253			struct mm_struct *mm, enum svm_work_list_ops op)
2254{
2255	spin_lock(&svms->deferred_list_lock);
2256	/* if prange is on the deferred list */
2257	if (!list_empty(&prange->deferred_list)) {
2258		pr_debug("update existing prange 0x%p work op %d\n", prange, op);
2259		WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2260		if (op != SVM_OP_NULL &&
2261		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2262			prange->work_item.op = op;
2263	} else {
2264		prange->work_item.op = op;
2265
2266		/* Pairs with mmput in deferred_list_work */
2267		mmget(mm);
2268		prange->work_item.mm = mm;
2269		list_add_tail(&prange->deferred_list,
2270			      &prange->svms->deferred_range_list);
2271		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2272			 prange, prange->start, prange->last, op);
2273	}
2274	spin_unlock(&svms->deferred_list_lock);
2275}
2276
2277void schedule_deferred_list_work(struct svm_range_list *svms)
2278{
2279	spin_lock(&svms->deferred_list_lock);
2280	if (!list_empty(&svms->deferred_range_list))
2281		schedule_work(&svms->deferred_list_work);
2282	spin_unlock(&svms->deferred_list_lock);
2283}
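
/*
 * Usage sketch (editor's note): callers queue an op on the range and then
 * kick the worker, as svm_range_unmap_from_cpu() does below:
 *
 *	svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
 *	schedule_deferred_list_work(svms);
 *
 * svm_range_add_list_work() takes an mm reference (mmget) that the worker
 * drops (mmput) after handling the op under the mmap write lock.
 */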
2284
2285static void
2286svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2287		      struct svm_range *prange, unsigned long start,
2288		      unsigned long last)
2289{
2290	struct svm_range *head;
2291	struct svm_range *tail;
2292
2293	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2294		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2295			 prange->start, prange->last);
2296		return;
2297	}
2298	if (start > prange->last || last < prange->start)
2299		return;
2300
2301	head = tail = prange;
2302	if (start > prange->start)
2303		svm_range_split(prange, prange->start, start - 1, &tail);
2304	if (last < tail->last)
2305		svm_range_split(tail, last + 1, tail->last, &head);
2306
2307	if (head != prange && tail != prange) {
2308		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2309		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2310	} else if (tail != prange) {
2311		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2312	} else if (head != prange) {
2313		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2314	} else if (parent != prange) {
2315		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2316	}
2317}
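
/*
 * Worked example (editor's note): unmapping the middle [s l] of a mapped
 * range [a b], with a < s and l < b, leaves three pieces:
 *
 *	[a s-1]  stays as prange, still mapped
 *	[s l]    split-off piece queued with SVM_OP_UNMAP_RANGE
 *	[l+1 b]  split-off piece queued with SVM_OP_ADD_RANGE
 *
 * Unmapping only a prefix or a suffix produces two pieces, which is why
 * the head/tail cases above are handled separately.
 */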
2318
2319static void
2320svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2321			 unsigned long start, unsigned long last)
2322{
2323	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2324	struct svm_range_list *svms;
2325	struct svm_range *pchild;
2326	struct kfd_process *p;
2327	unsigned long s, l;
2328	bool unmap_parent;
2329
2330	p = kfd_lookup_process_by_mm(mm);
2331	if (!p)
2332		return;
2333	svms = &p->svms;
2334
2335	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2336		 prange, prange->start, prange->last, start, last);
2337
2338	/* Make sure pending page faults are drained in the deferred worker
2339	 * before the range is freed to avoid straggler interrupts on
2340	 * unmapped memory causing "phantom faults".
2341	 */
2342	atomic_inc(&svms->drain_pagefaults);
2343
2344	unmap_parent = start <= prange->start && last >= prange->last;
2345
2346	list_for_each_entry(pchild, &prange->child_list, child_list) {
2347		mutex_lock_nested(&pchild->lock, 1);
2348		s = max(start, pchild->start);
2349		l = min(last, pchild->last);
2350		if (l >= s)
2351			svm_range_unmap_from_gpus(pchild, s, l, trigger);
2352		svm_range_unmap_split(mm, prange, pchild, start, last);
2353		mutex_unlock(&pchild->lock);
2354	}
2355	s = max(start, prange->start);
2356	l = min(last, prange->last);
2357	if (l >= s)
2358		svm_range_unmap_from_gpus(prange, s, l, trigger);
2359	svm_range_unmap_split(mm, prange, prange, start, last);
2360
2361	if (unmap_parent)
2362		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2363	else
2364		svm_range_add_list_work(svms, prange, mm,
2365					SVM_OP_UPDATE_RANGE_NOTIFIER);
2366	schedule_deferred_list_work(svms);
2367
2368	kfd_unref_process(p);
2369}
2370
2371/**
2372 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2373 * @mni: mmu_interval_notifier struct
2374 * @range: mmu_notifier_range struct
2375 * @cur_seq: value to pass to mmu_interval_set_seq()
2376 *
2377 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2378 * otherwise it comes from migration or a CPU page invalidation callback.
2379 *
2380 * For an unmap event, unmap the range from the GPUs, remove the prange from
2381 * svms in a deferred work thread, and split the prange if it is partly unmapped.
2382 *
2383 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2384 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2385 * If GPU retry fault is enabled, unmap the svm range from the GPU; the retry
2386 * fault will update the GPU mapping to recover.
2387 *
2388 * Context: mmap lock, notifier_invalidate_start lock are held
2389 *          for invalidate event, prange lock is held if this is from migration
2390 */
2391static bool
2392svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2393				    const struct mmu_notifier_range *range,
2394				    unsigned long cur_seq)
2395{
2396	struct svm_range *prange;
2397	unsigned long start;
2398	unsigned long last;
2399
2400	if (range->event == MMU_NOTIFY_RELEASE)
2401		return true;
2402	if (!mmget_not_zero(mni->mm))
2403		return true;
2404
2405	start = mni->interval_tree.start;
2406	last = mni->interval_tree.last;
2407	start = max(start, range->start) >> PAGE_SHIFT;
2408	last = min(last, range->end - 1) >> PAGE_SHIFT;
2409	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2410		 start, last, range->start >> PAGE_SHIFT,
2411		 (range->end - 1) >> PAGE_SHIFT,
2412		 mni->interval_tree.start >> PAGE_SHIFT,
2413		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2414
2415	prange = container_of(mni, struct svm_range, notifier);
2416
2417	svm_range_lock(prange);
2418	mmu_interval_set_seq(mni, cur_seq);
2419
2420	switch (range->event) {
2421	case MMU_NOTIFY_UNMAP:
2422		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2423		break;
2424	default:
2425		svm_range_evict(prange, mni->mm, start, last, range->event);
2426		break;
2427	}
2428
2429	svm_range_unlock(prange);
2430	mmput(mni->mm);
2431
2432	return true;
2433}
2434
2435/**
2436 * svm_range_from_addr - find svm range from fault address
2437 * @svms: svm range list header
2438 * @addr: address to search range interval tree, in pages
2439 * @parent: parent range if range is on child list
2440 *
2441 * Context: The caller must hold svms->lock
2442 *
2443 * Return: the svm_range found or NULL
2444 */
2445struct svm_range *
2446svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2447		    struct svm_range **parent)
2448{
2449	struct interval_tree_node *node;
2450	struct svm_range *prange;
2451	struct svm_range *pchild;
2452
2453	node = interval_tree_iter_first(&svms->objects, addr, addr);
2454	if (!node)
2455		return NULL;
2456
2457	prange = container_of(node, struct svm_range, it_node);
2458	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2459		 addr, prange->start, prange->last, node->start, node->last);
2460
2461	if (addr >= prange->start && addr <= prange->last) {
2462		if (parent)
2463			*parent = prange;
2464		return prange;
2465	}
2466	list_for_each_entry(pchild, &prange->child_list, child_list)
2467		if (addr >= pchild->start && addr <= pchild->last) {
2468			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2469				 addr, pchild->start, pchild->last);
2470			if (parent)
2471				*parent = prange;
2472			return pchild;
2473		}
2474
2475	return NULL;
2476}
2477
2478/* svm_range_best_restore_location - decide the best fault restore location
2479 * @prange: svm range structure
2480 * @adev: the GPU on which vm fault happened
2481 *
2482 * This is only called when xnack is on, to decide the best location to restore
2483 * the range mapping after a GPU vm fault. The caller uses the best location
2484 * to migrate if the actual loc is not the best location, then updates the GPU
2485 * page table mapping to the best location.
2486 *
2487 * If the preferred loc is accessible by faulting GPU, use preferred loc.
2488 * If the faulting GPU idx is in the ACCESSIBLE bitmap, best_loc is that GPU.
2489 * If the faulting GPU idx is in the ACCESSIBLE_IN_PLACE bitmap, then
2490 *    if the range's actual loc is the CPU, best_loc is the CPU;
2491 *    if the faulting GPU is on the same XGMI hive as the actual loc GPU,
2492 *    best_loc is the range's actual loc.
2493 * Otherwise the GPU has no access and best_loc is -1.
2494 *
2495 * Return:
2496 * -1 if the faulting GPU has no access
2497 * 0 for the CPU, or a GPU id
2498 */
2499static int32_t
2500svm_range_best_restore_location(struct svm_range *prange,
2501				struct amdgpu_device *adev,
2502				int32_t *gpuidx)
2503{
2504	struct amdgpu_device *bo_adev, *preferred_adev;
2505	struct kfd_process *p;
2506	uint32_t gpuid;
2507	int r;
2508
2509	p = container_of(prange->svms, struct kfd_process, svms);
2510
2511	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
2512	if (r < 0) {
2513		pr_debug("failed to get gpuid from kgd\n");
2514		return -1;
2515	}
2516
2517	if (prange->preferred_loc == gpuid ||
2518	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2519		return prange->preferred_loc;
2520	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2521		preferred_adev = svm_range_get_adev_by_id(prange,
2522							prange->preferred_loc);
2523		if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2524			return prange->preferred_loc;
2525		/* fall through */
2526	}
2527
2528	if (test_bit(*gpuidx, prange->bitmap_access))
2529		return gpuid;
2530
2531	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2532		if (!prange->actual_loc)
2533			return 0;
2534
2535		bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2536		if (amdgpu_xgmi_same_hive(adev, bo_adev))
2537			return prange->actual_loc;
2538		else
2539			return 0;
2540	}
2541
2542	return -1;
2543}
2544
2545static int
2546svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2547			       unsigned long *start, unsigned long *last,
2548			       bool *is_heap_stack)
2549{
2550	struct vm_area_struct *vma;
2551	struct interval_tree_node *node;
2552	unsigned long start_limit, end_limit;
2553
2554	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2555	if (!vma) {
2556		pr_debug("VMA does not exist at address [0x%llx]\n", addr);
2557		return -EFAULT;
2558	}
2559
2560	*is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2561			  vma->vm_end >= vma->vm_mm->start_brk) ||
2562			 (vma->vm_start <= vma->vm_mm->start_stack &&
2563			  vma->vm_end >= vma->vm_mm->start_stack);
2564
2565	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2566		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2567	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2568		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
2569	/* First range that starts after the fault address */
2570	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2571	if (node) {
2572		end_limit = min(end_limit, node->start);
2573		/* Last range that ends before the fault address */
2574		node = container_of(rb_prev(&node->rb),
2575				    struct interval_tree_node, rb);
2576	} else {
2577		/* Last range must end before addr because
2578		 * there was no range after addr
2579		 */
2580		node = container_of(rb_last(&p->svms.objects.rb_root),
2581				    struct interval_tree_node, rb);
2582	}
2583	if (node) {
2584		if (node->last >= addr) {
2585			WARN(1, "Overlap with prev node and page fault addr\n");
2586			return -EFAULT;
2587		}
2588		start_limit = max(start_limit, node->last + 1);
2589	}
2590
2591	*start = start_limit;
2592	*last = end_limit - 1;
2593
2594	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2595		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2596		 *start, *last, *is_heap_stack);
2597
2598	return 0;
2599}
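
/*
 * Worked example (editor's note): 2UL << 8 is 512 pages, i.e. 2 MiB with
 * 4 KiB pages. For a fault at page 0x12345 the default window is
 *
 *	start_limit = ALIGN_DOWN(0x12345, 512) = 0x12200
 *	end_limit   = ALIGN(0x12346, 512)      = 0x12400 (last = 0x123ff)
 *
 * before being clipped to the enclosing VMA and shrunk so it does not
 * overlap the nearest existing svm ranges on either side.
 */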
2600
2601static int
2602svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2603			   uint64_t *bo_s, uint64_t *bo_l)
2604{
2605	struct amdgpu_bo_va_mapping *mapping;
2606	struct interval_tree_node *node;
2607	struct amdgpu_bo *bo = NULL;
2608	unsigned long userptr;
2609	uint32_t i;
2610	int r;
2611
2612	for (i = 0; i < p->n_pdds; i++) {
2613		struct amdgpu_vm *vm;
2614
2615		if (!p->pdds[i]->drm_priv)
2616			continue;
2617
2618		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2619		r = amdgpu_bo_reserve(vm->root.bo, false);
2620		if (r)
2621			return r;
2622
2623		/* Check userptr by searching entire vm->va interval tree */
2624		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2625		while (node) {
2626			mapping = container_of((struct rb_node *)node,
2627					       struct amdgpu_bo_va_mapping, rb);
2628			bo = mapping->bo_va->base.bo;
2629
2630			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2631							 start << PAGE_SHIFT,
2632							 last << PAGE_SHIFT,
2633							 &userptr)) {
2634				node = interval_tree_iter_next(node, 0, ~0ULL);
2635				continue;
2636			}
2637
2638			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2639				 start, last);
2640			if (bo_s && bo_l) {
2641				*bo_s = userptr >> PAGE_SHIFT;
2642				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2643			}
2644			amdgpu_bo_unreserve(vm->root.bo);
2645			return -EADDRINUSE;
2646		}
2647		amdgpu_bo_unreserve(vm->root.bo);
2648	}
2649	return 0;
2650}
2651
2652static struct
2653svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2654						struct kfd_process *p,
2655						struct mm_struct *mm,
2656						int64_t addr)
2657{
2658	struct svm_range *prange = NULL;
2659	unsigned long start, last;
2660	uint32_t gpuid, gpuidx;
2661	bool is_heap_stack;
2662	uint64_t bo_s = 0;
2663	uint64_t bo_l = 0;
2664	int r;
2665
2666	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2667					   &is_heap_stack))
2668		return NULL;
2669
2670	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2671	if (r != -EADDRINUSE)
2672		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2673
2674	if (r == -EADDRINUSE) {
2675		if (addr >= bo_s && addr <= bo_l)
2676			return NULL;
2677
2678		/* Create a one-page svm range if the 2MB range overlaps a mapping */
2679		start = addr;
2680		last = addr;
2681	}
2682
2683	prange = svm_range_new(&p->svms, start, last, true);
2684	if (!prange) {
2685		pr_debug("Failed to create prange at address [0x%llx]\n", addr);
2686		return NULL;
2687	}
2688	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
2689		pr_debug("failed to get gpuid from kgd\n");
2690		svm_range_free(prange, true);
2691		return NULL;
2692	}
2693
2694	if (is_heap_stack)
2695		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2696
2697	svm_range_add_to_svms(prange);
2698	svm_range_add_notifier_locked(mm, prange);
2699
2700	return prange;
2701}
2702
2703/* svm_range_skip_recover - decide if prange can be recovered
2704 * @prange: svm range structure
2705 *
2706 * The GPU vm retry fault handler skips recovering the range in these cases:
2707 * 1. prange is on the deferred list to be removed after unmap; this is a stale
2708 *    fault, and the deferred list work drains it before freeing the prange.
2709 * 2. prange is on the deferred list to add its interval notifier after split, or
2710 * 3. prange is a child range, split from a parent prange; recover later,
2711 *    after the interval notifier is added.
2712 *
2713 * Return: true to skip recover, false to recover
2714 */
2715static bool svm_range_skip_recover(struct svm_range *prange)
2716{
2717	struct svm_range_list *svms = prange->svms;
2718
2719	spin_lock(&svms->deferred_list_lock);
2720	if (list_empty(&prange->deferred_list) &&
2721	    list_empty(&prange->child_list)) {
2722		spin_unlock(&svms->deferred_list_lock);
2723		return false;
2724	}
2725	spin_unlock(&svms->deferred_list_lock);
2726
2727	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2728		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2729			 svms, prange, prange->start, prange->last);
2730		return true;
2731	}
2732	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2733	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2734		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2735			 svms, prange, prange->start, prange->last);
2736		return true;
2737	}
2738	return false;
2739}
2740
2741static void
2742svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2743		      int32_t gpuidx)
2744{
2745	struct kfd_process_device *pdd;
2746
2747	/* The fault is on a different page of the same range,
2748	 * or the fault was skipped to be recovered later,
2749	 * or the fault is on an invalid virtual address.
2750	 */
2751	if (gpuidx == MAX_GPU_INSTANCE) {
2752		uint32_t gpuid;
2753		int r;
2754
2755		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
2756		if (r < 0)
2757			return;
2758	}
2759
2760	/* The fault was recovered,
2761	 * or it cannot be recovered because the GPU has no access to the range.
2762	 */
2763	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2764	if (pdd)
2765		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2766}
2767
2768static bool
2769svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2770{
2771	unsigned long requested = VM_READ;
2772
2773	if (write_fault)
2774		requested |= VM_WRITE;
2775
2776	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2777		vma->vm_flags);
2778	return (vma->vm_flags & requested) == requested;
2779}
2780
2781int
2782svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2783			uint64_t addr, bool write_fault)
2784{
2785	struct mm_struct *mm = NULL;
2786	struct svm_range_list *svms;
2787	struct svm_range *prange;
2788	struct kfd_process *p;
2789	ktime_t timestamp = ktime_get_boottime();
2790	int32_t best_loc;
2791	int32_t gpuidx = MAX_GPU_INSTANCE;
2792	bool write_locked = false;
2793	struct vm_area_struct *vma;
2794	bool migration = false;
2795	int r = 0;
2796
2797	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2798		pr_debug("device does not support SVM\n");
2799		return -EFAULT;
2800	}
2801
2802	p = kfd_lookup_process_by_pasid(pasid);
2803	if (!p) {
2804		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2805		return 0;
2806	}
2807	svms = &p->svms;
2808
2809	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2810
2811	if (atomic_read(&svms->drain_pagefaults)) {
2812		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2813		r = 0;
2814		goto out;
2815	}
2816
2817	if (!p->xnack_enabled) {
2818		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2819		r = -EFAULT;
2820		goto out;
2821	}
2822
2823	/* p->lead_thread is available as kfd_process_wq_release flushes the work
2824	 * before releasing the task ref.
2825	 */
2826	mm = get_task_mm(p->lead_thread);
2827	if (!mm) {
2828		pr_debug("svms 0x%p failed to get mm\n", svms);
2829		r = 0;
2830		goto out;
2831	}
2832
2833	mmap_read_lock(mm);
2834retry_write_locked:
2835	mutex_lock(&svms->lock);
2836	prange = svm_range_from_addr(svms, addr, NULL);
2837	if (!prange) {
2838		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2839			 svms, addr);
2840		if (!write_locked) {
2841			/* Need the write lock to create a new range with an MMU notifier.
2842			 * Also flush pending deferred work to make sure the interval
2843			 * tree is up to date before we add a new range
2844			 */
2845			mutex_unlock(&svms->lock);
2846			mmap_read_unlock(mm);
2847			mmap_write_lock(mm);
2848			write_locked = true;
2849			goto retry_write_locked;
2850		}
2851		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2852		if (!prange) {
2853			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2854				 svms, addr);
2855			mmap_write_downgrade(mm);
2856			r = -EFAULT;
2857			goto out_unlock_svms;
2858		}
2859	}
2860	if (write_locked)
2861		mmap_write_downgrade(mm);
2862
2863	mutex_lock(&prange->migrate_mutex);
2864
2865	if (svm_range_skip_recover(prange)) {
2866		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2867		r = 0;
2868		goto out_unlock_range;
2869	}
2870
2871	/* skip duplicate vm fault on different pages of same range */
2872	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2873				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
2874		pr_debug("svms 0x%p [0x%lx 0x%lx] already restored\n",
2875			 svms, prange->start, prange->last);
2876		r = 0;
2877		goto out_unlock_range;
2878	}
2879
2880	/* __do_munmap removed the VMA; return success as we are handling a stale
2881	 * retry fault.
2882	 */
2883	vma = vma_lookup(mm, addr << PAGE_SHIFT);
2884	if (!vma) {
2885		pr_debug("address 0x%llx VMA is removed\n", addr);
2886		r = 0;
2887		goto out_unlock_range;
2888	}
2889
2890	if (!svm_fault_allowed(vma, write_fault)) {
2891		pr_debug("fault addr 0x%llx no %s permission\n", addr,
2892			write_fault ? "write" : "read");
2893		r = -EPERM;
2894		goto out_unlock_range;
2895	}
2896
2897	best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2898	if (best_loc == -1) {
2899		pr_debug("svms %p failed to get best restore loc [0x%lx 0x%lx]\n",
2900			 svms, prange->start, prange->last);
2901		r = -EACCES;
2902		goto out_unlock_range;
2903	}
2904
2905	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2906		 svms, prange->start, prange->last, best_loc,
2907		 prange->actual_loc);
2908
2909	kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
2910				       write_fault, timestamp);
2911
2912	if (prange->actual_loc != best_loc) {
2913		migration = true;
2914		if (best_loc) {
2915			r = svm_migrate_to_vram(prange, best_loc, mm,
2916					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2917			if (r) {
2918				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2919					 r, addr);
2920				/* Fallback to system memory if migration to
2921				 * VRAM failed
2922				 */
2923				if (prange->actual_loc)
2924					r = svm_migrate_vram_to_ram(prange, mm,
2925					   KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
2926					   NULL);
2927				else
2928					r = 0;
2929			}
2930		} else {
2931			r = svm_migrate_vram_to_ram(prange, mm,
2932					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
2933					NULL);
2934		}
2935		if (r) {
2936			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2937				 r, svms, prange->start, prange->last);
2938			goto out_unlock_range;
2939		}
2940	}
2941
2942	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
2943	if (r)
2944		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2945			 r, svms, prange->start, prange->last);
2946
2947	kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
2948				     migration);
2949
2950out_unlock_range:
2951	mutex_unlock(&prange->migrate_mutex);
2952out_unlock_svms:
2953	mutex_unlock(&svms->lock);
2954	mmap_read_unlock(mm);
2955
2956	svm_range_count_fault(adev, p, gpuidx);
2957
2958	mmput(mm);
2959out:
2960	kfd_unref_process(p);
2961
2962	if (r == -EAGAIN) {
2963		pr_debug("recover vm fault later\n");
2964		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2965		r = 0;
2966	}
2967	return r;
2968}
2969
2970int
2971svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
2972{
2973	struct svm_range *prange, *pchild;
2974	uint64_t reserved_size = 0;
2975	uint64_t size;
2976	int r = 0;
2977
2978	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
2979
2980	mutex_lock(&p->svms.lock);
2981
2982	list_for_each_entry(prange, &p->svms.list, list) {
2983		svm_range_lock(prange);
2984		list_for_each_entry(pchild, &prange->child_list, child_list) {
2985			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
2986			if (xnack_enabled) {
2987				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
2988						KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
2989			} else {
2990				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
2991						KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
2992				if (r)
2993					goto out_unlock;
2994				reserved_size += size;
2995			}
2996		}
2997
2998		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
2999		if (xnack_enabled) {
3000			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3001						KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3002		} else {
3003			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3004						KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3005			if (r)
3006				goto out_unlock;
3007			reserved_size += size;
3008		}
3009out_unlock:
3010		svm_range_unlock(prange);
3011		if (r)
3012			break;
3013	}
3014
3015	if (r)
3016		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3017						KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3018	else
3019		/* Changing the xnack mode must be done inside the svms lock, to avoid
3020		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3021		 */
3022		p->xnack_enabled = xnack_enabled;
3023
3024	mutex_unlock(&p->svms.lock);
3025	return r;
3026}
3027
3028void svm_range_list_fini(struct kfd_process *p)
3029{
3030	struct svm_range *prange;
3031	struct svm_range *next;
3032
3033	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3034
3035	cancel_delayed_work_sync(&p->svms.restore_work);
3036
3037	/* Ensure list work is finished before process is destroyed */
3038	flush_work(&p->svms.deferred_list_work);
3039
3040	/*
3041	 * Ensure no retry fault comes in afterwards, as the page fault handler
3042	 * would not find the kfd process and take the mm lock to recover the fault.
3043	 */
3044	atomic_inc(&p->svms.drain_pagefaults);
3045	svm_range_drain_retry_fault(&p->svms);
3046
3047	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3048		svm_range_unlink(prange);
3049		svm_range_remove_notifier(prange);
3050		svm_range_free(prange, true);
3051	}
3052
3053	mutex_destroy(&p->svms.lock);
3054
3055	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3056}
3057
3058int svm_range_list_init(struct kfd_process *p)
3059{
3060	struct svm_range_list *svms = &p->svms;
3061	int i;
3062
3063	svms->objects = RB_ROOT_CACHED;
3064	mutex_init(&svms->lock);
3065	INIT_LIST_HEAD(&svms->list);
3066	atomic_set(&svms->evicted_ranges, 0);
3067	atomic_set(&svms->drain_pagefaults, 0);
3068	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3069	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3070	INIT_LIST_HEAD(&svms->deferred_range_list);
3071	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3072	spin_lock_init(&svms->deferred_list_lock);
3073
3074	for (i = 0; i < p->n_pdds; i++)
3075		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
3076			bitmap_set(svms->bitmap_supported, i, 1);
3077
3078	return 0;
3079}
3080
3081/**
3082 * svm_range_check_vm - check if the virtual address range is already mapped
3083 * @p: current kfd_process
3084 * @start: range start address, in pages
3085 * @last: range last address, in pages
3086 * @bo_s: mapping start address in pages if address range already mapped
3087 * @bo_l: mapping last address in pages if address range already mapped
3088 *
3089 * The purpose is to avoid virtual address ranges already allocated by
3090 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3091 * It checks each pdd in the kfd_process.
3092 *
3093 * Context: Process context
3094 *
3095 * Return 0 - OK, if the range is not mapped.
3096 * Otherwise error code:
3097 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3098 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3099 * a signal. Release all buffer reservations and return to user-space.
3100 */
3101static int
3102svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3103		   uint64_t *bo_s, uint64_t *bo_l)
3104{
3105	struct amdgpu_bo_va_mapping *mapping;
3106	struct interval_tree_node *node;
3107	uint32_t i;
3108	int r;
3109
3110	for (i = 0; i < p->n_pdds; i++) {
3111		struct amdgpu_vm *vm;
3112
3113		if (!p->pdds[i]->drm_priv)
3114			continue;
3115
3116		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3117		r = amdgpu_bo_reserve(vm->root.bo, false);
3118		if (r)
3119			return r;
3120
3121		node = interval_tree_iter_first(&vm->va, start, last);
3122		if (node) {
3123			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3124				 start, last);
3125			mapping = container_of((struct rb_node *)node,
3126					       struct amdgpu_bo_va_mapping, rb);
3127			if (bo_s && bo_l) {
3128				*bo_s = mapping->start;
3129				*bo_l = mapping->last;
3130			}
3131			amdgpu_bo_unreserve(vm->root.bo);
3132			return -EADDRINUSE;
3133		}
3134		amdgpu_bo_unreserve(vm->root.bo);
3135	}
3136
3137	return 0;
3138}
3139
3140/**
3141 * svm_range_is_valid - check if virtual address range is valid
3142 * @p: current kfd_process
3143 * @start: range start address, in pages
3144 * @size: range size, in pages
3145 *
3146 * A virtual address range is valid if it belongs entirely to one or more VMAs.
3147 *
3148 * Context: Process context
3149 *
3150 * Return:
3151 *  0 - OK, otherwise error code
3152 */
3153static int
3154svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3155{
3156	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3157	struct vm_area_struct *vma;
3158	unsigned long end;
3159	unsigned long start_unchg = start;
3160
3161	start <<= PAGE_SHIFT;
3162	end = start + (size << PAGE_SHIFT);
3163	do {
3164		vma = vma_lookup(p->mm, start);
3165		if (!vma || (vma->vm_flags & device_vma))
3166			return -EFAULT;
3167		start = min(end, vma->vm_end);
3168	} while (start < end);
3169
3170	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3171				  NULL);
3172}
3173
3174/**
3175 * svm_range_best_prefetch_location - decide the best prefetch location
3176 * @prange: svm range structure
3177 *
3178 * For xnack off:
3179 * If the range maps to a single GPU, the best prefetch location is
3180 * prefetch_loc, which can be the CPU or a GPU.
3181 *
3182 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
3183 * location is the prefetch_loc GPU only if the mGPUs are connected on the same
3184 * XGMI hive; otherwise it is always the CPU, because a GPU cannot coherently
3185 * map another GPU's VRAM even with a large-BAR PCIe connection.
3186 *
3187 * For xnack on:
3188 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3189 * prefetch_loc; access by another GPU generates a vm fault and triggers migration.
3190 *
3191 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
3192 * hive, the best prefetch location is prefetch_loc GPU, otherwise the best
3193 * prefetch location is always CPU.
3194 *
3195 * Context: Process context
3196 *
3197 * Return:
3198 * 0 for CPU or GPU id
3199 */
3200static uint32_t
3201svm_range_best_prefetch_location(struct svm_range *prange)
3202{
3203	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3204	uint32_t best_loc = prange->prefetch_loc;
3205	struct kfd_process_device *pdd;
3206	struct amdgpu_device *bo_adev;
3207	struct kfd_process *p;
3208	uint32_t gpuidx;
3209
3210	p = container_of(prange->svms, struct kfd_process, svms);
3211
3212	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3213		goto out;
3214
3215	bo_adev = svm_range_get_adev_by_id(prange, best_loc);
3216	if (!bo_adev) {
3217		WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
3218		best_loc = 0;
3219		goto out;
3220	}
3221
3222	if (p->xnack_enabled)
3223		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3224	else
3225		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3226			  MAX_GPU_INSTANCE);
3227
3228	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3229		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3230		if (!pdd) {
3231			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3232			continue;
3233		}
3234
3235		if (pdd->dev->adev == bo_adev)
3236			continue;
3237
3238		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
3239			best_loc = 0;
3240			break;
3241		}
3242	}
3243
3244out:
3245	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3246		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3247		 best_loc);
3248
3249	return best_loc;
3250}
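
/*
 * Example (editor's note, hypothetical two-GPU system): with xnack off, a
 * range that is ACCESS_IN_PLACE on gpu0 and gpu1 with best_loc = gpu0
 * effectively evaluates, for the pdd of gpu1:
 *
 *	amdgpu_xgmi_same_hive(gpu1_adev, gpu0_adev) ? gpu0 : 0
 *
 * so VRAM prefetch is kept only inside an XGMI hive; over PCIe the range
 * is prefetched to system memory (best_loc 0) instead.
 */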
3251
3252/* svm_range_trigger_migration - start page migration if prefetch loc changed
3253 * @mm: current process mm_struct
3254 * @prange: svm range structure
3255 * @migrated: output, true if migration is triggered
3256 *
3257 * If the range's prefetch_loc is a GPU and the actual loc is the CPU (0), then
3258 * migrate the range from ram to vram.
3259 * If the range's prefetch_loc is the CPU (0) and the actual loc is a GPU, then
3260 * migrate the range from vram to ram.
3261 *
3262 * If GPU vm fault retry is not enabled, migration interacts with the MMU
3263 * notifier and restore work:
3264 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3265 *    svm_range_evict stops all queues and schedules the restore work
3266 * 2. svm_range_restore_work waits for the migration to finish via
3267 *    a. svm_range_validate_vram taking prange->migrate_mutex
3268 *    b. svm_range_validate_ram HMM get_pages waiting for the CPU fault handler
3269 * 3. restore work updates the GPU mappings and resumes all queues.
3270 *
3271 * Context: Process context
3272 *
3273 * Return:
3274 * 0 - OK, otherwise - error code of migration
3275 */
3276static int
3277svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3278			    bool *migrated)
3279{
3280	uint32_t best_loc;
3281	int r = 0;
3282
3283	*migrated = false;
3284	best_loc = svm_range_best_prefetch_location(prange);
3285
3286	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3287	    best_loc == prange->actual_loc)
3288		return 0;
3289
3290	if (!best_loc) {
3291		r = svm_migrate_vram_to_ram(prange, mm,
3292					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3293		*migrated = !r;
3294		return r;
3295	}
3296
3297	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3298	*migrated = !r;
3299
3300	return r;
3301}
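
/*
 * Usage sketch (editor's note): svm_range_set_attr() below calls this per
 * range with the migrate mutex held:
 *
 *	mutex_lock(&prange->migrate_mutex);
 *	r = svm_range_trigger_migration(mm, prange, &migrated);
 *
 * The GPU fault handler takes the same prange->migrate_mutex before
 * migrating, so a prefetch-triggered migration cannot race with fault
 * recovery on the same range.
 */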
3302
3303int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3304{
3305	if (!fence)
3306		return -EINVAL;
3307
3308	if (dma_fence_is_signaled(&fence->base))
3309		return 0;
3310
3311	if (fence->svm_bo) {
3312		WRITE_ONCE(fence->svm_bo->evicting, 1);
3313		schedule_work(&fence->svm_bo->eviction_work);
3314	}
3315
3316	return 0;
3317}
3318
3319static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3320{
3321	struct svm_range_bo *svm_bo;
3322	struct mm_struct *mm;
3323	int r = 0;
3324
3325	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3326	if (!svm_bo_ref_unless_zero(svm_bo))
3327		return; /* svm_bo was freed while eviction was pending */
3328
3329	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3330		mm = svm_bo->eviction_fence->mm;
3331	} else {
3332		svm_range_bo_unref(svm_bo);
3333		return;
3334	}
3335
3336	mmap_read_lock(mm);
3337	spin_lock(&svm_bo->list_lock);
3338	while (!list_empty(&svm_bo->range_list) && !r) {
3339		struct svm_range *prange =
3340				list_first_entry(&svm_bo->range_list,
3341						struct svm_range, svm_bo_list);
3342		int retries = 3;
3343
3344		list_del_init(&prange->svm_bo_list);
3345		spin_unlock(&svm_bo->list_lock);
3346
3347		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3348			 prange->start, prange->last);
3349
3350		mutex_lock(&prange->migrate_mutex);
3351		do {
3352			r = svm_migrate_vram_to_ram(prange, mm,
3353					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3354		} while (!r && prange->actual_loc && --retries);
3355
3356		if (!r && prange->actual_loc)
3357			pr_info_once("Migration failed during eviction\n");
3358
3359		if (!prange->actual_loc) {
3360			mutex_lock(&prange->lock);
3361			prange->svm_bo = NULL;
3362			mutex_unlock(&prange->lock);
3363		}
3364		mutex_unlock(&prange->migrate_mutex);
3365
3366		spin_lock(&svm_bo->list_lock);
3367	}
3368	spin_unlock(&svm_bo->list_lock);
3369	mmap_read_unlock(mm);
3370	mmput(mm);
3371
3372	dma_fence_signal(&svm_bo->eviction_fence->base);
3373
3374	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3375	 * has been called in svm_migrate_vram_to_ram
3376	 */
3377	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3378	svm_range_bo_unref(svm_bo);
3379}
3380
3381static int
3382svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3383		   uint64_t start, uint64_t size, uint32_t nattr,
3384		   struct kfd_ioctl_svm_attribute *attrs)
3385{
3386	struct amdkfd_process_info *process_info = p->kgd_process_info;
3387	struct list_head update_list;
3388	struct list_head insert_list;
3389	struct list_head remove_list;
3390	struct svm_range_list *svms;
3391	struct svm_range *prange;
3392	struct svm_range *next;
3393	bool update_mapping = false;
3394	bool flush_tlb;
3395	int r = 0;
3396
3397	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3398		 p->pasid, &p->svms, start, start + size - 1, size);
3399
3400	r = svm_range_check_attr(p, nattr, attrs);
3401	if (r)
3402		return r;
3403
3404	svms = &p->svms;
3405
3406	mutex_lock(&process_info->lock);
3407
3408	svm_range_list_lock_and_flush_work(svms, mm);
3409
3410	r = svm_range_is_valid(p, start, size);
3411	if (r) {
3412		pr_debug("invalid range r=%d\n", r);
3413		mmap_write_unlock(mm);
3414		goto out;
3415	}
3416
3417	mutex_lock(&svms->lock);
3418
3419	/* Add new range and split existing ranges as needed */
3420	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3421			  &insert_list, &remove_list);
3422	if (r) {
3423		mutex_unlock(&svms->lock);
3424		mmap_write_unlock(mm);
3425		goto out;
3426	}
3427	/* Apply changes as a transaction */
3428	list_for_each_entry_safe(prange, next, &insert_list, list) {
3429		svm_range_add_to_svms(prange);
3430		svm_range_add_notifier_locked(mm, prange);
3431	}
3432	list_for_each_entry(prange, &update_list, update_list) {
3433		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3434		/* TODO: unmap ranges from GPU that lost access */
3435	}
3436	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3437		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3438			 prange->svms, prange, prange->start,
3439			 prange->last);
3440		svm_range_unlink(prange);
3441		svm_range_remove_notifier(prange);
3442		svm_range_free(prange, false);
3443	}
3444
3445	mmap_write_downgrade(mm);
3446	/* Trigger migrations and revalidate and map to GPUs as needed. If
3447	 * this fails we may be left with partially completed actions. There
3448	 * is no clean way of rolling back to the previous state in such a
3449	 * case because the rollback wouldn't be guaranteed to work either.
3450	 */
3451	list_for_each_entry(prange, &update_list, update_list) {
3452		bool migrated;
3453
3454		mutex_lock(&prange->migrate_mutex);
3455
3456		r = svm_range_trigger_migration(mm, prange, &migrated);
3457		if (r)
3458			goto out_unlock_range;
3459
3460		if (migrated && (!p->xnack_enabled ||
3461		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3462		    prange->mapped_to_gpu) {
3463			pr_debug("restore_work will update mappings of GPUs\n");
3464			mutex_unlock(&prange->migrate_mutex);
3465			continue;
3466		}
3467
3468		if (!migrated && !update_mapping) {
3469			mutex_unlock(&prange->migrate_mutex);
3470			continue;
3471		}
3472
3473		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3474
3475		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3476					       true, true, flush_tlb);
3477		if (r)
3478			pr_debug("failed %d to map svm range\n", r);
3479
3480out_unlock_range:
3481		mutex_unlock(&prange->migrate_mutex);
3482		if (r)
3483			break;
3484	}
3485
3486	svm_range_debug_dump(svms);
3487
3488	mutex_unlock(&svms->lock);
3489	mmap_read_unlock(mm);
3490out:
3491	mutex_unlock(&process_info->lock);
3492
3493	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3494		 &p->svms, start, start + size - 1, r);
3495
3496	return r;
3497}
3498
3499static int
3500svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3501		   uint64_t start, uint64_t size, uint32_t nattr,
3502		   struct kfd_ioctl_svm_attribute *attrs)
3503{
3504	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3505	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3506	bool get_preferred_loc = false;
3507	bool get_prefetch_loc = false;
3508	bool get_granularity = false;
3509	bool get_accessible = false;
3510	bool get_flags = false;
3511	uint64_t last = start + size - 1UL;
3512	uint8_t granularity = 0xff;
3513	struct interval_tree_node *node;
3514	struct svm_range_list *svms;
3515	struct svm_range *prange;
3516	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3517	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3518	uint32_t flags_and = 0xffffffff;
3519	uint32_t flags_or = 0;
3520	int gpuidx;
3521	uint32_t i;
3522	int r = 0;
3523
3524	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3525		 start + size - 1, nattr);
3526
3527	/* Flush pending deferred work to avoid racing with deferred actions from
3528	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3529	 * can still race with get_attr because we don't hold the mmap lock. But that
3530	 * would be a race condition in the application anyway, and undefined
3531	 * behaviour is acceptable in that case.
3532	 */
3533	flush_work(&p->svms.deferred_list_work);
3534
3535	mmap_read_lock(mm);
3536	r = svm_range_is_valid(p, start, size);
3537	mmap_read_unlock(mm);
3538	if (r) {
3539		pr_debug("invalid range r=%d\n", r);
3540		return r;
3541	}
3542
3543	for (i = 0; i < nattr; i++) {
3544		switch (attrs[i].type) {
3545		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3546			get_preferred_loc = true;
3547			break;
3548		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3549			get_prefetch_loc = true;
3550			break;
3551		case KFD_IOCTL_SVM_ATTR_ACCESS:
3552			get_accessible = true;
3553			break;
3554		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3555		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3556			get_flags = true;
3557			break;
3558		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3559			get_granularity = true;
3560			break;
3561		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3562		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3563			fallthrough;
3564		default:
3565			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3566			return -EINVAL;
3567		}
3568	}
3569
	svms = &p->svms;

	mutex_lock(&svms->lock);

	node = interval_tree_iter_first(&svms->objects, start, last);
	if (!node) {
		pr_debug("range attrs not found return default values\n");
		svm_range_set_default_attributes(&location, &prefetch_loc,
						 &granularity, &flags_and);
		flags_or = flags_and;
		if (p->xnack_enabled)
			bitmap_copy(bitmap_access, svms->bitmap_supported,
				    MAX_GPU_INSTANCE);
		else
			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
		goto fill_values;
	}
	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);

	while (node) {
		struct interval_tree_node *next;

		prange = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);

		if (get_preferred_loc) {
			if (prange->preferred_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     location != prange->preferred_loc)) {
				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_preferred_loc = false;
			} else {
				location = prange->preferred_loc;
			}
		}
		if (get_prefetch_loc) {
			if (prange->prefetch_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     prefetch_loc != prange->prefetch_loc)) {
				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_prefetch_loc = false;
			} else {
				prefetch_loc = prange->prefetch_loc;
			}
		}
		if (get_accessible) {
			bitmap_and(bitmap_access, bitmap_access,
				   prange->bitmap_access, MAX_GPU_INSTANCE);
			bitmap_and(bitmap_aip, bitmap_aip,
				   prange->bitmap_aip, MAX_GPU_INSTANCE);
		}
		if (get_flags) {
			flags_and &= prange->flags;
			flags_or |= prange->flags;
		}

		if (get_granularity && prange->granularity < granularity)
			granularity = prange->granularity;

		node = next;
	}
fill_values:
	mutex_unlock(&svms->lock);

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			attrs[i].value = location;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			attrs[i].value = prefetch_loc;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (gpuidx < 0) {
				pr_debug("invalid gpuid %x\n", attrs[i].value);
				return -EINVAL;
			}
			if (test_bit(gpuidx, bitmap_access))
				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
			else if (test_bit(gpuidx, bitmap_aip))
				attrs[i].type =
					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
			else
				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			attrs[i].value = flags_and;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			attrs[i].value = ~flags_or;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			attrs[i].value = (uint32_t)granularity;
			break;
		}
	}

	return 0;
}

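/**
 * kfd_criu_resume_svm - replay checkpointed SVM attributes on resume
 * @p: the kfd_process being resumed
 *
 * Walks the metadata queued on svms->criu_svm_metadata_list by
 * kfd_criu_restore_svm(), sanitizes attributes that cannot be restored
 * verbatim (an undefined prefetch location), appends a synthesized
 * CLR_FLAGS entry, and replays each saved range through
 * svm_range_set_attr(). All queued metadata is freed before returning.
 *
 * Return: 0 on success (including when no SVM data was checkpointed),
 * negative errno on failure.
 */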
int kfd_criu_resume_svm(struct kfd_process *p)
{
	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	struct criu_svm_metadata *next = NULL;
	uint32_t set_flags = 0xffffffff;
	int i, j, num_attrs, ret = 0;
	uint64_t set_attr_size;
	struct mm_struct *mm;

	if (list_empty(&svms->criu_svm_metadata_list)) {
		pr_debug("No SVM data from CRIU restore stage 2\n");
		return ret;
	}

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);

	i = j = 0;
	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);

		for (j = 0; j < num_attrs; j++) {
			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
				 i, j, criu_svm_md->data.attrs[j].type,
				 i, j, criu_svm_md->data.attrs[j].value);
			switch (criu_svm_md->data.attrs[j].type) {
			/* During the checkpoint operation, the query for the
			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute may
			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
			 * not used by the checkpointed range. Take care not
			 * to restore with an invalid value, otherwise the
			 * gpuidx value would be invalid and set_attr would
			 * eventually fail, so replace such entries with a
			 * harmless dummy attribute such as
			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
			 */
			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
				if (criu_svm_md->data.attrs[j].value ==
				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
					criu_svm_md->data.attrs[j].type =
						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
					criu_svm_md->data.attrs[j].value = 0;
				}
				break;
			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
				set_flags = criu_svm_md->data.attrs[j].value;
				break;
			default:
				break;
			}
		}

		/* CLR_FLAGS is not available via get_attr during checkpoint,
		 * but it needs to be applied when restoring the ranges, so
		 * allocate extra space for it before calling set_attr.
		 */
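		/* The replayed array then looks like this (sketch for a
		 * hypothetical process with two GPUs, so num_attrs = 6):
		 *
		 *   attrs[0] PREFERRED_LOC    attrs[3]    GRANULARITY
		 *   attrs[1] PREFETCH_LOC     attrs[4..5] ACCESS (one per GPU)
		 *   attrs[2] SET_FLAGS        attrs[6]    CLR_FLAGS = ~set_flags
		 */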
		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
						(num_attrs + 1);
		set_attr_new = krealloc(set_attr, set_attr_size,
					GFP_KERNEL);
		if (!set_attr_new) {
			ret = -ENOMEM;
			goto exit;
		}
		set_attr = set_attr_new;

		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
					sizeof(struct kfd_ioctl_svm_attribute));
		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
		set_attr[num_attrs].value = ~set_flags;

		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
					 criu_svm_md->data.size, num_attrs + 1,
					 set_attr);
		if (ret) {
			pr_err("CRIU: failed to set range attributes\n");
			goto exit;
		}

		i++;
	}
exit:
	kfree(set_attr);
	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
			 criu_svm_md->data.start_addr);
		kfree(criu_svm_md);
	}

	mmput(mm);
	return ret;
}

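/**
 * kfd_criu_restore_svm - buffer one checkpointed SVM range for resume
 * @p: the target kfd_process
 * @user_priv_ptr: user buffer holding the CRIU private data
 * @priv_data_offset: current read offset into @user_priv_ptr, advanced on
 *                    success
 * @max_priv_data_size: total size of the user buffer, used for bounds checks
 *
 * Copies one kfd_criu_svm_range_priv_data record (header plus the
 * per-process attribute array) out of the user buffer and queues it on
 * svms->criu_svm_metadata_list; the attributes are applied later by
 * kfd_criu_resume_svm().
 *
 * Return: 0 on success, negative errno on failure.
 */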
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size)
{
	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	uint32_t num_devices;
	int ret = 0;

	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; this must be checked
	 * earlier, while evaluating the topology.
	 */

	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
		(nattr_common + nattr_accessibility * num_devices);
	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;

	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
								svm_attrs_size;

	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
	if (!criu_svm_md) {
		pr_err("failed to allocate memory to store svm metadata\n");
		return -ENOMEM;
	}
	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
			     svm_priv_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += svm_priv_data_size;

	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);

	return 0;

exit:
	kfree(criu_svm_md);
	return ret;
}

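/**
 * svm_range_get_info - size the SVM portion of a CRIU checkpoint
 * @p: the kfd_process being checkpointed
 * @num_svm_ranges: output, number of ranges on the svms list
 * @svm_priv_data_size: output, bytes needed to checkpoint all ranges
 *
 * Counts the process's svm_range objects and computes how much private data
 * space the checkpoint needs: one kfd_criu_svm_range_priv_data header plus
 * the common and per-GPU accessibility attributes for each range (see the
 * attribute breakdown below).
 *
 * Return: 0 on success, negative errno on failure.
 */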
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size)
{
	uint64_t total_size, accessibility_size, common_attr_size;
	int nattr_common = 4, nattr_accessibility = 1;
	int num_devices = p->n_pdds;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t count = 0;

	*svm_priv_data_size = 0;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mutex_lock(&svms->lock);
	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1);
		count++;
	}
	mutex_unlock(&svms->lock);

	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for each GPU
	 * individually; the remaining attributes span the entire process
	 * regardless of the various GPU nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
	 *
	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
	 * (Considered as one, type is altered during query, value is gpuid)
	 * KFD_IOCTL_SVM_ATTR_ACCESS
	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
	 */
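	/* Worked example (sizes are illustrative): with num_devices = 2 and
	 * an 8-byte kfd_ioctl_svm_attribute (two __u32 members), each range
	 * needs sizeof(struct kfd_criu_svm_range_priv_data) + 4 * 8 bytes of
	 * common attributes + 2 * 8 bytes of accessibility attributes, and
	 * *svm_priv_data_size is that total multiplied by *num_svm_ranges.
	 */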
	if (*num_svm_ranges > 0) {
		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_common;
		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_accessibility * num_devices;

		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			common_attr_size + accessibility_size;

		*svm_priv_data_size = *num_svm_ranges * total_size;
	}

	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
		 *svm_priv_data_size);
	return 0;
}

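/**
 * kfd_criu_checkpoint_svm - serialize all SVM ranges into CRIU private data
 * @p: the kfd_process being checkpointed
 * @user_priv_data: user buffer receiving the serialized ranges
 * @priv_data_offset: current write offset into @user_priv_data, advanced
 *                    once per range
 *
 * Builds one query template (the common attributes plus one ACCESS entry
 * per GPU), then fills it per range via svm_range_get_attr() and copies
 * each resulting kfd_criu_svm_range_priv_data record to user space.
 *
 * Return: 0 on success, negative errno on failure.
 */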
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_data_offset)
{
	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
	struct kfd_ioctl_svm_attribute *query_attr = NULL;
	uint64_t svm_priv_data_size, query_attr_size = 0;
	int index, nattr_common = 4, ret = 0;
	struct svm_range_list *svms;
	int num_devices = p->n_pdds;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
				(nattr_common + num_devices);

	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
	if (!query_attr) {
		ret = -ENOMEM;
		goto exit;
	}

	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;

	for (index = 0; index < num_devices; index++) {
		struct kfd_process_device *pdd = p->pdds[index];

		query_attr[index + nattr_common].type =
			KFD_IOCTL_SVM_ATTR_ACCESS;
		query_attr[index + nattr_common].value = pdd->user_gpu_id;
	}

	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;

	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
	if (!svm_priv) {
		ret = -ENOMEM;
		goto exit_query;
	}

	index = 0;
	list_for_each_entry(prange, &svms->list, list) {
		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
		svm_priv->start_addr = prange->start;
		svm_priv->size = prange->npages;
		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->npages * PAGE_SIZE);

		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
					 svm_priv->size,
					 (nattr_common + num_devices),
					 svm_priv->attrs);
		if (ret) {
			pr_err("CRIU: failed to obtain range attributes\n");
			goto exit_priv;
		}

		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
				 svm_priv_data_size)) {
			pr_err("Failed to copy svm priv to user\n");
			ret = -EFAULT;
			goto exit_priv;
		}

		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
	kfree(svm_priv);
exit_query:
	kfree(query_attr);
exit:
	mmput(mm);
	return ret;
}

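/* A minimal user-space sketch of driving this entry point through the
 * AMDKFD_IOC_SVM ioctl (assumes the kfd_ioctl_svm_args layout from
 * include/uapi/linux/kfd_ioctl.h; fd is an open /dev/kfd descriptor and
 * buf/len describe the region being queried):
 *
 *	struct kfd_ioctl_svm_args *args;
 *
 *	args = calloc(1, sizeof(*args) + sizeof(args->attrs[0]));
 *	args->start_addr = (uintptr_t)buf;
 *	args->size = len;
 *	args->op = KFD_IOCTL_SVM_OP_GET_ATTR;
 *	args->nattr = 1;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
 *	if (ioctl(fd, AMDKFD_IOC_SVM, args) == 0)
 *		printf("granularity 2^%u pages\n", args->attrs[0].value);
 *
 * Note that start and size arrive in bytes and are converted to pages
 * below before the set/get helpers run.
 */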
int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
	struct mm_struct *mm = current->mm;
	int r;

	start >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
		break;
	case KFD_IOCTL_SVM_OP_GET_ATTR:
		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}