/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs, but rather
 * than there being a single global GART table for the entire GPU,
 * there are multiple VM page tables active at any given time.  The
 * VM page tables can contain a mix of VRAM pages and system memory
 * pages, and system memory pages can be mapped as snooped (cached
 * system pages) or unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST
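
/*
 * Editorial sketch (not part of the driver): INTERVAL_TREE_DEFINE() above
 * generates amdgpu_vm_it_iter_first()/amdgpu_vm_it_iter_next(), which walk
 * every mapping overlapping a [start, last] range.  Assuming the per-VM
 * tree lives in vm->va, a range lookup looks roughly like this:
 */
#if 0
static void demo_walk_mappings(struct amdgpu_vm *vm,
			       uint64_t start, uint64_t last)
{
	struct amdgpu_bo_va_mapping *mapping;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, start, last);
	     mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, start, last))
		/* every mapping here intersects [start, last] */;
}
#endif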

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return 1;
	}
	return 0;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
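
/*
 * Editorial sketch: the typical pattern around these helpers, mirroring
 * amdgpu_vm_validate_pt_bos() and amdgpu_vm_bo_update_mapping() below.
 * Page table updates bail out with -EBUSY while the VM is being evicted
 * instead of blocking, because the lock may be taken from MMU notifiers.
 */
#if 0
static int demo_touch_page_tables(struct amdgpu_vm *vm)
{
	int r = 0;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting)
		r = -EBUSY;	/* caller retries after validation */
	else
		/* ... safe to update page tables here ... */;
	amdgpu_vm_eviction_unlock(vm);
	return r;
}
#endif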

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}
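
/*
 * Editorial sketch (standalone userspace C, not driver code): with the
 * common adev->vm_manager.block_size of 9 this yields shifts of 27/18/9
 * for PDB2/PDB1/PDB0 and 0 for the PTB, i.e. each directory level selects
 * one of 512 entries of the level below it.
 */
#if 0
#include <stdio.h>

enum { PDB2, PDB1, PDB0, PTB };	/* mirrors the AMDGPU_VM_* levels */

static unsigned level_shift(unsigned level, unsigned block_size)
{
	return level == PTB ? 0 : 9 * (PDB0 - level) + block_size;
}

int main(void)
{
	printf("%u %u %u %u\n", level_shift(PDB2, 9), level_shift(PDB1, 9),
	       level_shift(PDB0, 9), level_shift(PTB, 9));
	return 0;	/* prints: 27 18 9 0 */
}
#endif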

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which need the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
	unsigned shift;

	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}

/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
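
/*
 * Editorial sketch (standalone userspace C): worked numbers for the three
 * helpers above, assuming a 48-bit address space (max_pfn == 1ULL << 36
 * with 4KiB GPU pages) and block_size == 9.  Every level then holds 512
 * eight-byte entries, so each PD/PT BO is exactly one 4KiB page.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_pfn = 1ULL << 36;	/* 48-bit VA / 4KiB GPU pages */
	unsigned root_shift = 27;	/* amdgpu_vm_level_shift(PDB2) */
	uint64_t root_entries = max_pfn >> root_shift;

	printf("root entries: %llu, BO size: %llu bytes\n",
	       (unsigned long long)root_entries,
	       (unsigned long long)(root_entries * 8));
	return 0;	/* prints: root entries: 512, BO size: 4096 bytes */
}
#endif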

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and that change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent)
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	else
		amdgpu_vm_bo_idle(vm_bo);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}
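
/*
 * Editorial summary (sketch, derived from the helpers above): each vm_bo
 * lives on exactly one per-VM list, and the helpers implement the state
 * transitions:
 *
 *   amdgpu_vm_bo_evicted()     -> vm->evicted      needs validation first
 *   amdgpu_vm_bo_moved()       -> vm->moved        per-VM BO, PTs stale
 *   amdgpu_vm_bo_invalidated() -> vm->invalidated  normal BO, PTs stale
 *   amdgpu_vm_bo_relocated()   -> vm->relocated    PD/PT, parent PD stale
 *   amdgpu_vm_bo_idle()        -> vm->idle         fully up to date
 *   amdgpu_vm_bo_done()        -> vm->done         normal BO, PTs updated
 */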

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize a amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned shift, num_entries;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

	if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
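
/*
 * Editorial sketch: typical use of the iterator above, mirroring
 * amdgpu_vm_free_pts() further down.  Children are always visited before
 * their parents, so an entry can be torn down while the walk continues.
 */
#if 0
static void demo_visit_all_tables(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
		/* process one PD/PT entry, children first */;
}
#endif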

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.bo->tbo;
	/* Two for VM updates, one for TTM and one for the CS job */
	entry->tv.num_shared = 4;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 * LRU.
 */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_bo *abo;
	struct amdgpu_vm_bo_base *bo_base;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	if (bo->pin_count)
		return;

	abo = ttm_to_amdgpu_bo(bo);
	if (!abo->parent)
		return;
	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			vm->bulk_moveable = false;
	}
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&adev->mman.bdev.lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&adev->mman.bdev.lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&adev->mman.bdev.lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;
		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
					&vm->lru_bulk_move);
		if (shadow)
			ttm_bo_move_to_lru_tail(&shadow->tbo,
						shadow->tbo.resource,
						&vm->lru_bulk_move);
	}
	spin_unlock(&adev->mman.bdev.lru_lock);

	vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r;

	vm->bulk_moveable &= list_empty(&vm->evicted);

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;
		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);

		r = validate(param, bo);
		if (r)
			return r;
		if (shadow) {
			r = validate(param, shadow);
			if (r)
				return r;
		}

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;
	amdgpu_vm_eviction_unlock(vm);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo_vm *vmbo,
			      bool immediate)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	struct amdgpu_bo *bo = &vmbo->bo;
	unsigned entries, ats_entries;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_bo_base *pt;

		pt = ancestor->vm_bo;
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (vmbo->shadow) {
		struct amdgpu_bo *shadow = vmbo->shadow;

		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
		if (r)
			return r;
	}

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
					     value, flags);
		if (r)
			return r;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
					     value, flags);
		if (r)
			return r;
	}

	return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 */
static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       int level, bool immediate,
			       struct amdgpu_bo_vm **vmbo)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	unsigned int num_entries;
	int r;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_bo_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
	if (r)
		return r;

	bo = &(*vmbo)->bo;
	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
		(*vmbo)->shadow = NULL;
		return 0;
	}

	if (!bp.resv)
		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
				      NULL));
	resv = bp.resv;
	memset(&bp, 0, sizeof(bp));
	bp.size = amdgpu_vm_bo_size(adev, level);
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);

	if (!resv)
		dma_resv_unlock(bo->tbo.base.resv);

	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
	amdgpu_bo_add_to_shadow_list(*vmbo);

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 1 if page table needed to be allocated, 0 if page table was already
 * allocated, negative errno if an error occurred.
 */
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       struct amdgpu_vm_pt_cursor *cursor,
			       bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
	if (r)
		return r;

	/* Keep a reference to the root directory to avoid
	 * freeing them up in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_free_table - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_bo *shadow;

	if (!entry->bo)
		return;
	shadow = amdgpu_bo_shadowed(entry->bo);
	entry->bo->vm_bo = NULL;
	list_del(&entry->vm_status);
	amdgpu_bo_unref(&shadow);
	amdgpu_bo_unref(&entry->bo);
}

/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 * @start: optional cursor where to start freeing PDs/PTs
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       struct amdgpu_vm_pt_cursor *start)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	vm->bulk_moveable = false;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
		amdgpu_vm_free_table(entry);

	if (start)
		amdgpu_vm_free_table(start->entry);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job:  related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned patch_offset = 0;
	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
	int r;

	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
	}

	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
	dma_fence_put(fence);

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
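
/*
 * Editorial sketch (standalone userspace C): the lookup above with a 64KiB
 * CPU page size and 4KiB GPU pages.  The DMA base of the containing CPU
 * page is ORed with the offset inside it, then truncated to 4KiB alignment
 * so the result is a valid GPU page address.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 16			/* 64KiB CPU pages */
#define DEMO_PAGE_MASK (~((1ULL << DEMO_PAGE_SHIFT) - 1))

static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t result = pages_addr[addr >> DEMO_PAGE_SHIFT];

	result |= addr & ~DEMO_PAGE_MASK;	/* offset inside the CPU page */
	return result & 0xFFFFFFFFFFFFF000ULL;	/* 4KiB GPU page alignment */
}

int main(void)
{
	uint64_t pages_addr[2] = { 0xabc0000, 0xdef0000 };

	/* GPU page inside CPU page 1 -> 0xdef0000 + 0x5000 */
	printf("%llx\n", (unsigned long long)
	       map_gart(pages_addr, (1ULL << 16) + 0x5123));
	return 0;	/* prints: def5000 */
}
#endif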

/**
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
				struct amdgpu_vm *vm,
				struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo = parent->bo, *pbo;
	uint64_t pde, pt, flags;
	unsigned level;

	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}

/**
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
		if (entry->bo && !entry->moved)
			amdgpu_vm_bo_relocated(entry);
}

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate)
{
	struct amdgpu_vm_update_params params;
	int r;

	if (list_empty(&vm->relocated))
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_bo_base *entry;

		entry = list_first_entry(&vm->relocated,
					 struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;
	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
				   struct amdgpu_bo_vm *pt, unsigned int level,
				   uint64_t pe, uint64_t addr,
				   unsigned int count, uint32_t incr,
				   uint64_t flags)
{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not contiguous */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}
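
/*
 * Editorial sketch (standalone userspace C): the fragment selection above.
 * ffs()/fls64() map to the GCC builtins here; start/end are GPU pfns.  The
 * fragment is limited both by the alignment of start and by the size of
 * the remaining range.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void fragment(uint64_t start, uint64_t end, unsigned max_frag,
		     unsigned *frag, uint64_t *frag_end)
{
	/* ffs(start) - 1; treat start == 0 as "maximally aligned" */
	unsigned align = start ? __builtin_ctzll(start) : 63;
	/* fls64(end - start) - 1, i.e. floor(log2(len)) */
	unsigned size = 63 - __builtin_clzll(end - start);

	*frag = align < size ? align : size;
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1ULL << *frag);
	}
}

int main(void)
{
	unsigned frag;
	uint64_t frag_end;

	fragment(0x200, 0x1000, 31, &frag, &frag_end);
	/* start is 512-page aligned and the range holds 0xe00 pages, so
	 * frag 9 (a 2MiB fragment) ending at pfn 0x400 is chosen */
	printf("frag=%u frag_end=%llx\n", frag,
	       (unsigned long long)frag_end);
	return 0;	/* prints: frag=9 frag_end=400 */
}
#endif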

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_alloc_pts(params->adev, params->vm,
						&cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
				   1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    nptes, dst, incr, upd_flags,
						    vm->task_info.pid,
						    vm->immediate.fence_context);
			amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
					       cursor.level, pe_start, dst,
					       nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_fragment(params, frag_start, end,
						   flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->table_freed = true;
					amdgpu_vm_free_pts(adev, params->vm, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer of the VM
 * @bo_adev: amdgpu_device pointer of the mapped BO
 * @vm: requested vm
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 * @table_freed: return true if page table is freed
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				struct amdgpu_device *bo_adev,
				struct amdgpu_vm *vm, bool immediate,
				bool unlocked, struct dma_resv *resv,
				uint64_t start, uint64_t last,
				uint64_t flags, uint64_t offset,
				struct ttm_resource *res,
				dma_addr_t *pages_addr,
				struct dma_fence **fence,
				bool *table_freed)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_res_cursor cursor;
	enum amdgpu_sync_mode sync_mode;
	int r, idx;

	if (!drm_dev_enter(&adev->ddev, &idx))
		return -ENODEV;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;
	params.pages_addr = pages_addr;
	params.unlocked = unlocked;

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping. Sync to moving fences before mapping.
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_unlock;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

	r = vm->update_funcs->prepare(&params, resv, sync_mode);
	if (r)
		goto error_unlock;

	amdgpu_res_first(pages_addr ? NULL : res, offset,
			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
	while (cursor.remaining) {
		uint64_t tmp, num_entries, addr;

		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
		if (pages_addr) {
			bool contiguous = true;

			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
				uint64_t pfn = cursor.start >> PAGE_SHIFT;
				uint64_t count;

				contiguous = pages_addr[pfn + 1] ==
					pages_addr[pfn] + PAGE_SIZE;

				tmp = num_entries /
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
				for (count = 2; count < tmp; ++count) {
					uint64_t idx = pfn + count;

					if (contiguous != (pages_addr[idx] ==
					    pages_addr[idx - 1] + PAGE_SIZE))
						break;
				}
				num_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

			if (!contiguous) {
				addr = cursor.start;
				params.pages_addr = pages_addr;
			} else {
				addr = pages_addr[cursor.start >> PAGE_SHIFT];
				params.pages_addr = NULL;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
			addr = bo_adev->vm_manager.vram_base_offset +
				cursor.start;
		} else {
			addr = 0;
		}

		tmp = start + num_entries;
		r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags);
		if (r)
			goto error_unlock;

		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
		start = tmp;
	}

	r = vm->update_funcs->commit(&params, fence);

	if (table_freed)
		*table_freed = params.table_freed;

error_unlock:
	amdgpu_vm_eviction_unlock(vm);
	drm_dev_exit(idx);
	return r;
}

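/*
 * Editorial sketch (standalone userspace C): a simplified version of the
 * contiguity scan above.  It counts how many CPU pages starting at pfn are
 * physically adjacent, so a run can be mapped from a single base address
 * instead of a per-page lookup table.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096ULL

static uint64_t contig_pages(const uint64_t *pages_addr, uint64_t pfn,
			     uint64_t max)
{
	uint64_t count;

	for (count = 1; count < max; ++count)
		if (pages_addr[pfn + count] !=
		    pages_addr[pfn + count - 1] + DEMO_PAGE_SIZE)
			break;
	return count;
}

int main(void)
{
	uint64_t pages_addr[4] = { 0x1000, 0x2000, 0x3000, 0x9000 };

	/* the first three pages form one physically contiguous run */
	printf("%llu\n", (unsigned long long)contig_pages(pages_addr, 0, 4));
	return 0;	/* prints: 3 */
}
#endif
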
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
				uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	struct amdgpu_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	spin_lock(&vm->invalidated_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				gtt_mem, cpu_mem);
	}
	spin_unlock(&vm->invalidated_lock);
}

1813/**
1814 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1815 *
1816 * @adev: amdgpu_device pointer
1817 * @bo_va: requested BO and VM object
1818 * @clear: if true clear the entries
1819 *
1820 * Fill in the page table entries for @bo_va.
1821 *
1822 * Returns:
1823 * 0 for success, -EINVAL for failure.
1824 */
1825int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1826			bool clear)
1827{
1828	struct amdgpu_bo *bo = bo_va->base.bo;
1829	struct amdgpu_vm *vm = bo_va->base.vm;
1830	struct amdgpu_bo_va_mapping *mapping;
1831	dma_addr_t *pages_addr = NULL;
1832	struct ttm_resource *mem;
 
1833	struct dma_fence **last_update;
1834	struct dma_resv *resv;
1835	uint64_t flags;
1836	struct amdgpu_device *bo_adev = adev;
1837	int r;
1838
1839	if (clear || !bo) {
1840		mem = NULL;
1841		resv = vm->root.bo->tbo.base.resv;
 
1842	} else {
1843		struct drm_gem_object *obj = &bo->tbo.base;
1844
 
 
 
 
 
 
1845		resv = bo->tbo.base.resv;
1846		if (obj->import_attach && bo_va->is_xgmi) {
1847			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1848			struct drm_gem_object *gobj = dma_buf->priv;
1849			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1850
1851			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
1852				bo = gem_to_amdgpu_bo(gobj);
1853		}
1854		mem = bo->tbo.resource;
1855		if (mem->mem_type == TTM_PL_TT ||
1856		    mem->mem_type == AMDGPU_PL_PREEMPT)
1857			pages_addr = bo->tbo.ttm->dma_address;
1858	}
1859
1860	if (bo) {
1861		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1862
1863		if (amdgpu_bo_encrypted(bo))
1864			flags |= AMDGPU_PTE_TMZ;
1865
1866		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1867	} else {
1868		flags = 0x0;
1869	}
1870
1871	if (clear || (bo && bo->tbo.base.resv ==
1872		      vm->root.bo->tbo.base.resv))
1873		last_update = &vm->last_update;
1874	else
1875		last_update = &bo_va->last_pt_update;
1876
1877	if (!clear && bo_va->base.moved) {
1878		bo_va->base.moved = false;
1879		list_splice_init(&bo_va->valids, &bo_va->invalids);
1880
1881	} else if (bo_va->cleared != clear) {
1882		list_splice_init(&bo_va->valids, &bo_va->invalids);
1883	}
1884
1885	list_for_each_entry(mapping, &bo_va->invalids, list) {
1886		uint64_t update_flags = flags;
1887
1888		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1889		 * bits here, but to be safe we filter the flags explicitly.
1890		 */
1891		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1892			update_flags &= ~AMDGPU_PTE_READABLE;
1893		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1894			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1895
1896		/* Apply ASIC specific mapping flags */
1897		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1898
1899		trace_amdgpu_vm_bo_update(mapping);
1900
1901		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1902						resv, mapping->start,
1903						mapping->last, update_flags,
1904						mapping->offset, mem,
1905						pages_addr, last_update, NULL);
1906		if (r)
1907			return r;
1908	}
1909
1910	/* If the BO is not in its preferred location add it back to
1911	 * the evicted list so that it gets validated again on the
1912	 * next command submission.
1913	 */
1914	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1915		uint32_t mem_type = bo->tbo.resource->mem_type;
1916
1917		if (!(bo->preferred_domains &
1918		      amdgpu_mem_type_to_domain(mem_type)))
1919			amdgpu_vm_bo_evicted(&bo_va->base);
1920		else
1921			amdgpu_vm_bo_idle(&bo_va->base);
1922	} else {
1923		amdgpu_vm_bo_done(&bo_va->base);
1924	}
1925
1926	list_splice_init(&bo_va->invalids, &bo_va->valids);
1927	bo_va->cleared = clear;
1928
1929	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1930		list_for_each_entry(mapping, &bo_va->valids, list)
1931			trace_amdgpu_vm_bo_mapping(mapping);
1932	}
1933
1934	return 0;
1935}
1936
1937/**
1938 * amdgpu_vm_update_prt_state - update the global PRT state
1939 *
1940 * @adev: amdgpu_device pointer
1941 */
1942static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1943{
1944	unsigned long flags;
1945	bool enable;
1946
1947	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1948	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1949	adev->gmc.gmc_funcs->set_prt(adev, enable);
1950	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1951}
1952
1953/**
1954 * amdgpu_vm_prt_get - add a PRT user
1955 *
1956 * @adev: amdgpu_device pointer
1957 */
1958static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1959{
1960	if (!adev->gmc.gmc_funcs->set_prt)
1961		return;
1962
1963	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1964		amdgpu_vm_update_prt_state(adev);
1965}
1966
1967/**
1968 * amdgpu_vm_prt_put - drop a PRT user
1969 *
1970 * @adev: amdgpu_device pointer
1971 */
1972static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1973{
1974	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1975		amdgpu_vm_update_prt_state(adev);
1976}
1977
1978/**
1979 * amdgpu_vm_prt_cb - callback for updating the PRT status
1980 *
1981 * @fence: fence for the callback
1982 * @_cb: the callback function
1983 */
1984static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1985{
1986	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1987
1988	amdgpu_vm_prt_put(cb->adev);
1989	kfree(cb);
1990}
1991
1992/**
1993 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1994 *
1995 * @adev: amdgpu_device pointer
1996 * @fence: fence for the callback
1997 */
1998static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1999				 struct dma_fence *fence)
2000{
2001	struct amdgpu_prt_cb *cb;
2002
2003	if (!adev->gmc.gmc_funcs->set_prt)
2004		return;
2005
2006	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
2007	if (!cb) {
2008		/* Last resort when we are OOM */
2009		if (fence)
2010			dma_fence_wait(fence, false);
2011
2012		amdgpu_vm_prt_put(adev);
2013	} else {
2014		cb->adev = adev;
2015		if (!fence || dma_fence_add_callback(fence, &cb->cb,
2016						     amdgpu_vm_prt_cb))
2017			amdgpu_vm_prt_cb(fence, &cb->cb);
2018	}
2019}
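
/*
 * Illustrative note on the PRT refcount lifetime, not part of this file:
 * inserting a mapping with AMDGPU_PTE_PRT set calls amdgpu_vm_prt_get(),
 * which enables PRT for the first user; freeing such a mapping goes through
 * amdgpu_vm_add_prt_cb() with the unmap fence, so amdgpu_vm_prt_put() runs,
 * and PRT is disabled again, only once the GPU has stopped using the range.
 */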
2020
2021/**
2022 * amdgpu_vm_free_mapping - free a mapping
2023 *
2024 * @adev: amdgpu_device pointer
2025 * @vm: requested vm
2026 * @mapping: mapping to be freed
2027 * @fence: fence of the unmap operation
2028 *
2029 * Free a mapping and make sure we decrease the PRT usage count if applicable.
2030 */
2031static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
2032				   struct amdgpu_vm *vm,
2033				   struct amdgpu_bo_va_mapping *mapping,
2034				   struct dma_fence *fence)
2035{
2036	if (mapping->flags & AMDGPU_PTE_PRT)
2037		amdgpu_vm_add_prt_cb(adev, fence);
2038	kfree(mapping);
2039}
2040
2041/**
2042 * amdgpu_vm_prt_fini - finish all prt mappings
2043 *
2044 * @adev: amdgpu_device pointer
2045 * @vm: requested vm
2046 *
2047 * Register a cleanup callback to disable PRT support after the VM dies.
2048 */
2049static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2050{
2051	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
2052	struct dma_fence *excl, **shared;
2053	unsigned i, shared_count;
2054	int r;
2055
2056	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
2057	if (r) {
2058		/* Not enough memory to grab the fence list, as last resort
2059		 * block for all the fences to complete.
2060		 */
2061		dma_resv_wait_timeout(resv, true, false,
2062						    MAX_SCHEDULE_TIMEOUT);
2063		return;
2064	}
2065
2066	/* Add a callback for each fence in the reservation object */
2067	amdgpu_vm_prt_get(adev);
2068	amdgpu_vm_add_prt_cb(adev, excl);
2069
2070	for (i = 0; i < shared_count; ++i) {
2071		amdgpu_vm_prt_get(adev);
2072		amdgpu_vm_add_prt_cb(adev, shared[i]);
2073	}
2074
2075	kfree(shared);
2076}
2077
2078/**
2079 * amdgpu_vm_clear_freed - clear freed BOs in the PT
2080 *
2081 * @adev: amdgpu_device pointer
2082 * @vm: requested vm
2083 * @fence: optional resulting fence (unchanged if no work needed to be done
2084 * or if an error occurred)
2085 *
2086 * Make sure all freed BOs are cleared in the PT.
2087 * PTs have to be reserved and mutex must be locked!
2088 *
2089 * Returns:
2090 * 0 for success.
2091 *
2092 */
2093int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2094			  struct amdgpu_vm *vm,
2095			  struct dma_fence **fence)
2096{
2097	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
2098	struct amdgpu_bo_va_mapping *mapping;
2099	uint64_t init_pte_value = 0;
2100	struct dma_fence *f = NULL;
2101	int r;
2102
2103	while (!list_empty(&vm->freed)) {
2104		mapping = list_first_entry(&vm->freed,
2105			struct amdgpu_bo_va_mapping, list);
2106		list_del(&mapping->list);
2107
2108		if (vm->pte_support_ats &&
2109		    mapping->start < AMDGPU_GMC_HOLE_START)
2110			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2111
2112		r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
2113						resv, mapping->start,
2114						mapping->last, init_pte_value,
2115						0, NULL, NULL, &f, NULL);
2116		amdgpu_vm_free_mapping(adev, vm, mapping, f);
2117		if (r) {
2118			dma_fence_put(f);
2119			return r;
2120		}
2121	}
2122
2123	if (fence && f) {
2124		dma_fence_put(*fence);
2125		*fence = f;
2126	} else {
2127		dma_fence_put(f);
2128	}
2129
2130	return 0;
2131
2132}
2133
2134/**
2135 * amdgpu_vm_handle_moved - handle moved BOs in the PT
2136 *
2137 * @adev: amdgpu_device pointer
2138 * @vm: requested vm
2139 *
2140 * Make sure all BOs which are moved are updated in the PTs.
2141 *
2142 * Returns:
2143 * 0 for success.
2144 *
2145 * PTs have to be reserved!
2146 */
2147int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2148			   struct amdgpu_vm *vm)
2149{
2150	struct amdgpu_bo_va *bo_va, *tmp;
2151	struct dma_resv *resv;
2152	bool clear;
2153	int r;
2154
2155	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2156		/* Per VM BOs never need to be cleared in the page tables */
2157		r = amdgpu_vm_bo_update(adev, bo_va, false);
2158		if (r)
2159			return r;
2160	}
2161
2162	spin_lock(&vm->invalidated_lock);
2163	while (!list_empty(&vm->invalidated)) {
2164		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2165					 base.vm_status);
2166		resv = bo_va->base.bo->tbo.base.resv;
2167		spin_unlock(&vm->invalidated_lock);
2168
2169		/* Try to reserve the BO to avoid clearing its ptes */
2170		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2171			clear = false;
2172		/* Somebody else is using the BO right now */
2173		else
2174			clear = true;
2175
2176		r = amdgpu_vm_bo_update(adev, bo_va, clear);
2177		if (r)
2178			return r;
2179
2180		if (!clear)
2181			dma_resv_unlock(resv);
2182		spin_lock(&vm->invalidated_lock);
2183	}
2184	spin_unlock(&vm->invalidated_lock);
2185
2186	return 0;
2187}
2188
2189/**
2190 * amdgpu_vm_bo_add - add a bo to a specific vm
2191 *
2192 * @adev: amdgpu_device pointer
2193 * @vm: requested vm
2194 * @bo: amdgpu buffer object
2195 *
2196 * Add @bo into the requested vm and to the list of bos
2197 * associated with the vm.
2198 *
2199 * Returns:
2200 * Newly added bo_va or NULL for failure
2201 *
2202 * Object has to be reserved!
2203 */
2204struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2205				      struct amdgpu_vm *vm,
2206				      struct amdgpu_bo *bo)
2207{
2208	struct amdgpu_bo_va *bo_va;
2209
2210	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2211	if (!bo_va)
2212		return NULL;
2213
2214	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2215
2216	bo_va->ref_count = 1;
2217	INIT_LIST_HEAD(&bo_va->valids);
2218	INIT_LIST_HEAD(&bo_va->invalids);
2219
2220	if (!bo)
2221		return bo_va;
2222
2223	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
2224		bo_va->is_xgmi = true;
2225		/* Power up XGMI if it can be potentially used */
2226		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
2227	}
2228
2229	return bo_va;
2230}
2231
2233/**
2234 * amdgpu_vm_bo_insert_map - insert a new mapping
2235 *
2236 * @adev: amdgpu_device pointer
2237 * @bo_va: bo_va to store the address
2238 * @mapping: the mapping to insert
2239 *
2240 * Insert a new mapping into all structures.
2241 */
2242static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2243				    struct amdgpu_bo_va *bo_va,
2244				    struct amdgpu_bo_va_mapping *mapping)
2245{
2246	struct amdgpu_vm *vm = bo_va->base.vm;
2247	struct amdgpu_bo *bo = bo_va->base.bo;
2248
2249	mapping->bo_va = bo_va;
2250	list_add(&mapping->list, &bo_va->invalids);
2251	amdgpu_vm_it_insert(mapping, &vm->va);
2252
2253	if (mapping->flags & AMDGPU_PTE_PRT)
2254		amdgpu_vm_prt_get(adev);
2255
2256	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
2257	    !bo_va->base.moved) {
2258		list_move(&bo_va->base.vm_status, &vm->moved);
2259	}
2260	trace_amdgpu_vm_bo_map(bo_va, mapping);
2261}
2262
2263/**
2264 * amdgpu_vm_bo_map - map bo inside a vm
2265 *
2266 * @adev: amdgpu_device pointer
2267 * @bo_va: bo_va to store the address
2268 * @saddr: where to map the BO
2269 * @offset: requested offset in the BO
2270 * @size: BO size in bytes
2271 * @flags: attributes of pages (read/write/valid/etc.)
2272 *
2273 * Add a mapping of the BO at the specified addr into the VM.
2274 *
2275 * Returns:
2276 * 0 for success, error for failure.
2277 *
2278 * Object has to be reserved and unreserved outside!
2279 */
2280int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2281		     struct amdgpu_bo_va *bo_va,
2282		     uint64_t saddr, uint64_t offset,
2283		     uint64_t size, uint64_t flags)
2284{
2285	struct amdgpu_bo_va_mapping *mapping, *tmp;
2286	struct amdgpu_bo *bo = bo_va->base.bo;
2287	struct amdgpu_vm *vm = bo_va->base.vm;
2288	uint64_t eaddr;
2289
2290	/* validate the parameters */
2291	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
2292	    size == 0 || size & ~PAGE_MASK)
2293		return -EINVAL;
2294
2295	/* make sure object fit at this offset */
2296	eaddr = saddr + size - 1;
2297	if (saddr >= eaddr ||
2298	    (bo && offset + size > amdgpu_bo_size(bo)) ||
2299	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2300		return -EINVAL;
2301
2302	saddr /= AMDGPU_GPU_PAGE_SIZE;
2303	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2304
2305	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2306	if (tmp) {
2307		/* bo and tmp overlap, invalid addr */
2308		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2309			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2310			tmp->start, tmp->last + 1);
2311		return -EINVAL;
2312	}
2313
2314	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2315	if (!mapping)
2316		return -ENOMEM;
2317
2318	mapping->start = saddr;
2319	mapping->last = eaddr;
2320	mapping->offset = offset;
2321	mapping->flags = flags;
2322
2323	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2324
2325	return 0;
2326}
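
/*
 * Illustrative usage sketch, not part of this file: map a whole BO at a
 * caller-chosen, page-aligned GPU VA (va_addr below is hypothetical).
 * Misaligned or overlapping ranges are rejected with -EINVAL before any
 * state changes:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */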
2327
2328/**
2329 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2330 *
2331 * @adev: amdgpu_device pointer
2332 * @bo_va: bo_va to store the address
2333 * @saddr: where to map the BO
2334 * @offset: requested offset in the BO
2335 * @size: BO size in bytes
2336 * @flags: attributes of pages (read/write/valid/etc.)
2337 *
2338 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2339 * mappings as we do so.
2340 *
2341 * Returns:
2342 * 0 for success, error for failure.
2343 *
2344 * Object has to be reserved and unreserved outside!
2345 */
2346int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2347			     struct amdgpu_bo_va *bo_va,
2348			     uint64_t saddr, uint64_t offset,
2349			     uint64_t size, uint64_t flags)
2350{
2351	struct amdgpu_bo_va_mapping *mapping;
2352	struct amdgpu_bo *bo = bo_va->base.bo;
2353	uint64_t eaddr;
2354	int r;
2355
2356	/* validate the parameters */
2357	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
2358	    size == 0 || size & ~PAGE_MASK)
2359		return -EINVAL;
2360
2361	/* make sure object fit at this offset */
2362	eaddr = saddr + size - 1;
2363	if (saddr >= eaddr ||
2364	    (bo && offset + size > amdgpu_bo_size(bo)) ||
2365	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2366		return -EINVAL;
2367
2368	/* Allocate all the needed memory */
2369	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2370	if (!mapping)
2371		return -ENOMEM;
2372
2373	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2374	if (r) {
2375		kfree(mapping);
2376		return r;
2377	}
2378
2379	saddr /= AMDGPU_GPU_PAGE_SIZE;
2380	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2381
2382	mapping->start = saddr;
2383	mapping->last = eaddr;
2384	mapping->offset = offset;
2385	mapping->flags = flags;
2386
2387	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2388
2389	return 0;
2390}
2391
2392/**
2393 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2394 *
2395 * @adev: amdgpu_device pointer
2396 * @bo_va: bo_va to remove the address from
2397 * @saddr: where the BO is mapped
2398 *
2399 * Remove a mapping of the BO at the specified addr from the VM.
2400 *
2401 * Returns:
2402 * 0 for success, error for failure.
2403 *
2404 * Object has to be reserved and unreserved outside!
2405 */
2406int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2407		       struct amdgpu_bo_va *bo_va,
2408		       uint64_t saddr)
2409{
2410	struct amdgpu_bo_va_mapping *mapping;
2411	struct amdgpu_vm *vm = bo_va->base.vm;
2412	bool valid = true;
2413
2414	saddr /= AMDGPU_GPU_PAGE_SIZE;
2415
2416	list_for_each_entry(mapping, &bo_va->valids, list) {
2417		if (mapping->start == saddr)
2418			break;
2419	}
2420
2421	if (&mapping->list == &bo_va->valids) {
2422		valid = false;
2423
2424		list_for_each_entry(mapping, &bo_va->invalids, list) {
2425			if (mapping->start == saddr)
2426				break;
2427		}
2428
2429		if (&mapping->list == &bo_va->invalids)
2430			return -ENOENT;
2431	}
2432
2433	list_del(&mapping->list);
2434	amdgpu_vm_it_remove(mapping, &vm->va);
2435	mapping->bo_va = NULL;
2436	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2437
2438	if (valid)
2439		list_add(&mapping->list, &vm->freed);
2440	else
2441		amdgpu_vm_free_mapping(adev, vm, mapping,
2442				       bo_va->last_pt_update);
2443
2444	return 0;
2445}
2446
2447/**
2448 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2449 *
2450 * @adev: amdgpu_device pointer
2451 * @vm: VM structure to use
2452 * @saddr: start of the range
2453 * @size: size of the range
2454 *
2455 * Remove all mappings in a range, splitting them as appropriate.
2456 *
2457 * Returns:
2458 * 0 for success, error for failure.
2459 */
2460int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2461				struct amdgpu_vm *vm,
2462				uint64_t saddr, uint64_t size)
2463{
2464	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2465	LIST_HEAD(removed);
2466	uint64_t eaddr;
2467
2468	eaddr = saddr + size - 1;
2469	saddr /= AMDGPU_GPU_PAGE_SIZE;
2470	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2471
2472	/* Allocate all the needed memory */
2473	before = kzalloc(sizeof(*before), GFP_KERNEL);
2474	if (!before)
2475		return -ENOMEM;
2476	INIT_LIST_HEAD(&before->list);
2477
2478	after = kzalloc(sizeof(*after), GFP_KERNEL);
2479	if (!after) {
2480		kfree(before);
2481		return -ENOMEM;
2482	}
2483	INIT_LIST_HEAD(&after->list);
2484
2485	/* Now gather all removed mappings */
2486	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2487	while (tmp) {
2488		/* Remember mapping split at the start */
2489		if (tmp->start < saddr) {
2490			before->start = tmp->start;
2491			before->last = saddr - 1;
2492			before->offset = tmp->offset;
2493			before->flags = tmp->flags;
2494			before->bo_va = tmp->bo_va;
2495			list_add(&before->list, &tmp->bo_va->invalids);
2496		}
2497
2498		/* Remember mapping split at the end */
2499		if (tmp->last > eaddr) {
2500			after->start = eaddr + 1;
2501			after->last = tmp->last;
2502			after->offset = tmp->offset;
2503			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2504			after->flags = tmp->flags;
2505			after->bo_va = tmp->bo_va;
2506			list_add(&after->list, &tmp->bo_va->invalids);
2507		}
2508
2509		list_del(&tmp->list);
2510		list_add(&tmp->list, &removed);
2511
2512		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2513	}
2514
2515	/* And free them up */
2516	list_for_each_entry_safe(tmp, next, &removed, list) {
2517		amdgpu_vm_it_remove(tmp, &vm->va);
2518		list_del(&tmp->list);
2519
2520		if (tmp->start < saddr)
2521			tmp->start = saddr;
2522		if (tmp->last > eaddr)
2523			tmp->last = eaddr;
2524
2525		tmp->bo_va = NULL;
2526		list_add(&tmp->list, &vm->freed);
2527		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2528	}
2529
2530	/* Insert partial mapping before the range */
2531	if (!list_empty(&before->list)) {
2532		amdgpu_vm_it_insert(before, &vm->va);
2533		if (before->flags & AMDGPU_PTE_PRT)
2534			amdgpu_vm_prt_get(adev);
2535	} else {
2536		kfree(before);
2537	}
2538
2539	/* Insert partial mapping after the range */
2540	if (!list_empty(&after->list)) {
2541		amdgpu_vm_it_insert(after, &vm->va);
2542		if (after->flags & AMDGPU_PTE_PRT)
2543			amdgpu_vm_prt_get(adev);
2544	} else {
2545		kfree(after);
2546	}
2547
2548	return 0;
2549}
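
/*
 * Worked example, illustrative only: clearing [0x3000, 0x5000) out of a
 * single mapping covering [0x1000, 0x9000) leaves two remainders. "before"
 * keeps [0x1000, 0x3000) with the original offset, "after" keeps
 * [0x5000, 0x9000) with the offset advanced by 0x4000 bytes, and the middle
 * piece is clamped to the cleared range and moved to the freed list.
 */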
2550
2551/**
2552 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2553 *
2554 * @vm: the requested VM
2555 * @addr: the address
2556 *
2557 * Find a mapping by its address.
2558 *
2559 * Returns:
2560 * The amdgpu_bo_va_mapping matching for addr or NULL
2561 *
2562 */
2563struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2564							 uint64_t addr)
2565{
2566	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2567}
2568
2569/**
2570 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2571 *
2572 * @vm: the requested vm
2573 * @ticket: CS ticket
2574 *
2575 * Trace all mappings of BOs reserved during a command submission.
2576 */
2577void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2578{
2579	struct amdgpu_bo_va_mapping *mapping;
2580
2581	if (!trace_amdgpu_vm_bo_cs_enabled())
2582		return;
2583
2584	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2585	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2586		if (mapping->bo_va && mapping->bo_va->base.bo) {
2587			struct amdgpu_bo *bo;
2588
2589			bo = mapping->bo_va->base.bo;
2590			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2591			    ticket)
2592				continue;
2593		}
2594
2595		trace_amdgpu_vm_bo_cs(mapping);
2596	}
2597}
2598
2599/**
2600 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2601 *
2602 * @adev: amdgpu_device pointer
2603 * @bo_va: requested bo_va
2604 *
2605 * Remove @bo_va->bo from the requested vm.
2606 *
2607 * Object has to be reserved!
2608 */
2609void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2610		      struct amdgpu_bo_va *bo_va)
2611{
2612	struct amdgpu_bo_va_mapping *mapping, *next;
2613	struct amdgpu_bo *bo = bo_va->base.bo;
2614	struct amdgpu_vm *vm = bo_va->base.vm;
2615	struct amdgpu_vm_bo_base **base;
2616
2617	if (bo) {
2618		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2619			vm->bulk_moveable = false;
2620
2621		for (base = &bo_va->base.bo->vm_bo; *base;
2622		     base = &(*base)->next) {
2623			if (*base != &bo_va->base)
2624				continue;
2625
2626			*base = bo_va->base.next;
2627			break;
2628		}
2629	}
2630
2631	spin_lock(&vm->invalidated_lock);
2632	list_del(&bo_va->base.vm_status);
2633	spin_unlock(&vm->invalidated_lock);
2634
2635	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2636		list_del(&mapping->list);
2637		amdgpu_vm_it_remove(mapping, &vm->va);
2638		mapping->bo_va = NULL;
2639		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2640		list_add(&mapping->list, &vm->freed);
2641	}
2642	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2643		list_del(&mapping->list);
2644		amdgpu_vm_it_remove(mapping, &vm->va);
2645		amdgpu_vm_free_mapping(adev, vm, mapping,
2646				       bo_va->last_pt_update);
2647	}
2648
2649	dma_fence_put(bo_va->last_pt_update);
2650
2651	if (bo && bo_va->is_xgmi)
2652		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2653
2654	kfree(bo_va);
2655}
2656
2657/**
2658 * amdgpu_vm_evictable - check if we can evict a VM
2659 *
2660 * @bo: A page table of the VM.
2661 *
2662 * Check if it is possible to evict a VM.
2663 */
2664bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2665{
2666	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2667
2668	/* Page tables of a destroyed VM can go away immediately */
2669	if (!bo_base || !bo_base->vm)
2670		return true;
2671
2672	/* Don't evict VM page tables while they are busy */
2673	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
2674		return false;
2675
2676	/* Try to block ongoing updates */
2677	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2678		return false;
2679
2680	/* Don't evict VM page tables while they are updated */
2681	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2682		amdgpu_vm_eviction_unlock(bo_base->vm);
2683		return false;
2684	}
2685
2686	bo_base->vm->evicting = true;
2687	amdgpu_vm_eviction_unlock(bo_base->vm);
2688	return true;
2689}
2690
2691/**
2692 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2693 *
2694 * @adev: amdgpu_device pointer
2695 * @bo: amdgpu buffer object
2696 * @evicted: is the BO evicted
2697 *
2698 * Mark @bo as invalid.
2699 */
2700void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2701			     struct amdgpu_bo *bo, bool evicted)
2702{
2703	struct amdgpu_vm_bo_base *bo_base;
2704
2705	/* shadow bo doesn't have bo base, its validation needs its parent */
2706	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2707		bo = bo->parent;
2708
2709	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2710		struct amdgpu_vm *vm = bo_base->vm;
2711
2712		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2713			amdgpu_vm_bo_evicted(bo_base);
2714			continue;
2715		}
2716
2717		if (bo_base->moved)
2718			continue;
2719		bo_base->moved = true;
2720
2721		if (bo->tbo.type == ttm_bo_type_kernel)
2722			amdgpu_vm_bo_relocated(bo_base);
2723		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2724			amdgpu_vm_bo_moved(bo_base);
2725		else
2726			amdgpu_vm_bo_invalidated(bo_base);
2727	}
2728}
2729
2730/**
2731 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2732 *
2733 * @vm_size: VM size
2734 *
2735 * Returns:
2736 * VM page table size as a power of two
2737 */
2738static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2739{
2740	/* Total bits covered by PD + PTs */
2741	unsigned bits = ilog2(vm_size) + 18;
2742
2743	/* Make sure the PD is 4K in size up to 8GB address space.
2744	   Above that, split equally between PD and PTs. */
2745	if (vm_size <= 8)
2746		return (bits - 9);
2747	else
2748		return ((bits + 3) / 2);
2749}
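
/*
 * Worked example, illustrative only: an 8 GB VM needs ilog2(8) + 18 = 21
 * bits of page addresses; with a 4K (9 bit) PD that leaves a block size of
 * 21 - 9 = 12. A 256 GB VM gives bits = 26, split as (26 + 3) / 2 = 14 bits
 * for the page tables and the remainder for the PD.
 */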
2750
2751/**
2752 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2753 *
2754 * @adev: amdgpu_device pointer
2755 * @min_vm_size: the minimum vm size in GB if it's set to auto
2756 * @fragment_size_default: Default PTE fragment size
2757 * @max_level: max VMPT level
2758 * @max_bits: max address space size in bits
2759 *
2760 */
2761void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2762			   uint32_t fragment_size_default, unsigned max_level,
2763			   unsigned max_bits)
2764{
2765	unsigned int max_size = 1 << (max_bits - 30);
2766	unsigned int vm_size;
2767	uint64_t tmp;
2768
2769	/* adjust vm size first */
2770	if (amdgpu_vm_size != -1) {
2771		vm_size = amdgpu_vm_size;
2772		if (vm_size > max_size) {
2773			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2774				 amdgpu_vm_size, max_size);
2775			vm_size = max_size;
2776		}
2777	} else {
2778		struct sysinfo si;
2779		unsigned int phys_ram_gb;
2780
2781		/* Optimal VM size depends on the amount of physical
2782		 * RAM available. Underlying requirements and
2783		 * assumptions:
2784		 *
2785		 *  - Need to map system memory and VRAM from all GPUs
2786		 *     - VRAM from other GPUs not known here
2787		 *     - Assume VRAM <= system memory
2788		 *  - On GFX8 and older, VM space can be segmented for
2789		 *    different MTYPEs
2790		 *  - Need to allow room for fragmentation, guard pages etc.
2791		 *
2792		 * This adds up to a rough guess of system memory x3.
2793		 * Round up to power of two to maximize the available
2794		 * VM size with the given page table size.
2795		 */
2796		si_meminfo(&si);
2797		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2798			       (1 << 30) - 1) >> 30;
2799		vm_size = roundup_pow_of_two(
2800			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2801	}
2802
2803	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2804
2805	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2806	if (amdgpu_vm_block_size != -1)
2807		tmp >>= amdgpu_vm_block_size - 9;
2808	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2809	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2810	switch (adev->vm_manager.num_level) {
2811	case 3:
2812		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2813		break;
2814	case 2:
2815		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2816		break;
2817	case 1:
2818		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2819		break;
2820	default:
2821		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2822	}
2823	/* block size depends on vm size and hw setup */
2824	if (amdgpu_vm_block_size != -1)
2825		adev->vm_manager.block_size =
2826			min((unsigned)amdgpu_vm_block_size, max_bits
2827			    - AMDGPU_GPU_PAGE_SHIFT
2828			    - 9 * adev->vm_manager.num_level);
2829	else if (adev->vm_manager.num_level > 1)
2830		adev->vm_manager.block_size = 9;
2831	else
2832		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2833
2834	if (amdgpu_vm_fragment_size == -1)
2835		adev->vm_manager.fragment_size = fragment_size_default;
2836	else
2837		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2838
2839	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2840		 vm_size, adev->vm_manager.num_level + 1,
2841		 adev->vm_manager.block_size,
2842		 adev->vm_manager.fragment_size);
2843}
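
/*
 * Worked example, illustrative only: with 16 GB of system RAM and
 * amdgpu_vm_size == -1, phys_ram_gb = 16 and vm_size becomes
 * roundup_pow_of_two(max(16 * 3, min_vm_size)) = 64 GB (assuming
 * min_vm_size <= 48 and max_size >= 64), so max_pfn = 64 << 18 GPU pages.
 */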
2844
2845/**
2846 * amdgpu_vm_wait_idle - wait for the VM to become idle
2847 *
2848 * @vm: VM object to wait for
2849 * @timeout: timeout to wait for VM to become idle
2850 */
2851long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2852{
2853	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
2854					true, timeout);
2855	if (timeout <= 0)
2856		return timeout;
2857
2858	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2859}
2860
2861/**
2862 * amdgpu_vm_init - initialize a vm instance
2863 *
2864 * @adev: amdgpu_device pointer
2865 * @vm: requested vm
2866 * @pasid: Process address space identifier
2867 *
2868 * Init @vm fields.
2869 *
2870 * Returns:
2871 * 0 for success, error for failure.
2872 */
2873int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
2874{
2875	struct amdgpu_bo *root_bo;
2876	struct amdgpu_bo_vm *root;
2877	int r, i;
2878
2879	vm->va = RB_ROOT_CACHED;
2880	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2881		vm->reserved_vmid[i] = NULL;
2882	INIT_LIST_HEAD(&vm->evicted);
2883	INIT_LIST_HEAD(&vm->relocated);
2884	INIT_LIST_HEAD(&vm->moved);
2885	INIT_LIST_HEAD(&vm->idle);
2886	INIT_LIST_HEAD(&vm->invalidated);
2887	spin_lock_init(&vm->invalidated_lock);
2888	INIT_LIST_HEAD(&vm->freed);
2889	INIT_LIST_HEAD(&vm->done);
2890
2891	/* create scheduler entities for page table updates */
2892	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
2893				  adev->vm_manager.vm_pte_scheds,
2894				  adev->vm_manager.vm_pte_num_scheds, NULL);
2895	if (r)
2896		return r;
2897
2898	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2899				  adev->vm_manager.vm_pte_scheds,
2900				  adev->vm_manager.vm_pte_num_scheds, NULL);
2901	if (r)
2902		goto error_free_immediate;
2903
2904	vm->pte_support_ats = false;
2905	vm->is_compute_context = false;
2906
2907	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2908				    AMDGPU_VM_USE_CPU_FOR_GFX);
2909
2910	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2911			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2912	WARN_ONCE((vm->use_cpu_for_update &&
2913		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2914		  "CPU update of VM recommended only for large BAR system\n");
2915
2916	if (vm->use_cpu_for_update)
2917		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2918	else
2919		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2920	vm->last_update = NULL;
2921	vm->last_unlocked = dma_fence_get_stub();
2922
2923	mutex_init(&vm->eviction_lock);
2924	vm->evicting = false;
2925
2926	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2927				false, &root);
2928	if (r)
2929		goto error_free_delayed;
2930	root_bo = &root->bo;
2931	r = amdgpu_bo_reserve(root_bo, true);
2932	if (r)
2933		goto error_free_root;
2934
2935	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
2936	if (r)
2937		goto error_unreserve;
2938
2939	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2940
2941	r = amdgpu_vm_clear_bo(adev, vm, root, false);
2942	if (r)
2943		goto error_unreserve;
2944
2945	amdgpu_bo_unreserve(vm->root.bo);
2946
2947	if (pasid) {
2948		unsigned long flags;
2949
2950		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2951		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2952			      GFP_ATOMIC);
2953		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2954		if (r < 0)
2955			goto error_free_root;
2956
2957		vm->pasid = pasid;
2958	}
2959
2960	INIT_KFIFO(vm->faults);
2961
2962	return 0;
2963
2964error_unreserve:
2965	amdgpu_bo_unreserve(vm->root.bo);
2966
2967error_free_root:
2968	amdgpu_bo_unref(&root->shadow);
2969	amdgpu_bo_unref(&root_bo);
2970	vm->root.bo = NULL;
2971
2972error_free_delayed:
2973	dma_fence_put(vm->last_unlocked);
2974	drm_sched_entity_destroy(&vm->delayed);
2975
2976error_free_immediate:
2977	drm_sched_entity_destroy(&vm->immediate);
2978
2979	return r;
2980}
2981
2982/**
2983 * amdgpu_vm_check_clean_reserved - check if a VM is clean
2984 *
2985 * @adev: amdgpu_device pointer
2986 * @vm: the VM to check
2987 *
2988 * Check all entries of the root PD. If any subsequent PDs are allocated,
2989 * page tables are being created and filled in, so this is not a clean
2990 * VM.
2991 *
2992 * Returns:
2993 *	0 if this VM is clean
2994 */
2995static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2996					  struct amdgpu_vm *vm)
2997{
2998	enum amdgpu_vm_level root = adev->vm_manager.root_level;
2999	unsigned int entries = amdgpu_vm_num_entries(adev, root);
3000	unsigned int i = 0;
3001
3002	for (i = 0; i < entries; i++) {
3003		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
3004			return -EINVAL;
3005	}
3006
3007	return 0;
3008}
3009
3010/**
3011 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3012 *
3013 * @adev: amdgpu_device pointer
3014 * @vm: requested vm
3015 * @pasid: pasid to use
3016 *
3017 * This only works on GFX VMs that don't have any BOs added and no
3018 * page tables allocated yet.
3019 *
3020 * Changes the following VM parameters:
3021 * - use_cpu_for_update
3022 * - pte_supports_ats
3023 * - pasid (old PASID is released, because compute manages its own PASIDs)
3024 *
3025 * Reinitializes the page directory to reflect the changed ATS
3026 * setting.
3027 *
3028 * Returns:
3029 * 0 for success, -errno for errors.
3030 */
3031int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
3032			   u32 pasid)
3033{
3034	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
3035	int r;
3036
3037	r = amdgpu_bo_reserve(vm->root.bo, true);
3038	if (r)
3039		return r;
3040
3041	/* Sanity checks */
3042	r = amdgpu_vm_check_clean_reserved(adev, vm);
3043	if (r)
3044		goto unreserve_bo;
3045
3046	if (pasid) {
3047		unsigned long flags;
3048
3049		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3050		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3051			      GFP_ATOMIC);
3052		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3053
3054		if (r == -ENOSPC)
3055			goto unreserve_bo;
3056		r = 0;
3057	}
3058
3059	/* Check if PD needs to be reinitialized and do it before
3060	 * changing any other state, in case it fails.
3061	 */
3062	if (pte_support_ats != vm->pte_support_ats) {
3063		vm->pte_support_ats = pte_support_ats;
3064		r = amdgpu_vm_clear_bo(adev, vm,
3065				       to_amdgpu_bo_vm(vm->root.bo),
3066				       false);
3067		if (r)
3068			goto free_idr;
3069	}
3070
3071	/* Update VM state */
3072	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3073				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3074	DRM_DEBUG_DRIVER("VM update mode is %s\n",
3075			 vm->use_cpu_for_update ? "CPU" : "SDMA");
3076	WARN_ONCE((vm->use_cpu_for_update &&
3077		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3078		  "CPU update of VM recommended only for large BAR system\n");
3079
3080	if (vm->use_cpu_for_update) {
3081		/* Sync with last SDMA update/clear before switching to CPU */
3082		r = amdgpu_bo_sync_wait(vm->root.bo,
3083					AMDGPU_FENCE_OWNER_UNDEFINED, true);
3084		if (r)
3085			goto free_idr;
3086
3087		vm->update_funcs = &amdgpu_vm_cpu_funcs;
3088	} else {
3089		vm->update_funcs = &amdgpu_vm_sdma_funcs;
3090	}
3091	dma_fence_put(vm->last_update);
3092	vm->last_update = NULL;
3093	vm->is_compute_context = true;
3094
3095	if (vm->pasid) {
3096		unsigned long flags;
3097
3098		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3099		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3100		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3101
3102		/* Free the original amdgpu allocated pasid,
3103		 * it will be replaced with a kfd allocated pasid.
3104		 */
3105		amdgpu_pasid_free(vm->pasid);
3106		vm->pasid = 0;
3107	}
3108
3109	/* Free the shadow bo for compute VM */
3110	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
3111
3112	if (pasid)
3113		vm->pasid = pasid;
3114
3115	goto unreserve_bo;
3116
3117free_idr:
3118	if (pasid) {
3119		unsigned long flags;
3120
3121		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3122		idr_remove(&adev->vm_manager.pasid_idr, pasid);
3123		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3124	}
3125unreserve_bo:
3126	amdgpu_bo_unreserve(vm->root.bo);
3127	return r;
3128}
3129
3130/**
3131 * amdgpu_vm_release_compute - release a compute vm
3132 * @adev: amdgpu_device pointer
3133 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3134 *
3135 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
3136 * compute pasid from the vm. Compute should stop using the vm after this call.
3137 */
3138void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3139{
3140	if (vm->pasid) {
3141		unsigned long flags;
3142
3143		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3144		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3145		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3146	}
3147	vm->pasid = 0;
3148	vm->is_compute_context = false;
3149}
3150
3151/**
3152 * amdgpu_vm_fini - tear down a vm instance
3153 *
3154 * @adev: amdgpu_device pointer
3155 * @vm: requested vm
3156 *
3157 * Tear down @vm.
3158 * Unbind the VM and remove all bos from the vm bo list
3159 */
3160void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3161{
3162	struct amdgpu_bo_va_mapping *mapping, *tmp;
3163	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
3164	struct amdgpu_bo *root;
3165	int i;
3166
3167	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
3168
3169	root = amdgpu_bo_ref(vm->root.bo);
3170	amdgpu_bo_reserve(root, true);
3171	if (vm->pasid) {
3172		unsigned long flags;
3173
3174		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3175		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3176		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3177		vm->pasid = 0;
3178	}
3179
3180	dma_fence_wait(vm->last_unlocked, false);
3181	dma_fence_put(vm->last_unlocked);
3182
3183	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
3184		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
3185			amdgpu_vm_prt_fini(adev, vm);
3186			prt_fini_needed = false;
3187		}
3188
3189		list_del(&mapping->list);
3190		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
3191	}
3192
3193	amdgpu_vm_free_pts(adev, vm, NULL);
3194	amdgpu_bo_unreserve(root);
3195	amdgpu_bo_unref(&root);
3196	WARN_ON(vm->root.bo);
3197
3198	drm_sched_entity_destroy(&vm->immediate);
3199	drm_sched_entity_destroy(&vm->delayed);
3200
3201	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3202		dev_err(adev->dev, "still active bo inside vm\n");
3203	}
3204	rbtree_postorder_for_each_entry_safe(mapping, tmp,
3205					     &vm->va.rb_root, rb) {
3206		/* Don't remove the mapping here, we don't want to trigger a
3207		 * rebalance and the tree is about to be destroyed anyway.
3208		 */
3209		list_del(&mapping->list);
3210		kfree(mapping);
3211	}
3212
3213	dma_fence_put(vm->last_update);
3214	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3215		amdgpu_vmid_free_reserved(adev, vm, i);
3216}
3217
3218/**
3219 * amdgpu_vm_manager_init - init the VM manager
3220 *
3221 * @adev: amdgpu_device pointer
3222 *
3223 * Initialize the VM manager structures
3224 */
3225void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3226{
3227	unsigned i;
3228
3229	/* Concurrent flushes are only possible starting with Vega10 and
3230	 * are broken on Navi10 and Navi14.
3231	 */
3232	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
3233					      adev->asic_type == CHIP_NAVI10 ||
3234					      adev->asic_type == CHIP_NAVI14);
3235	amdgpu_vmid_mgr_init(adev);
3236
3237	adev->vm_manager.fence_context =
3238		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3239	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3240		adev->vm_manager.seqno[i] = 0;
3241
3242	spin_lock_init(&adev->vm_manager.prt_lock);
3243	atomic_set(&adev->vm_manager.num_prt_users, 0);
3244
3245	/* Unless overridden by the user, compute VM tables are updated by the
3246	 * CPU by default, but only on large BAR systems.
3247	 */
3248#ifdef CONFIG_X86_64
3249	if (amdgpu_vm_update_mode == -1) {
3250		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3251			adev->vm_manager.vm_update_mode =
3252				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3253		else
3254			adev->vm_manager.vm_update_mode = 0;
3255	} else
3256		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3257#else
3258	adev->vm_manager.vm_update_mode = 0;
3259#endif
3260
3261	idr_init(&adev->vm_manager.pasid_idr);
3262	spin_lock_init(&adev->vm_manager.pasid_lock);
3263}
3264
3265/**
3266 * amdgpu_vm_manager_fini - cleanup VM manager
3267 *
3268 * @adev: amdgpu_device pointer
3269 *
3270 * Cleanup the VM manager and free resources.
3271 */
3272void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3273{
3274	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3275	idr_destroy(&adev->vm_manager.pasid_idr);
3276
3277	amdgpu_vmid_mgr_fini(adev);
3278}
3279
3280/**
3281 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3282 *
3283 * @dev: drm device pointer
3284 * @data: drm_amdgpu_vm
3285 * @filp: drm file pointer
3286 *
3287 * Returns:
3288 * 0 for success, -errno for errors.
3289 */
3290int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3291{
3292	union drm_amdgpu_vm *args = data;
3293	struct amdgpu_device *adev = drm_to_adev(dev);
3294	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3295	long timeout = msecs_to_jiffies(2000);
3296	int r;
3297
3298	switch (args->in.op) {
3299	case AMDGPU_VM_OP_RESERVE_VMID:
3300		/* We only need to reserve the VMID from the GFX hub */
3301		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3302					       AMDGPU_GFXHUB_0);
3303		if (r)
3304			return r;
3305		break;
3306	case AMDGPU_VM_OP_UNRESERVE_VMID:
3307		if (amdgpu_sriov_runtime(adev))
3308			timeout = 8 * timeout;
3309
3310		/* Wait for the VM to become idle, to make sure the VMID set
3311		 * in SPM_VMID is not referenced anymore.
3312		 */
3313		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
3314		if (r)
3315			return r;
3316
3317		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
3318		if (r < 0)
3319			return r;
3320
3321		amdgpu_bo_unreserve(fpriv->vm.root.bo);
3322		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3323		break;
3324	default:
3325		return -EINVAL;
3326	}
3327
3328	return 0;
3329}
3330
3331/**
3332 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3333 *
3334 * @adev: amdgpu device pointer
3335 * @pasid: PASID identifier for VM
3336 * @task_info: task_info to fill.
3337 */
3338void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
3339			 struct amdgpu_task_info *task_info)
3340{
3341	struct amdgpu_vm *vm;
3342	unsigned long flags;
3343
3344	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3345
3346	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3347	if (vm)
3348		*task_info = vm->task_info;
3349
3350	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3351}
3352
3353/**
3354 * amdgpu_vm_set_task_info - Sets VMs task info.
3355 *
3356 * @vm: vm for which to set the info
3357 */
3358void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3359{
3360	if (vm->task_info.pid)
3361		return;
3362
3363	vm->task_info.pid = current->pid;
3364	get_task_comm(vm->task_info.task_name, current);
3365
3366	if (current->group_leader->mm != current->mm)
3367		return;
3368
3369	vm->task_info.tgid = current->group_leader->pid;
3370	get_task_comm(vm->task_info.process_name, current->group_leader);
3371}
3372
3373/**
3374 * amdgpu_vm_handle_fault - graceful handling of VM faults.
3375 * @adev: amdgpu device pointer
3376 * @pasid: PASID of the VM
3377 * @addr: Address of the fault
3378 *
3379 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3380 * shouldn't be reported any more.
3381 */
3382bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
3383			    uint64_t addr)
3384{
3385	bool is_compute_context = false;
3386	struct amdgpu_bo *root;
3387	unsigned long irqflags;
3388	uint64_t value, flags;
3389	struct amdgpu_vm *vm;
3390	int r;
3391
3392	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
3393	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3394	if (vm) {
3395		root = amdgpu_bo_ref(vm->root.bo);
3396		is_compute_context = vm->is_compute_context;
3397	} else {
3398		root = NULL;
3399	}
3400	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
3401
3402	if (!root)
3403		return false;
3404
3405	addr /= AMDGPU_GPU_PAGE_SIZE;
3406
3407	if (is_compute_context &&
3408	    !svm_range_restore_pages(adev, pasid, addr)) {
3409		amdgpu_bo_unref(&root);
3410		return true;
3411	}
3412
3413	r = amdgpu_bo_reserve(root, true);
3414	if (r)
3415		goto error_unref;
3416
3417	/* Double check that the VM still exists */
3418	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
3419	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3420	if (vm && vm->root.bo != root)
3421		vm = NULL;
3422	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
3423	if (!vm)
3424		goto error_unlock;
3425
 
3426	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3427		AMDGPU_PTE_SYSTEM;
3428
3429	if (is_compute_context) {
3430		/* Intentionally setting invalid PTE flag
3431		 * combination to force a no-retry-fault
3432		 */
3433		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
3434			AMDGPU_PTE_TF;
3435		value = 0;
3436	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3437		/* Redirect the access to the dummy page */
3438		value = adev->dummy_page_addr;
3439		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3440			AMDGPU_PTE_WRITEABLE;
3441
3442	} else {
3443		/* Let the hw retry silently on the PTE */
3444		value = 0;
3445	}
3446
3447	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
3448	if (r) {
3449		pr_debug("failed %d to reserve fence slot\n", r);
3450		goto error_unlock;
3451	}
3452
3453	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
3454					addr, flags, value, NULL, NULL, NULL,
3455					NULL);
3456	if (r)
3457		goto error_unlock;
3458
3459	r = amdgpu_vm_update_pdes(adev, vm, true);
3460
3461error_unlock:
3462	amdgpu_bo_unreserve(root);
3463	if (r < 0)
3464		DRM_ERROR("Can't handle page fault (%d)\n", r);
3465
3466error_unref:
3467	amdgpu_bo_unref(&root);
3468
3469	return false;
3470}
3471
3472#if defined(CONFIG_DEBUG_FS)
3473/**
3474 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3475 *
3476 * @vm: Requested VM for printing BO info
3477 * @m: debugfs file
3478 *
3479 * Print BO information in debugfs file for the VM
3480 */
3481void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3482{
3483	struct amdgpu_bo_va *bo_va, *tmp;
3484	u64 total_idle = 0;
3485	u64 total_evicted = 0;
3486	u64 total_relocated = 0;
3487	u64 total_moved = 0;
3488	u64 total_invalidated = 0;
3489	u64 total_done = 0;
3490	unsigned int total_idle_objs = 0;
3491	unsigned int total_evicted_objs = 0;
3492	unsigned int total_relocated_objs = 0;
3493	unsigned int total_moved_objs = 0;
3494	unsigned int total_invalidated_objs = 0;
3495	unsigned int total_done_objs = 0;
3496	unsigned int id = 0;
3497
3498	seq_puts(m, "\tIdle BOs:\n");
3499	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3500		if (!bo_va->base.bo)
3501			continue;
3502		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3503	}
3504	total_idle_objs = id;
3505	id = 0;
3506
3507	seq_puts(m, "\tEvicted BOs:\n");
3508	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3509		if (!bo_va->base.bo)
3510			continue;
3511		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3512	}
3513	total_evicted_objs = id;
3514	id = 0;
3515
3516	seq_puts(m, "\tRelocated BOs:\n");
3517	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3518		if (!bo_va->base.bo)
3519			continue;
3520		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3521	}
3522	total_relocated_objs = id;
3523	id = 0;
3524
3525	seq_puts(m, "\tMoved BOs:\n");
3526	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3527		if (!bo_va->base.bo)
3528			continue;
3529		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3530	}
3531	total_moved_objs = id;
3532	id = 0;
3533
3534	seq_puts(m, "\tInvalidated BOs:\n");
3535	spin_lock(&vm->invalidated_lock);
3536	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3537		if (!bo_va->base.bo)
3538			continue;
3539		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3540	}
3541	total_invalidated_objs = id;
3542	id = 0;
3543
3544	seq_puts(m, "\tDone BOs:\n");
3545	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3546		if (!bo_va->base.bo)
3547			continue;
3548		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3549	}
3550	spin_unlock(&vm->invalidated_lock);
3551	total_done_objs = id;
3552
3553	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3554		   total_idle_objs);
3555	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3556		   total_evicted_objs);
3557	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3558		   total_relocated_objs);
3559	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3560		   total_moved_objs);
3561	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3562		   total_invalidated_objs);
3563	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3564		   total_done_objs);
3565}
3566#endif
v5.9
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
  28#include <linux/dma-fence-array.h>
  29#include <linux/interval_tree_generic.h>
  30#include <linux/idr.h>
 
  31
  32#include <drm/amdgpu_drm.h>
 
  33#include "amdgpu.h"
  34#include "amdgpu_trace.h"
  35#include "amdgpu_amdkfd.h"
  36#include "amdgpu_gmc.h"
  37#include "amdgpu_xgmi.h"
 
 
 
  38
  39/**
  40 * DOC: GPUVM
  41 *
  42 * GPUVM is similar to the legacy gart on older asics, however
  43 * rather than there being a single global gart table
  44 * for the entire GPU, there are multiple VM page tables active
  45 * at any given time.  The VM page tables can contain a mix
  46 * vram pages and system memory pages and system memory pages
  47 * can be mapped as snooped (cached system pages) or unsnooped
  48 * (uncached system pages).
  49 * Each VM has an ID associated with it and there is a page table
  50 * associated with each VMID.  When execting a command buffer,
  51 * the kernel tells the the ring what VMID to use for that command
  52 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  53 * The userspace drivers maintain their own address space and the kernel
  54 * sets up their pages tables accordingly when they submit their
  55 * command buffers and a VMID is assigned.
  56 * Cayman/Trinity support up to 8 active VMs at any given time;
  57 * SI supports 16.
  58 */
  59
  60#define START(node) ((node)->start)
  61#define LAST(node) ((node)->last)
  62
  63INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
  64		     START, LAST, static, amdgpu_vm_it)
  65
  66#undef START
  67#undef LAST
  68
  69/**
  70 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
  71 */
  72struct amdgpu_prt_cb {
  73
  74	/**
  75	 * @adev: amdgpu device
  76	 */
  77	struct amdgpu_device *adev;
  78
  79	/**
  80	 * @cb: callback
  81	 */
  82	struct dma_fence_cb cb;
  83};
  84
  85/*
  86 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  87 * happens while holding this lock anywhere to prevent deadlocks when
  88 * an MMU notifier runs in reclaim-FS context.
  89 */
  90static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
  91{
  92	mutex_lock(&vm->eviction_lock);
  93	vm->saved_flags = memalloc_nofs_save();
  94}
  95
  96static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
  97{
  98	if (mutex_trylock(&vm->eviction_lock)) {
  99		vm->saved_flags = memalloc_nofs_save();
 100		return 1;
 101	}
 102	return 0;
 103}
 104
 105static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
 106{
 107	memalloc_nofs_restore(vm->saved_flags);
 108	mutex_unlock(&vm->eviction_lock);
 109}
 110
 111/**
 112 * amdgpu_vm_level_shift - return the addr shift for each level
 113 *
 114 * @adev: amdgpu_device pointer
 115 * @level: VMPT level
 116 *
 117 * Returns:
 118 * The number of bits the pfn needs to be right shifted for a level.
 119 */
 120static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
 121				      unsigned level)
 122{
 123	switch (level) {
 124	case AMDGPU_VM_PDB2:
 125	case AMDGPU_VM_PDB1:
 126	case AMDGPU_VM_PDB0:
 127		return 9 * (AMDGPU_VM_PDB0 - level) +
 128			adev->vm_manager.block_size;
 129	case AMDGPU_VM_PTB:
 130		return 0;
 131	default:
 132		return ~0;
 133	}
 134}
 135
 136/**
 137 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 138 *
 139 * @adev: amdgpu_device pointer
 140 * @level: VMPT level
 141 *
 142 * Returns:
 143 * The number of entries in a page directory or page table.
 144 */
 145static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
 146				      unsigned level)
 147{
 148	unsigned shift = amdgpu_vm_level_shift(adev,
 149					       adev->vm_manager.root_level);
 150
 151	if (level == adev->vm_manager.root_level)
 152		/* For the root directory */
 153		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
 154			>> shift;
 155	else if (level != AMDGPU_VM_PTB)
 156		/* Everything in between */
 157		return 512;
 158	else
 159		/* For the page tables on the leaves */
 160		return AMDGPU_VM_PTE_COUNT(adev);
 161}
 162
 163/**
 164 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 165 *
 166 * @adev: amdgpu_device pointer
 167 *
 168 * Returns:
 169 * The number of entries in the root page directory which needs the ATS setting.
 170 */
 171static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
 172{
 173	unsigned shift;
 174
 175	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
 176	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
 177}
 178
 179/**
 180 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 181 *
 182 * @adev: amdgpu_device pointer
 183 * @level: VMPT level
 184 *
 185 * Returns:
 186 * The mask to extract the entry number of a PD/PT from an address.
 187 */
 188static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
 189				       unsigned int level)
 190{
 191	if (level <= adev->vm_manager.root_level)
 192		return 0xffffffff;
 193	else if (level != AMDGPU_VM_PTB)
 194		return 0x1ff;
 195	else
 196		return AMDGPU_VM_PTE_COUNT(adev) - 1;
 197}
 198
 199/**
 200 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 201 *
 202 * @adev: amdgpu_device pointer
 203 * @level: VMPT level
 204 *
 205 * Returns:
 206 * The size of the BO for a page directory or page table in bytes.
 207 */
 208static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
 209{
 210	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
 211}
 212
 213/**
 214 * amdgpu_vm_bo_evicted - vm_bo is evicted
 215 *
 216 * @vm_bo: vm_bo which is evicted
 217 *
 218 * State for PDs/PTs and per VM BOs which are not at the location they should
 219 * be.
 220 */
 221static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
 222{
 223	struct amdgpu_vm *vm = vm_bo->vm;
 224	struct amdgpu_bo *bo = vm_bo->bo;
 225
 226	vm_bo->moved = true;
 227	if (bo->tbo.type == ttm_bo_type_kernel)
 228		list_move(&vm_bo->vm_status, &vm->evicted);
 229	else
 230		list_move_tail(&vm_bo->vm_status, &vm->evicted);
 231}
 232/**
 233 * amdgpu_vm_bo_moved - vm_bo is moved
 234 *
 235 * @vm_bo: vm_bo which is moved
 236 *
 237 * State for per VM BOs which are moved, but that change is not yet reflected
 238 * in the page tables.
 239 */
 240static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
 241{
 242	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 243}
 244
 245/**
 246 * amdgpu_vm_bo_idle - vm_bo is idle
 247 *
 248 * @vm_bo: vm_bo which is now idle
 249 *
 250 * State for PDs/PTs and per VM BOs which have gone through the state machine
 251 * and are now idle.
 252 */
 253static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
 254{
 255	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
 256	vm_bo->moved = false;
 257}
 258
 259/**
 260 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 261 *
 262 * @vm_bo: vm_bo which is now invalidated
 263 *
 264 * State for normal BOs which are invalidated and that change not yet reflected
 265 * in the PTs.
 266 */
 267static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
 268{
 269	spin_lock(&vm_bo->vm->invalidated_lock);
 270	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
 271	spin_unlock(&vm_bo->vm->invalidated_lock);
 272}
 273
 274/**
 275 * amdgpu_vm_bo_relocated - vm_bo is relocated
 276 *
 277 * @vm_bo: vm_bo which is relocated
 278 *
 279 * State for PDs/PTs which need to update their parent PD.
 280 * For the root PD, just move to idle state.
 281 */
 282static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
 283{
 284	if (vm_bo->bo->parent)
 285		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
 286	else
 287		amdgpu_vm_bo_idle(vm_bo);
 288}
 289
 290/**
 291 * amdgpu_vm_bo_done - vm_bo is done
 292 *
 293 * @vm_bo: vm_bo which is now done
 294 *
 295 * State for normal BOs which are invalidated and that change has been updated
 296 * in the PTs.
 297 */
 298static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
 299{
 300	spin_lock(&vm_bo->vm->invalidated_lock);
 301	list_del_init(&vm_bo->vm_status);
 302	spin_unlock(&vm_bo->vm->invalidated_lock);
 303}
 304
 305/**
 306 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 307 *
 308 * @base: base structure for tracking BO usage in a VM
 309 * @vm: vm to which bo is to be added
 310 * @bo: amdgpu buffer object
 311 *
 312 * Initialize a bo_va_base structure and add it to the appropriate lists
 313 *
 314 */
 315static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 316				   struct amdgpu_vm *vm,
 317				   struct amdgpu_bo *bo)
 318{
 319	base->vm = vm;
 320	base->bo = bo;
 321	base->next = NULL;
 322	INIT_LIST_HEAD(&base->vm_status);
 323
 324	if (!bo)
 325		return;
 326	base->next = bo->vm_bo;
 327	bo->vm_bo = base;
 328
 329	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 330		return;
 331
 332	vm->bulk_moveable = false;
 333	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
 334		amdgpu_vm_bo_relocated(base);
 335	else
 336		amdgpu_vm_bo_idle(base);
 337
 338	if (bo->preferred_domains &
 339	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
 340		return;
 341
 342	/*
 343	 * We checked all the prerequisites, but it looks like this per VM BO
 344	 * is currently evicted. Add the BO to the evicted list to make sure
 345	 * it is validated on next VM use to avoid faults.
 346	 */
 347	amdgpu_vm_bo_evicted(base);
 348}
 349
 350/**
 351 * amdgpu_vm_pt_parent - get the parent page directory
 352 *
 353 * @pt: child page table
 354 *
 355 * Helper to get the parent entry for the child page table. NULL if we are at
 356 * the root page directory.
 357 */
 358static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
 359{
 360	struct amdgpu_bo *parent = pt->base.bo->parent;
 361
 362	if (!parent)
 363		return NULL;
 364
 365	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
 366}
 367
 368/*
 369 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 370 */
 371struct amdgpu_vm_pt_cursor {
 372	uint64_t pfn;
 373	struct amdgpu_vm_pt *parent;
 374	struct amdgpu_vm_pt *entry;
 375	unsigned level;
 376};
 377
 378/**
 379 * amdgpu_vm_pt_start - start PD/PT walk
 380 *
 381 * @adev: amdgpu_device pointer
 382 * @vm: amdgpu_vm structure
 383 * @start: start address of the walk
 384 * @cursor: state to initialize
 385 *
 386 * Initialize a amdgpu_vm_pt_cursor to start a walk.
 387 */
 388static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
 389			       struct amdgpu_vm *vm, uint64_t start,
 390			       struct amdgpu_vm_pt_cursor *cursor)
 391{
 392	cursor->pfn = start;
 393	cursor->parent = NULL;
 394	cursor->entry = &vm->root;
 395	cursor->level = adev->vm_manager.root_level;
 396}
 397
 398/**
 399 * amdgpu_vm_pt_descendant - go to child node
 400 *
 401 * @adev: amdgpu_device pointer
 402 * @cursor: current state
 403 *
 404 * Walk to the child node of the current node.
 405 * Returns:
 406 * True if the walk was possible, false otherwise.
 407 */
 408static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
 409				    struct amdgpu_vm_pt_cursor *cursor)
 410{
 411	unsigned mask, shift, idx;
 412
 413	if (!cursor->entry->entries)
 414		return false;
 415
 416	BUG_ON(!cursor->entry->base.bo);
 417	mask = amdgpu_vm_entries_mask(adev, cursor->level);
 418	shift = amdgpu_vm_level_shift(adev, cursor->level);
 419
 420	++cursor->level;
 421	idx = (cursor->pfn >> shift) & mask;
 422	cursor->parent = cursor->entry;
 423	cursor->entry = &cursor->entry->entries[idx];
 424	return true;
 425}
 426
 427/**
 428 * amdgpu_vm_pt_sibling - go to sibling node
 429 *
 430 * @adev: amdgpu_device pointer
 431 * @cursor: current state
 432 *
 433 * Walk to the sibling node of the current node.
 434 * Returns:
 435 * True if the walk was possible, false otherwise.
 436 */
 437static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
 438				 struct amdgpu_vm_pt_cursor *cursor)
 439{
 440	unsigned shift, num_entries;
 441
 442	/* Root doesn't have a sibling */
 443	if (!cursor->parent)
 444		return false;
 445
 446	/* Go to our parents and see if we got a sibling */
 447	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
 448	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
 449
 450	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
 451		return false;
 452
 453	cursor->pfn += 1ULL << shift;
 454	cursor->pfn &= ~((1ULL << shift) - 1);
 455	++cursor->entry;
 456	return true;
 457}
 458
 459/**
 460 * amdgpu_vm_pt_ancestor - go to parent node
 461 *
 462 * @cursor: current state
 463 *
 464 * Walk to the parent node of the current node.
 465 * Returns:
 466 * True if the walk was possible, false otherwise.
 467 */
 468static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
 469{
 470	if (!cursor->parent)
 471		return false;
 472
 473	--cursor->level;
 474	cursor->entry = cursor->parent;
 475	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
 476	return true;
 477}
 478
 479/**
 480 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 481 *
 482 * @adev: amdgpu_device pointer
 483 * @cursor: current state
 484 *
 485 * Walk the PD/PT tree to the next node.
 486 */
 487static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
 488			      struct amdgpu_vm_pt_cursor *cursor)
 489{
 490	/* First try a newborn child */
 491	if (amdgpu_vm_pt_descendant(adev, cursor))
 492		return;
 493
 494	/* If that didn't work, try to find a sibling */
 495	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
 496		/* No sibling, go to our parents and grandparents */
 497		if (!amdgpu_vm_pt_ancestor(cursor)) {
 498			cursor->pfn = ~0ll;
 499			return;
 500		}
 501	}
 502}
 503
 504/**
 505 * amdgpu_vm_pt_first_dfs - start a depth-first search
 506 *
 507 * @adev: amdgpu_device structure
 508 * @vm: amdgpu_vm structure
 509 * @start: optional cursor to start with
 510 * @cursor: state to initialize
 511 *
 512 * Starts a depth-first traversal of the PD/PT tree.
 513 */
 514static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
 515				   struct amdgpu_vm *vm,
 516				   struct amdgpu_vm_pt_cursor *start,
 517				   struct amdgpu_vm_pt_cursor *cursor)
 518{
 519	if (start)
 520		*cursor = *start;
 521	else
 522		amdgpu_vm_pt_start(adev, vm, 0, cursor);
 523	while (amdgpu_vm_pt_descendant(adev, cursor));
 524}
 525
 526/**
 527 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 528 *
 529 * @start: starting point for the search
 530 * @entry: current entry
 531 *
 532 * Returns:
 533 * True when the search should continue, false otherwise.
 534 */
 535static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
 536				      struct amdgpu_vm_pt *entry)
 537{
 538	return entry && (!start || entry != start->entry);
 539}
 540
 541/**
 542 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 543 *
 544 * @adev: amdgpu_device structure
 545 * @cursor: current state
 546 *
 547 * Move the cursor to the next node in a depth-first search.
 548 */
 549static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
 550				  struct amdgpu_vm_pt_cursor *cursor)
 551{
 552	if (!cursor->entry)
 553		return;
 554
 555	if (!cursor->parent)
 556		cursor->entry = NULL;
 557	else if (amdgpu_vm_pt_sibling(adev, cursor))
 558		while (amdgpu_vm_pt_descendant(adev, cursor));
 559	else
 560		amdgpu_vm_pt_ancestor(cursor);
 561}
 562
 563/*
 564 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 565 */
 566#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
 567	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
 568	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
 569	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
 570	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
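
/* Illustrative usage sketch (mirroring amdgpu_vm_free_pts() below): the
 * cursor is advanced before the loop body runs, so the body may safely
 * free the current entry, which is what makes the walk "safe":
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_pt *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		amdgpu_vm_free_table(entry);
 */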
 571
 572/**
 573 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 574 *
 575 * @vm: vm providing the BOs
 576 * @validated: head of validation list
 577 * @entry: entry to add
 578 *
 579 * Add the page directory to the list of BOs to
 580 * validate for command submission.
 581 */
 582void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 583			 struct list_head *validated,
 584			 struct amdgpu_bo_list_entry *entry)
 585{
 586	entry->priority = 0;
 587	entry->tv.bo = &vm->root.base.bo->tbo;
 588	/* Two for VM updates, one for TTM and one for the CS job */
 589	entry->tv.num_shared = 4;
 590	entry->user_pages = NULL;
 591	list_add(&entry->tv.head, validated);
 592}
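
/* Illustrative usage sketch (hypothetical caller): a command submission
 * path typically puts the root PD on its validation list before reserving
 * everything:
 *
 *	struct amdgpu_bo_list_entry pd;
 *	LIST_HEAD(validated);
 *
 *	amdgpu_vm_get_pd_bo(vm, &validated, &pd);
 *	(then reserve the list, e.g. via ttm_eu_reserve_buffers())
 */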
 593
 594/**
 595 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 596 *
 597 * @bo: BO which was removed from the LRU
 598 *
 599 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 600 * LRU.
 601 */
 602void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 603{
 604	struct amdgpu_bo *abo;
 605	struct amdgpu_vm_bo_base *bo_base;
 606
 607	if (!amdgpu_bo_is_amdgpu_bo(bo))
 608		return;
 609
 610	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
 611		return;
 612
 613	abo = ttm_to_amdgpu_bo(bo);
 614	if (!abo->parent)
 615		return;
 616	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
 617		struct amdgpu_vm *vm = bo_base->vm;
 618
 619		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 620			vm->bulk_moveable = false;
 621	}
 622
 623}
 624/**
 625 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 626 *
 627 * @adev: amdgpu device pointer
 628 * @vm: vm providing the BOs
 629 *
 630 * Move all BOs to the end of LRU and remember their positions to put them
 631 * together.
 632 */
 633void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 634				struct amdgpu_vm *vm)
 635{
 636	struct amdgpu_vm_bo_base *bo_base;
 637
 638	if (vm->bulk_moveable) {
 639		spin_lock(&ttm_bo_glob.lru_lock);
 640		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
 641		spin_unlock(&ttm_bo_glob.lru_lock);
 642		return;
 643	}
 644
 645	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 646
 647	spin_lock(&ttm_bo_glob.lru_lock);
 648	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 649		struct amdgpu_bo *bo = bo_base->bo;
 650
 651		if (!bo->parent)
 652			continue;
 653
 654		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
 655		if (bo->shadow)
 656			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
 657						&vm->lru_bulk_move);
 658	}
 659	spin_unlock(&ttm_bo_glob.lru_lock);
 660
 661	vm->bulk_moveable = true;
 662}
 663
 664/**
 665 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 666 *
 667 * @adev: amdgpu device pointer
 668 * @vm: vm providing the BOs
 669 * @validate: callback to do the validation
 670 * @param: parameter for the validation callback
 671 *
 672 * Validate the page table BOs on command submission if necessary.
 673 *
 674 * Returns:
 675 * Validation result.
 676 */
 677int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 678			      int (*validate)(void *p, struct amdgpu_bo *bo),
 679			      void *param)
 680{
 681	struct amdgpu_vm_bo_base *bo_base, *tmp;
 682	int r;
 683
 684	vm->bulk_moveable &= list_empty(&vm->evicted);
 685
 686	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 687		struct amdgpu_bo *bo = bo_base->bo;
 688
 689		r = validate(param, bo);
 690		if (r)
 691			return r;
 692
 693		if (bo->tbo.type != ttm_bo_type_kernel) {
 694			amdgpu_vm_bo_moved(bo_base);
 695		} else {
 696			vm->update_funcs->map_table(bo);
 697			amdgpu_vm_bo_relocated(bo_base);
 698		}
 699	}
 700
 701	amdgpu_vm_eviction_lock(vm);
 702	vm->evicting = false;
 703	amdgpu_vm_eviction_unlock(vm);
 704
 705	return 0;
 706}
 707
 708/**
 709 * amdgpu_vm_ready - check VM is ready for updates
 710 *
 711 * @vm: VM to check
 712 *
 713 * Check if all VM PDs/PTs are ready for updates
 714 *
 715 * Returns:
 716 * True if eviction list is empty.
 717 */
 718bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 719{
 720	return list_empty(&vm->evicted);
 721}
 722
 723/**
 724 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 725 *
 726 * @adev: amdgpu_device pointer
 727 * @vm: VM to clear BO from
 728 * @bo: BO to clear
 729 * @immediate: use an immediate update
 730 *
 731 * Root PD needs to be reserved when calling this.
 732 *
 733 * Returns:
 734 * 0 on success, errno otherwise.
 735 */
 736static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 737			      struct amdgpu_vm *vm,
 738			      struct amdgpu_bo *bo,
 739			      bool immediate)
 740{
 741	struct ttm_operation_ctx ctx = { true, false };
 742	unsigned level = adev->vm_manager.root_level;
 743	struct amdgpu_vm_update_params params;
 744	struct amdgpu_bo *ancestor = bo;
 745	unsigned entries, ats_entries;
 746	uint64_t addr;
 747	int r;
 748
 749	/* Figure out our place in the hierarchy */
 750	if (ancestor->parent) {
 751		++level;
 752		while (ancestor->parent->parent) {
 753			++level;
 754			ancestor = ancestor->parent;
 755		}
 756	}
 757
 758	entries = amdgpu_bo_size(bo) / 8;
 759	if (!vm->pte_support_ats) {
 760		ats_entries = 0;
 761
 762	} else if (!bo->parent) {
 763		ats_entries = amdgpu_vm_num_ats_entries(adev);
 764		ats_entries = min(ats_entries, entries);
 765		entries -= ats_entries;
 766
 767	} else {
 768		struct amdgpu_vm_pt *pt;
 769
 770		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
 771		ats_entries = amdgpu_vm_num_ats_entries(adev);
 772		if ((pt - vm->root.entries) >= ats_entries) {
 773			ats_entries = 0;
 774		} else {
 775			ats_entries = entries;
 776			entries = 0;
 777		}
 778	}
 779
 780	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 781	if (r)
 782		return r;
 783
 784	if (bo->shadow) {
 785		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
 786				    &ctx);
 787		if (r)
 788			return r;
 789	}
 790
 791	r = vm->update_funcs->map_table(bo);
 792	if (r)
 793		return r;
 794
 795	memset(&params, 0, sizeof(params));
 796	params.adev = adev;
 797	params.vm = vm;
 798	params.immediate = immediate;
 799
 800	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 801	if (r)
 802		return r;
 803
 804	addr = 0;
 805	if (ats_entries) {
 806		uint64_t value = 0, flags;
 807
 808		flags = AMDGPU_PTE_DEFAULT_ATC;
 809		if (level != AMDGPU_VM_PTB) {
 810			/* Handle leaf PDEs as PTEs */
 811			flags |= AMDGPU_PDE_PTE;
 812			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
 813		}
 814
 815		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
 816					     value, flags);
 817		if (r)
 818			return r;
 819
 820		addr += ats_entries * 8;
 821	}
 822
 823	if (entries) {
 824		uint64_t value = 0, flags = 0;
 825
 826		if (adev->asic_type >= CHIP_VEGA10) {
 827			if (level != AMDGPU_VM_PTB) {
 828				/* Handle leaf PDEs as PTEs */
 829				flags |= AMDGPU_PDE_PTE;
 830				amdgpu_gmc_get_vm_pde(adev, level,
 831						      &value, &flags);
 832			} else {
 833				/* Workaround for fault priority problem on GMC9 */
 834				flags = AMDGPU_PTE_EXECUTABLE;
 835			}
 836		}
 837
 838		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
 839					     value, flags);
 840		if (r)
 841			return r;
 842	}
 843
 844	return vm->update_funcs->commit(&params, NULL);
 845}
 846
 847/**
 848 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 849 *
 850 * @adev: amdgpu_device pointer
 851 * @vm: requesting vm
 852 * @level: the page table level
 853 * @immediate: use a immediate update
 854 * @bp: resulting BO allocation parameters
 855 */
 856static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 857			       int level, bool immediate,
 858			       struct amdgpu_bo_param *bp)
 859{
 860	memset(bp, 0, sizeof(*bp));
 861
 862	bp->size = amdgpu_vm_bo_size(adev, level);
 863	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
 864	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
 865	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
 866	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 867		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 868	if (vm->use_cpu_for_update)
 869		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 870	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
 871		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
 872	bp->type = ttm_bo_type_kernel;
 873	bp->no_wait_gpu = immediate;
 874	if (vm->root.base.bo)
 875		bp->resv = vm->root.base.bo->tbo.base.resv;
 876}
 877
 878/**
 879 * amdgpu_vm_alloc_pts - Allocate a specific page table
 880 *
 881 * @adev: amdgpu_device pointer
 882 * @vm: VM to allocate page tables for
 883 * @cursor: Which page table to allocate
 884 * @immediate: use an immediate update
 885 *
 886 * Make sure a specific page table or directory is allocated.
 887 *
 888 * Returns:
 889 * 1 if page table needed to be allocated, 0 if page table was already
 890 * allocated, negative errno if an error occurred.
 891 */
 892static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 893			       struct amdgpu_vm *vm,
 894			       struct amdgpu_vm_pt_cursor *cursor,
 895			       bool immediate)
 896{
 897	struct amdgpu_vm_pt *entry = cursor->entry;
 898	struct amdgpu_bo_param bp;
 899	struct amdgpu_bo *pt;
 900	int r;
 901
 902	if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
 903		unsigned num_entries;
 904
 905		num_entries = amdgpu_vm_num_entries(adev, cursor->level);
 906		entry->entries = kvmalloc_array(num_entries,
 907						sizeof(*entry->entries),
 908						GFP_KERNEL | __GFP_ZERO);
 909		if (!entry->entries)
 910			return -ENOMEM;
 911	}
 912
 913	if (entry->base.bo)
 914		return 0;
 915
 916	amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
 917
 918	r = amdgpu_bo_create(adev, &bp, &pt);
 919	if (r)
 920		return r;
 921
 922	/* Keep a reference to the parent directory to avoid
 923	 * freeing the BOs in the wrong order.
 924	 */
 925	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
 926	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 927
 928	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
 929	if (r)
 930		goto error_free_pt;
 931
 932	return 0;
 933
 934error_free_pt:
 935	amdgpu_bo_unref(&pt->shadow);
 936	amdgpu_bo_unref(&pt);
 937	return r;
 938}
 939
 940/**
 941 * amdgpu_vm_free_table - free one PD/PT
 942 *
 943 * @entry: PDE to free
 944 */
 945static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
 946{
 947	if (entry->base.bo) {
 948		entry->base.bo->vm_bo = NULL;
 949		list_del(&entry->base.vm_status);
 950		amdgpu_bo_unref(&entry->base.bo->shadow);
 951		amdgpu_bo_unref(&entry->base.bo);
 952	}
 953	kvfree(entry->entries);
 954	entry->entries = NULL;
 955}
 956
 957/**
 958 * amdgpu_vm_free_pts - free PD/PT levels
 959 *
 960 * @adev: amdgpu device structure
 961 * @vm: amdgpu vm structure
 962 * @start: optional cursor where to start freeing PDs/PTs
 963 *
 964 * Free the page directory or page table level and all sub levels.
 965 */
 966static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
 967			       struct amdgpu_vm *vm,
 968			       struct amdgpu_vm_pt_cursor *start)
 969{
 970	struct amdgpu_vm_pt_cursor cursor;
 971	struct amdgpu_vm_pt *entry;
 972
 973	vm->bulk_moveable = false;
 974
 975	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
 976		amdgpu_vm_free_table(entry);
 977
 978	if (start)
 979		amdgpu_vm_free_table(start->entry);
 980}
 981
 982/**
 983 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 984 *
 985 * @adev: amdgpu_device pointer
 986 */
 987void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
 988{
 989	const struct amdgpu_ip_block *ip_block;
 990	bool has_compute_vm_bug;
 991	struct amdgpu_ring *ring;
 992	int i;
 993
 994	has_compute_vm_bug = false;
 995
 996	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 997	if (ip_block) {
 998		/* Compute has a VM bug for GFX version < 7.
 999		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
1000		if (ip_block->version->major <= 7)
1001			has_compute_vm_bug = true;
1002		else if (ip_block->version->major == 8)
1003			if (adev->gfx.mec_fw_version < 673)
1004				has_compute_vm_bug = true;
1005	}
1006
1007	for (i = 0; i < adev->num_rings; i++) {
1008		ring = adev->rings[i];
1009		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1010			/* only compute rings */
1011			ring->has_compute_vm_bug = has_compute_vm_bug;
1012		else
1013			ring->has_compute_vm_bug = false;
1014	}
1015}
1016
1017/**
1018 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1019 *
1020 * @ring: ring on which the job will be submitted
1021 * @job: job to submit
1022 *
1023 * Returns:
1024 * True if sync is needed.
1025 */
1026bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1027				  struct amdgpu_job *job)
1028{
1029	struct amdgpu_device *adev = ring->adev;
1030	unsigned vmhub = ring->funcs->vmhub;
1031	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1032	struct amdgpu_vmid *id;
1033	bool gds_switch_needed;
1034	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1035
1036	if (job->vmid == 0)
1037		return false;
1038	id = &id_mgr->ids[job->vmid];
1039	gds_switch_needed = ring->funcs->emit_gds_switch && (
1040		id->gds_base != job->gds_base ||
1041		id->gds_size != job->gds_size ||
1042		id->gws_base != job->gws_base ||
1043		id->gws_size != job->gws_size ||
1044		id->oa_base != job->oa_base ||
1045		id->oa_size != job->oa_size);
1046
1047	if (amdgpu_vmid_had_gpu_reset(adev, id))
1048		return true;
1049
1050	return vm_flush_needed || gds_switch_needed;
1051}
1052
1053/**
1054 * amdgpu_vm_flush - hardware flush the vm
1055 *
1056 * @ring: ring to use for flush
1057 * @job:  related job
1058 * @need_pipe_sync: is pipe sync needed
1059 *
1060 * Emit a VM flush when it is necessary.
1061 *
1062 * Returns:
1063 * 0 on success, errno otherwise.
1064 */
1065int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
1066		    bool need_pipe_sync)
1067{
1068	struct amdgpu_device *adev = ring->adev;
1069	unsigned vmhub = ring->funcs->vmhub;
1070	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1071	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1072	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1073		id->gds_base != job->gds_base ||
1074		id->gds_size != job->gds_size ||
1075		id->gws_base != job->gws_base ||
1076		id->gws_size != job->gws_size ||
1077		id->oa_base != job->oa_base ||
1078		id->oa_size != job->oa_size);
1079	bool vm_flush_needed = job->vm_needs_flush;
1080	struct dma_fence *fence = NULL;
1081	bool pasid_mapping_needed = false;
1082	unsigned patch_offset = 0;
1083	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
1084	int r;
1085
1086	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
1087		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
1088
1089	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1090		gds_switch_needed = true;
1091		vm_flush_needed = true;
1092		pasid_mapping_needed = true;
1093	}
1094
1095	mutex_lock(&id_mgr->lock);
1096	if (id->pasid != job->pasid || !id->pasid_mapping ||
1097	    !dma_fence_is_signaled(id->pasid_mapping))
1098		pasid_mapping_needed = true;
1099	mutex_unlock(&id_mgr->lock);
1100
1101	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1102	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1103			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1104	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1105		ring->funcs->emit_wreg;
1106
1107	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1108		return 0;
1109
1110	if (ring->funcs->init_cond_exec)
1111		patch_offset = amdgpu_ring_init_cond_exec(ring);
1112
1113	if (need_pipe_sync)
1114		amdgpu_ring_emit_pipeline_sync(ring);
1115
1116	if (vm_flush_needed) {
1117		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1118		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1119	}
1120
1121	if (pasid_mapping_needed)
1122		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1123
1124	if (vm_flush_needed || pasid_mapping_needed) {
1125		r = amdgpu_fence_emit(ring, &fence, 0);
1126		if (r)
1127			return r;
1128	}
1129
1130	if (vm_flush_needed) {
1131		mutex_lock(&id_mgr->lock);
1132		dma_fence_put(id->last_flush);
1133		id->last_flush = dma_fence_get(fence);
1134		id->current_gpu_reset_count =
1135			atomic_read(&adev->gpu_reset_counter);
1136		mutex_unlock(&id_mgr->lock);
1137	}
1138
1139	if (pasid_mapping_needed) {
1140		mutex_lock(&id_mgr->lock);
1141		id->pasid = job->pasid;
1142		dma_fence_put(id->pasid_mapping);
1143		id->pasid_mapping = dma_fence_get(fence);
1144		mutex_unlock(&id_mgr->lock);
1145	}
1146	dma_fence_put(fence);
1147
1148	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1149		id->gds_base = job->gds_base;
1150		id->gds_size = job->gds_size;
1151		id->gws_base = job->gws_base;
1152		id->gws_size = job->gws_size;
1153		id->oa_base = job->oa_base;
1154		id->oa_size = job->oa_size;
1155		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1156					    job->gds_size, job->gws_base,
1157					    job->gws_size, job->oa_base,
1158					    job->oa_size);
1159	}
1160
1161	if (ring->funcs->patch_cond_exec)
1162		amdgpu_ring_patch_cond_exec(ring, patch_offset);
1163
1164	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1165	if (ring->funcs->emit_switch_buffer) {
1166		amdgpu_ring_emit_switch_buffer(ring);
1167		amdgpu_ring_emit_switch_buffer(ring);
1168	}
1169	return 0;
1170}
1171
1172/**
1173 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1174 *
1175 * @vm: requested vm
1176 * @bo: requested buffer object
1177 *
1178 * Find @bo inside the requested vm.
1179 * Search inside the @bo's vm list for the requested vm.
1180 *
1181 *
1182 * Object has to be reserved!
1183 *
1184 * Returns:
1185 * Found bo_va or NULL.
1186 */
1187struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1188				       struct amdgpu_bo *bo)
1189{
1190	struct amdgpu_vm_bo_base *base;
1191
1192	for (base = bo->vm_bo; base; base = base->next) {
1193		if (base->vm != vm)
1194			continue;
1195
1196		return container_of(base, struct amdgpu_bo_va, base);
1197	}
1198	return NULL;
1199}
1200
1201/**
1202 * amdgpu_vm_map_gart - Resolve gart mapping of addr
1203 *
1204 * @pages_addr: optional DMA address to use for lookup
1205 * @addr: the unmapped addr
1206 *
1207 * Look up the physical address of the page that the pte resolves
1208 * to.
1209 *
1210 * Returns:
1211 * The pointer for the page table entry.
1212 */
1213uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1214{
1215	uint64_t result;
1216
1217	/* page table offset */
1218	result = pages_addr[addr >> PAGE_SHIFT];
1219
1220	/* in case cpu page size != gpu page size*/
1221	result |= addr & (~PAGE_MASK);
1222
1223	result &= 0xFFFFFFFFFFFFF000ULL;
1224
1225	return result;
1226}
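
/* Worked example (illustrative numbers, 64K CPU pages vs. 4K GPU pages):
 * for addr = 0x1000 the lookup selects pages_addr[0], ORs in the 0x1000
 * offset within that CPU page, and the final mask strips the sub-4K bits:
 *
 *	pages_addr[0] == 0x12340000
 *	result = 0x12340000 | 0x1000	-> 0x12341000
 *	result &= 0xFFFFFFFFFFFFF000ULL	-> 0x12341000
 */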
1227
1228/**
1229 * amdgpu_vm_update_pde - update a single level in the hierarchy
1230 *
1231 * @params: parameters for the update
1232 * @vm: requested vm
1233 * @entry: entry to update
1234 *
1235 * Makes sure the requested entry in parent is up to date.
1236 */
1237static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
1238				struct amdgpu_vm *vm,
1239				struct amdgpu_vm_pt *entry)
1240{
1241	struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
1242	struct amdgpu_bo *bo = parent->base.bo, *pbo;
1243	uint64_t pde, pt, flags;
1244	unsigned level;
1245
1246	for (level = 0, pbo = bo->parent; pbo; ++level)
1247		pbo = pbo->parent;
1248
1249	level += params->adev->vm_manager.root_level;
1250	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1251	pde = (entry - parent->entries) * 8;
1252	return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
1253}
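
/* Note (illustrative): each PDE is 8 bytes, so updating e.g. the 3rd
 * entry of a directory writes at byte offset pde = 2 * 8 = 16 into the
 * parent BO.
 */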
1254
1255/**
1256 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1257 *
1258 * @adev: amdgpu_device pointer
1259 * @vm: related vm
1260 *
1261 * Mark all PD level as invalid after an error.
1262 */
1263static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1264				     struct amdgpu_vm *vm)
1265{
1266	struct amdgpu_vm_pt_cursor cursor;
1267	struct amdgpu_vm_pt *entry;
1268
1269	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1270		if (entry->base.bo && !entry->base.moved)
1271			amdgpu_vm_bo_relocated(&entry->base);
1272}
1273
1274/**
1275 * amdgpu_vm_update_pdes - make sure that all directories are valid
1276 *
1277 * @adev: amdgpu_device pointer
1278 * @vm: requested vm
1279 * @immediate: submit immediately to the paging queue
1280 *
1281 * Makes sure all directories are up to date.
1282 *
1283 * Returns:
1284 * 0 for success, error for failure.
1285 */
1286int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
1287			  struct amdgpu_vm *vm, bool immediate)
1288{
1289	struct amdgpu_vm_update_params params;
1290	int r;
1291
1292	if (list_empty(&vm->relocated))
1293		return 0;
1294
1295	memset(&params, 0, sizeof(params));
1296	params.adev = adev;
1297	params.vm = vm;
1298	params.immediate = immediate;
1299
1300	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
1301	if (r)
1302		return r;
1303
1304	while (!list_empty(&vm->relocated)) {
1305		struct amdgpu_vm_pt *entry;
1306
1307		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1308					 base.vm_status);
1309		amdgpu_vm_bo_idle(&entry->base);
1310
1311		r = amdgpu_vm_update_pde(&params, vm, entry);
1312		if (r)
1313			goto error;
1314	}
1315
1316	r = vm->update_funcs->commit(&params, &vm->last_update);
1317	if (r)
1318		goto error;
1319	return 0;
1320
1321error:
1322	amdgpu_vm_invalidate_pds(adev, vm);
1323	return r;
1324}
1325
1326/*
1327 * amdgpu_vm_update_flags - figure out flags for PTE updates
1328 *
1329 * Make sure to set the right flags for the PTEs at the desired level.
1330 */
1331static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
1332				   struct amdgpu_bo *bo, unsigned level,
1333				   uint64_t pe, uint64_t addr,
1334				   unsigned count, uint32_t incr,
1335				   uint64_t flags)
1336
1337{
1338	if (level != AMDGPU_VM_PTB) {
1339		flags |= AMDGPU_PDE_PTE;
1340		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1341
1342	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
1343		   !(flags & AMDGPU_PTE_VALID) &&
1344		   !(flags & AMDGPU_PTE_PRT)) {
1345
1346		/* Workaround for fault priority problem on GMC9 */
1347		flags |= AMDGPU_PTE_EXECUTABLE;
1348	}
1349
1350	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
1351					 flags);
1352}
1353
1354/**
1355 * amdgpu_vm_fragment - get fragment for PTEs
1356 *
1357 * @params: see amdgpu_vm_update_params definition
1358 * @start: first PTE to handle
1359 * @end: last PTE to handle
1360 * @flags: hw mapping flags
1361 * @frag: resulting fragment size
1362 * @frag_end: end of this fragment
1363 *
1364 * Returns the first possible fragment for the start and end address.
1365 */
1366static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
1367			       uint64_t start, uint64_t end, uint64_t flags,
1368			       unsigned int *frag, uint64_t *frag_end)
1369{
1370	/*
1371	 * The MC L1 TLB supports variable sized pages, based on a fragment
1372	 * field in the PTE. When this field is set to a non-zero value, page
1373	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1374	 * flags are considered valid for all PTEs within the fragment range
1375	 * and corresponding mappings are assumed to be physically contiguous.
1376	 *
1377	 * The L1 TLB can store a single PTE for the whole fragment,
1378	 * significantly increasing the space available for translation
1379	 * caching. This leads to large improvements in throughput when the
1380	 * TLB is under pressure.
1381	 *
1382	 * The L2 TLB distributes small and large fragments into two
1383	 * asymmetric partitions. The large fragment cache is significantly
1384	 * larger. Thus, we try to use large fragments wherever possible.
1385	 * Userspace can support this by aligning virtual base address and
1386	 * allocation size to the fragment size.
1387	 *
1388	 * Starting with Vega10 the fragment size only controls the L1. The L2
1389	 * is now directly fed with small/huge/giant pages from the walker.
1390	 */
1391	unsigned max_frag;
1392
1393	if (params->adev->asic_type < CHIP_VEGA10)
1394		max_frag = params->adev->vm_manager.fragment_size;
1395	else
1396		max_frag = 31;
1397
1398	/* system pages are not physically contiguous */
1399	if (params->pages_addr) {
1400		*frag = 0;
1401		*frag_end = end;
1402		return;
1403	}
1404
1405	/* This intentionally wraps around if no bit is set */
1406	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1407	if (*frag >= max_frag) {
1408		*frag = max_frag;
1409		*frag_end = end & ~((1ULL << max_frag) - 1);
1410	} else {
1411		*frag_end = start + (1 << *frag);
1412	}
1413}
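
/* Worked example (assuming a Vega10+ ASIC, so max_frag == 31, and no
 * pages_addr): for start == 0x2000 and end == 0x2400 GPU pages,
 *
 *	*frag = min(ffs(0x2000) - 1, fls64(0x400) - 1) = min(13, 10) = 10
 *	*frag_end = 0x2000 + (1 << 10) = 0x2400
 *
 * so the whole range is covered by a single 1024-page fragment.
 */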
1414
1415/**
1416 * amdgpu_vm_update_ptes - make sure that page tables are valid
1417 *
1418 * @params: see amdgpu_vm_update_params definition
1419 * @start: start of GPU address range
1420 * @end: end of GPU address range
1421 * @dst: destination address to map to, the next dst inside the function
1422 * @flags: mapping flags
1423 *
1424 * Update the page tables in the range @start - @end.
1425 *
1426 * Returns:
1427 * 0 for success, -EINVAL for failure.
1428 */
1429static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
1430				 uint64_t start, uint64_t end,
1431				 uint64_t dst, uint64_t flags)
1432{
1433	struct amdgpu_device *adev = params->adev;
1434	struct amdgpu_vm_pt_cursor cursor;
1435	uint64_t frag_start = start, frag_end;
1436	unsigned int frag;
1437	int r;
1438
1439	/* figure out the initial fragment */
1440	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1441
1442	/* walk over the address space and update the PTs */
1443	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1444	while (cursor.pfn < end) {
1445		unsigned shift, parent_shift, mask;
1446		uint64_t incr, entry_end, pe_start;
1447		struct amdgpu_bo *pt;
1448
1449		if (!params->unlocked) {
1450			/* make sure that the page tables covering the
1451			 * address range are actually allocated
1452			 */
1453			r = amdgpu_vm_alloc_pts(params->adev, params->vm,
1454						&cursor, params->immediate);
1455			if (r)
1456				return r;
1457		}
1458
1459		shift = amdgpu_vm_level_shift(adev, cursor.level);
1460		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1461		if (params->unlocked) {
1462			/* Unlocked updates are only allowed on the leaves */
1463			if (amdgpu_vm_pt_descendant(adev, &cursor))
1464				continue;
1465		} else if (adev->asic_type < CHIP_VEGA10 &&
1466			   (flags & AMDGPU_PTE_VALID)) {
1467			/* No huge page support before GMC v9 */
1468			if (cursor.level != AMDGPU_VM_PTB) {
1469				if (!amdgpu_vm_pt_descendant(adev, &cursor))
1470					return -ENOENT;
1471				continue;
1472			}
1473		} else if (frag < shift) {
1474			/* We can't use this level when the fragment size is
1475			 * smaller than the address shift. Go to the next
1476			 * child entry and try again.
1477			 */
1478			if (amdgpu_vm_pt_descendant(adev, &cursor))
1479				continue;
1480		} else if (frag >= parent_shift) {
1481			/* If the fragment size is even larger than the parent
1482			 * shift we should go up one level and check it again.
1483			 */
1484			if (!amdgpu_vm_pt_ancestor(&cursor))
1485				return -EINVAL;
1486			continue;
1487		}
1488
1489		pt = cursor.entry->base.bo;
1490		if (!pt) {
1491			/* We need all PDs and PTs for mapping something, */
1492			if (flags & AMDGPU_PTE_VALID)
1493				return -ENOENT;
1494
1495			/* but unmapping something can happen at a higher
1496			 * level.
1497			 */
1498			if (!amdgpu_vm_pt_ancestor(&cursor))
1499				return -EINVAL;
1500
1501			pt = cursor.entry->base.bo;
1502			shift = parent_shift;
1503		}
1504
1505		/* Looks good so far, calculate parameters for the update */
1506		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1507		mask = amdgpu_vm_entries_mask(adev, cursor.level);
1508		pe_start = ((cursor.pfn >> shift) & mask) * 8;
1509		entry_end = ((uint64_t)mask + 1) << shift;
1510		entry_end += cursor.pfn & ~(entry_end - 1);
1511		entry_end = min(entry_end, end);
1512
1513		do {
1514			uint64_t upd_end = min(entry_end, frag_end);
1515			unsigned nptes = (upd_end - frag_start) >> shift;
1516
1517			/* This can happen when we set higher level PDs to
1518			 * silent to stop fault floods.
1519			 */
1520			nptes = max(nptes, 1u);
1521			amdgpu_vm_update_flags(params, pt, cursor.level,
1522					       pe_start, dst, nptes, incr,
1523					       flags | AMDGPU_PTE_FRAG(frag));
1524
1525			pe_start += nptes * 8;
1526			dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1527
1528			frag_start = upd_end;
1529			if (frag_start >= frag_end) {
1530				/* figure out the next fragment */
1531				amdgpu_vm_fragment(params, frag_start, end,
1532						   flags, &frag, &frag_end);
1533				if (frag < shift)
1534					break;
1535			}
1536		} while (frag_start < entry_end);
1537
1538		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1539			/* Free all child entries.
1540			 * Update the tables with the flags and addresses and free up subsequent
1541			 * tables in the case of huge pages or freed up areas.
1542			 * This is the maximum you can free, because all other page tables are not
1543			 * completely covered by the range and so potentially still in use.
1544			 */
1545			while (cursor.pfn < frag_start) {
1546				amdgpu_vm_free_pts(adev, params->vm, &cursor);
1547				amdgpu_vm_pt_next(adev, &cursor);
1548			}
1549
1550		} else if (frag >= shift) {
1551			/* or just move on to the next on the same level. */
1552			amdgpu_vm_pt_next(adev, &cursor);
1553		}
1554	}
1555
1556	return 0;
1557}
1558
1559/**
1560 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1561 *
1562 * @adev: amdgpu_device pointer
1563 * @vm: requested vm
1564 * @immediate: immediate submission in a page fault
1565 * @unlocked: unlocked invalidation during MM callback
1566 * @resv: fences we need to sync to
1567 * @start: start of mapped range
1568 * @last: last mapped entry
1569 * @flags: flags for the entries
1570 * @addr: addr to set the area to
1571 * @pages_addr: DMA addresses to use for mapping
1572 * @fence: optional resulting fence
1573 *
1574 * Fill in the page table entries between @start and @last.
1575 *
1576 * Returns:
1577 * 0 for success, -EINVAL for failure.
1578 */
1579static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1580				       struct amdgpu_vm *vm, bool immediate,
1581				       bool unlocked, struct dma_resv *resv,
1582				       uint64_t start, uint64_t last,
1583				       uint64_t flags, uint64_t addr,
1584				       dma_addr_t *pages_addr,
1585				       struct dma_fence **fence)
1586{
1587	struct amdgpu_vm_update_params params;
1588	enum amdgpu_sync_mode sync_mode;
1589	int r;
1590
1591	memset(&params, 0, sizeof(params));
1592	params.adev = adev;
1593	params.vm = vm;
1594	params.immediate = immediate;
1595	params.pages_addr = pages_addr;
1596	params.unlocked = unlocked;
1597
1598	/* Implicitly sync to command submissions in the same VM before
1599	 * unmapping. Sync to moving fences before mapping.
1600	 */
1601	if (!(flags & AMDGPU_PTE_VALID))
1602		sync_mode = AMDGPU_SYNC_EQ_OWNER;
1603	else
1604		sync_mode = AMDGPU_SYNC_EXPLICIT;
1605
1606	amdgpu_vm_eviction_lock(vm);
1607	if (vm->evicting) {
1608		r = -EBUSY;
1609		goto error_unlock;
1610	}
1611
1612	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1613		struct dma_fence *tmp = dma_fence_get_stub();
1614
1615		amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
1616		swap(vm->last_unlocked, tmp);
1617		dma_fence_put(tmp);
1618	}
1619
1620	r = vm->update_funcs->prepare(&params, resv, sync_mode);
1621	if (r)
1622		goto error_unlock;
1623
1624	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1625	if (r)
1626		goto error_unlock;
1627
1628	r = vm->update_funcs->commit(&params, fence);
1629
1630error_unlock:
1631	amdgpu_vm_eviction_unlock(vm);
1632	return r;
1633}
1634
1635/**
1636 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1637 *
1638 * @adev: amdgpu_device pointer
1639 * @resv: fences we need to sync to
1640 * @pages_addr: DMA addresses to use for mapping
1641 * @vm: requested vm
1642 * @mapping: mapped range and flags to use for the update
1643 * @flags: HW flags for the mapping
1644 * @bo_adev: amdgpu_device pointer of the device the BO was allocated on
1645 * @nodes: array of drm_mm_nodes with the MC addresses
1646 * @fence: optional resulting fence
1647 *
1648 * Split the mapping into smaller chunks so that each update fits
1649 * into a SDMA IB.
1650 *
1651 * Returns:
1652 * 0 for success, -EINVAL for failure.
1653 */
1654static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1655				      struct dma_resv *resv,
1656				      dma_addr_t *pages_addr,
1657				      struct amdgpu_vm *vm,
1658				      struct amdgpu_bo_va_mapping *mapping,
1659				      uint64_t flags,
1660				      struct amdgpu_device *bo_adev,
1661				      struct drm_mm_node *nodes,
1662				      struct dma_fence **fence)
1663{
1664	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1665	uint64_t pfn, start = mapping->start;
1666	int r;
1667
1668	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1669	 * bits, but filter the flags here just in case.
1670	 */
1671	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1672		flags &= ~AMDGPU_PTE_READABLE;
1673	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1674		flags &= ~AMDGPU_PTE_WRITEABLE;
1675
1676	/* Apply ASIC specific mapping flags */
1677	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
1678
1679	trace_amdgpu_vm_bo_update(mapping);
1680
1681	pfn = mapping->offset >> PAGE_SHIFT;
1682	if (nodes) {
1683		while (pfn >= nodes->size) {
1684			pfn -= nodes->size;
1685			++nodes;
1686		}
1687	}
1688
1689	do {
1690		dma_addr_t *dma_addr = NULL;
1691		uint64_t max_entries;
1692		uint64_t addr, last;
1693
1694		if (nodes) {
1695			addr = nodes->start << PAGE_SHIFT;
1696			max_entries = (nodes->size - pfn) *
1697				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1698		} else {
1699			addr = 0;
1700			max_entries = S64_MAX;
1701		}
1702
1703		if (pages_addr) {
1704			uint64_t count;
1705
1706			for (count = 1;
1707			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1708			     ++count) {
1709				uint64_t idx = pfn + count;
1710
1711				if (pages_addr[idx] !=
1712				    (pages_addr[idx - 1] + PAGE_SIZE))
1713					break;
1714			}
1715
1716			if (count < min_linear_pages) {
1717				addr = pfn << PAGE_SHIFT;
1718				dma_addr = pages_addr;
1719			} else {
1720				addr = pages_addr[pfn];
1721				max_entries = count *
1722					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1723			}
1724
1725		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
1726			addr += bo_adev->vm_manager.vram_base_offset;
1727			addr += pfn << PAGE_SHIFT;
1728		}
1729
1730		last = min((uint64_t)mapping->last, start + max_entries - 1);
1731		r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
1732						start, last, flags, addr,
1733						dma_addr, fence);
1734		if (r)
1735			return r;
1736
1737		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1738		if (nodes && nodes->size == pfn) {
1739			pfn = 0;
1740			++nodes;
1741		}
1742		start = last + 1;
1743
1744	} while (unlikely(start != mapping->last + 1));
1745
1746	return 0;
1747}
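
/* Illustrative note: with two 4 MiB VRAM nodes backing an 8 MiB mapping,
 * the loop above issues at least one amdgpu_vm_bo_update_mapping() call
 * per drm_mm_node, clamping each call to the nodes->size pages remaining
 * in the current node.
 */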
1748
1749/**
1750 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1751 *
1752 * @adev: amdgpu_device pointer
1753 * @bo_va: requested BO and VM object
1754 * @clear: if true clear the entries
1755 *
1756 * Fill in the page table entries for @bo_va.
1757 *
1758 * Returns:
1759 * 0 for success, -EINVAL for failure.
1760 */
1761int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1762			bool clear)
1763{
1764	struct amdgpu_bo *bo = bo_va->base.bo;
1765	struct amdgpu_vm *vm = bo_va->base.vm;
1766	struct amdgpu_bo_va_mapping *mapping;
1767	dma_addr_t *pages_addr = NULL;
1768	struct ttm_mem_reg *mem;
1769	struct drm_mm_node *nodes;
1770	struct dma_fence **last_update;
1771	struct dma_resv *resv;
1772	uint64_t flags;
1773	struct amdgpu_device *bo_adev = adev;
1774	int r;
1775
1776	if (clear || !bo) {
1777		mem = NULL;
1778		nodes = NULL;
1779		resv = vm->root.base.bo->tbo.base.resv;
1780	} else {
1781		struct ttm_dma_tt *ttm;
1782
1783		mem = &bo->tbo.mem;
1784		nodes = mem->mm_node;
1785		if (mem->mem_type == TTM_PL_TT) {
1786			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1787			pages_addr = ttm->dma_address;
1788		}
1789		resv = bo->tbo.base.resv;
1790	}
1791
1792	if (bo) {
1793		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1794
1795		if (amdgpu_bo_encrypted(bo))
1796			flags |= AMDGPU_PTE_TMZ;
1797
1798		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1799	} else {
1800		flags = 0x0;
1801	}
1802
1803	if (clear || (bo && bo->tbo.base.resv ==
1804		      vm->root.base.bo->tbo.base.resv))
1805		last_update = &vm->last_update;
1806	else
1807		last_update = &bo_va->last_pt_update;
1808
1809	if (!clear && bo_va->base.moved) {
1810		bo_va->base.moved = false;
1811		list_splice_init(&bo_va->valids, &bo_va->invalids);
1812
1813	} else if (bo_va->cleared != clear) {
1814		list_splice_init(&bo_va->valids, &bo_va->invalids);
1815	}
1816
1817	list_for_each_entry(mapping, &bo_va->invalids, list) {
1818		r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
1819					       mapping, flags, bo_adev, nodes,
1820					       last_update);
1821		if (r)
1822			return r;
1823	}
1824
1825	/* If the BO is not in its preferred location add it back to
1826	 * the evicted list so that it gets validated again on the
1827	 * next command submission.
1828	 */
1829	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
1830		uint32_t mem_type = bo->tbo.mem.mem_type;
1831
1832		if (!(bo->preferred_domains &
1833		      amdgpu_mem_type_to_domain(mem_type)))
1834			amdgpu_vm_bo_evicted(&bo_va->base);
1835		else
1836			amdgpu_vm_bo_idle(&bo_va->base);
1837	} else {
1838		amdgpu_vm_bo_done(&bo_va->base);
1839	}
1840
1841	list_splice_init(&bo_va->invalids, &bo_va->valids);
1842	bo_va->cleared = clear;
1843
1844	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1845		list_for_each_entry(mapping, &bo_va->valids, list)
1846			trace_amdgpu_vm_bo_mapping(mapping);
1847	}
1848
1849	return 0;
1850}
1851
1852/**
1853 * amdgpu_vm_update_prt_state - update the global PRT state
1854 *
1855 * @adev: amdgpu_device pointer
1856 */
1857static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1858{
1859	unsigned long flags;
1860	bool enable;
1861
1862	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1863	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1864	adev->gmc.gmc_funcs->set_prt(adev, enable);
1865	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1866}
1867
1868/**
1869 * amdgpu_vm_prt_get - add a PRT user
1870 *
1871 * @adev: amdgpu_device pointer
1872 */
1873static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1874{
1875	if (!adev->gmc.gmc_funcs->set_prt)
1876		return;
1877
1878	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1879		amdgpu_vm_update_prt_state(adev);
1880}
1881
1882/**
1883 * amdgpu_vm_prt_put - drop a PRT user
1884 *
1885 * @adev: amdgpu_device pointer
1886 */
1887static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1888{
1889	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1890		amdgpu_vm_update_prt_state(adev);
1891}
1892
1893/**
1894 * amdgpu_vm_prt_cb - callback for updating the PRT status
1895 *
1896 * @fence: fence for the callback
1897 * @_cb: the callback function
1898 */
1899static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1900{
1901	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1902
1903	amdgpu_vm_prt_put(cb->adev);
1904	kfree(cb);
1905}
1906
1907/**
1908 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1909 *
1910 * @adev: amdgpu_device pointer
1911 * @fence: fence for the callback
1912 */
1913static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1914				 struct dma_fence *fence)
1915{
1916	struct amdgpu_prt_cb *cb;
1917
1918	if (!adev->gmc.gmc_funcs->set_prt)
1919		return;
1920
1921	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1922	if (!cb) {
1923		/* Last resort when we are OOM */
1924		if (fence)
1925			dma_fence_wait(fence, false);
1926
1927		amdgpu_vm_prt_put(adev);
1928	} else {
1929		cb->adev = adev;
1930		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1931						     amdgpu_vm_prt_cb))
1932			amdgpu_vm_prt_cb(fence, &cb->cb);
1933	}
1934}
1935
1936/**
1937 * amdgpu_vm_free_mapping - free a mapping
1938 *
1939 * @adev: amdgpu_device pointer
1940 * @vm: requested vm
1941 * @mapping: mapping to be freed
1942 * @fence: fence of the unmap operation
1943 *
1944 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1945 */
1946static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1947				   struct amdgpu_vm *vm,
1948				   struct amdgpu_bo_va_mapping *mapping,
1949				   struct dma_fence *fence)
1950{
1951	if (mapping->flags & AMDGPU_PTE_PRT)
1952		amdgpu_vm_add_prt_cb(adev, fence);
1953	kfree(mapping);
1954}
1955
1956/**
1957 * amdgpu_vm_prt_fini - finish all prt mappings
1958 *
1959 * @adev: amdgpu_device pointer
1960 * @vm: requested vm
1961 *
1962 * Register a cleanup callback to disable PRT support after VM dies.
1963 */
1964static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1965{
1966	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
1967	struct dma_fence *excl, **shared;
1968	unsigned i, shared_count;
1969	int r;
1970
1971	r = dma_resv_get_fences_rcu(resv, &excl,
1972					      &shared_count, &shared);
1973	if (r) {
1974		/* Not enough memory to grab the fence list, as last resort
1975		 * block for all the fences to complete.
1976		 */
1977		dma_resv_wait_timeout_rcu(resv, true, false,
1978						    MAX_SCHEDULE_TIMEOUT);
1979		return;
1980	}
1981
1982	/* Add a callback for each fence in the reservation object */
1983	amdgpu_vm_prt_get(adev);
1984	amdgpu_vm_add_prt_cb(adev, excl);
1985
1986	for (i = 0; i < shared_count; ++i) {
1987		amdgpu_vm_prt_get(adev);
1988		amdgpu_vm_add_prt_cb(adev, shared[i]);
1989	}
1990
1991	kfree(shared);
1992}
1993
1994/**
1995 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1996 *
1997 * @adev: amdgpu_device pointer
1998 * @vm: requested vm
1999 * @fence: optional resulting fence (unchanged if no work needed to be done
2000 * or if an error occurred)
2001 *
2002 * Make sure all freed BOs are cleared in the PT.
2003 * PTs have to be reserved and mutex must be locked!
2004 *
2005 * Returns:
2006 * 0 for success.
2007 *
2008 */
2009int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2010			  struct amdgpu_vm *vm,
2011			  struct dma_fence **fence)
2012{
2013	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
2014	struct amdgpu_bo_va_mapping *mapping;
2015	uint64_t init_pte_value = 0;
2016	struct dma_fence *f = NULL;
2017	int r;
2018
2019	while (!list_empty(&vm->freed)) {
2020		mapping = list_first_entry(&vm->freed,
2021			struct amdgpu_bo_va_mapping, list);
2022		list_del(&mapping->list);
2023
2024		if (vm->pte_support_ats &&
2025		    mapping->start < AMDGPU_GMC_HOLE_START)
2026			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2027
2028		r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
2029						mapping->start, mapping->last,
2030						init_pte_value, 0, NULL, &f);
2031		amdgpu_vm_free_mapping(adev, vm, mapping, f);
2032		if (r) {
2033			dma_fence_put(f);
2034			return r;
2035		}
2036	}
2037
2038	if (fence && f) {
2039		dma_fence_put(*fence);
2040		*fence = f;
2041	} else {
2042		dma_fence_put(f);
2043	}
2044
2045	return 0;
2046
2047}
2048
2049/**
2050 * amdgpu_vm_handle_moved - handle moved BOs in the PT
2051 *
2052 * @adev: amdgpu_device pointer
2053 * @vm: requested vm
2054 *
2055 * Make sure all BOs which are moved are updated in the PTs.
2056 *
2057 * Returns:
2058 * 0 for success.
2059 *
2060 * PTs have to be reserved!
2061 */
2062int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2063			   struct amdgpu_vm *vm)
2064{
2065	struct amdgpu_bo_va *bo_va, *tmp;
2066	struct dma_resv *resv;
2067	bool clear;
2068	int r;
2069
2070	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2071		/* Per VM BOs never need to be cleared in the page tables */
2072		r = amdgpu_vm_bo_update(adev, bo_va, false);
2073		if (r)
2074			return r;
2075	}
2076
2077	spin_lock(&vm->invalidated_lock);
2078	while (!list_empty(&vm->invalidated)) {
2079		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2080					 base.vm_status);
2081		resv = bo_va->base.bo->tbo.base.resv;
2082		spin_unlock(&vm->invalidated_lock);
2083
2084		/* Try to reserve the BO to avoid clearing its ptes */
2085		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2086			clear = false;
2087		/* Somebody else is using the BO right now */
2088		else
2089			clear = true;
2090
2091		r = amdgpu_vm_bo_update(adev, bo_va, clear);
2092		if (r)
2093			return r;
2094
2095		if (!clear)
2096			dma_resv_unlock(resv);
2097		spin_lock(&vm->invalidated_lock);
2098	}
2099	spin_unlock(&vm->invalidated_lock);
2100
2101	return 0;
2102}
2103
2104/**
2105 * amdgpu_vm_bo_add - add a bo to a specific vm
2106 *
2107 * @adev: amdgpu_device pointer
2108 * @vm: requested vm
2109 * @bo: amdgpu buffer object
2110 *
2111 * Add @bo into the requested vm.
2112 * Add @bo to the list of bos associated with the vm
2113 *
2114 * Returns:
2115 * Newly added bo_va or NULL for failure
2116 *
2117 * Object has to be reserved!
2118 */
2119struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2120				      struct amdgpu_vm *vm,
2121				      struct amdgpu_bo *bo)
2122{
2123	struct amdgpu_bo_va *bo_va;
2124
2125	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2126	if (bo_va == NULL) {
2127		return NULL;
2128	}
2129	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2130
2131	bo_va->ref_count = 1;
2132	INIT_LIST_HEAD(&bo_va->valids);
2133	INIT_LIST_HEAD(&bo_va->invalids);
2134
2135	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2136	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2137		bo_va->is_xgmi = true;
2138		/* Power up XGMI if it can be potentially used */
2139		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
2140	}
2141
2142	return bo_va;
2143}
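
/*
 * Illustrative sketch (not part of the driver): pairing amdgpu_vm_bo_add()
 * with amdgpu_vm_bo_map(). The BO must already be reserved; the helper
 * name example_add_and_map() and the PTE flag choice are hypothetical.
 */
static int example_add_and_map(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			       uint64_t va)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
	if (r)
		amdgpu_vm_bo_rmv(adev, bo_va);
	return r;
}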
2144
2145
2146/**
2147 * amdgpu_vm_bo_insert_map - insert a new mapping
2148 *
2149 * @adev: amdgpu_device pointer
2150 * @bo_va: bo_va to store the address
2151 * @mapping: the mapping to insert
2152 *
2153 * Insert a new mapping into all structures.
2154 */
2155static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2156				    struct amdgpu_bo_va *bo_va,
2157				    struct amdgpu_bo_va_mapping *mapping)
2158{
2159	struct amdgpu_vm *vm = bo_va->base.vm;
2160	struct amdgpu_bo *bo = bo_va->base.bo;
2161
2162	mapping->bo_va = bo_va;
2163	list_add(&mapping->list, &bo_va->invalids);
2164	amdgpu_vm_it_insert(mapping, &vm->va);
2165
2166	if (mapping->flags & AMDGPU_PTE_PRT)
2167		amdgpu_vm_prt_get(adev);
2168
2169	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2170	    !bo_va->base.moved) {
2171		list_move(&bo_va->base.vm_status, &vm->moved);
2172	}
2173	trace_amdgpu_vm_bo_map(bo_va, mapping);
2174}
2175
2176/**
2177 * amdgpu_vm_bo_map - map bo inside a vm
2178 *
2179 * @adev: amdgpu_device pointer
2180 * @bo_va: bo_va to store the address
2181 * @saddr: where to map the BO
2182 * @offset: requested offset in the BO
2183 * @size: BO size in bytes
2184 * @flags: attributes of pages (read/write/valid/etc.)
2185 *
2186 * Add a mapping of the BO at the specified addr into the VM.
2187 *
2188 * Returns:
2189 * 0 for success, error for failure.
2190 *
2191 * Object has to be reserved and unreserved outside!
2192 */
2193int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2194		     struct amdgpu_bo_va *bo_va,
2195		     uint64_t saddr, uint64_t offset,
2196		     uint64_t size, uint64_t flags)
2197{
2198	struct amdgpu_bo_va_mapping *mapping, *tmp;
2199	struct amdgpu_bo *bo = bo_va->base.bo;
2200	struct amdgpu_vm *vm = bo_va->base.vm;
2201	uint64_t eaddr;
2202
2203	/* validate the parameters */
2204	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2205	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2206		return -EINVAL;
2207
2208	/* make sure object fit at this offset */
2209	eaddr = saddr + size - 1;
2210	if (saddr >= eaddr ||
2211	    (bo && offset + size > amdgpu_bo_size(bo)) ||
2212	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2213		return -EINVAL;
2214
2215	saddr /= AMDGPU_GPU_PAGE_SIZE;
2216	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2217
2218	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2219	if (tmp) {
2220		/* bo and tmp overlap, invalid addr */
2221		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2222			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2223			tmp->start, tmp->last + 1);
2224		return -EINVAL;
2225	}
2226
2227	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2228	if (!mapping)
2229		return -ENOMEM;
2230
2231	mapping->start = saddr;
2232	mapping->last = eaddr;
2233	mapping->offset = offset;
2234	mapping->flags = flags;
2235
2236	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2237
2238	return 0;
2239}
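
/*
 * Worked example of the checks above (illustrative, assuming 4K GPU
 * pages): mapping size 0x3000 at saddr 0x100000 gives eaddr 0x102fff;
 * after dividing by AMDGPU_GPU_PAGE_SIZE the interval tree stores the
 * inclusive page range [0x100, 0x102].
 */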
2240
2241/**
2242 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2243 *
2244 * @adev: amdgpu_device pointer
2245 * @bo_va: bo_va to store the address
2246 * @saddr: where to map the BO
2247 * @offset: requested offset in the BO
2248 * @size: BO size in bytes
2249 * @flags: attributes of pages (read/write/valid/etc.)
2250 *
2251 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2252 * mappings as we do so.
2253 *
2254 * Returns:
2255 * 0 for success, error for failure.
2256 *
2257 * Object has to be reserved and unreserved outside!
2258 */
2259int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2260			     struct amdgpu_bo_va *bo_va,
2261			     uint64_t saddr, uint64_t offset,
2262			     uint64_t size, uint64_t flags)
2263{
2264	struct amdgpu_bo_va_mapping *mapping;
2265	struct amdgpu_bo *bo = bo_va->base.bo;
2266	uint64_t eaddr;
2267	int r;
2268
2269	/* validate the parameters */
2270	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2271	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2272		return -EINVAL;
2273
2274	/* make sure object fit at this offset */
2275	eaddr = saddr + size - 1;
2276	if (saddr >= eaddr ||
2277	    (bo && offset + size > amdgpu_bo_size(bo)) ||
2278	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2279		return -EINVAL;
2280
2281	/* Allocate all the needed memory */
2282	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2283	if (!mapping)
2284		return -ENOMEM;
2285
2286	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2287	if (r) {
2288		kfree(mapping);
2289		return r;
2290	}
2291
2292	saddr /= AMDGPU_GPU_PAGE_SIZE;
2293	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2294
2295	mapping->start = saddr;
2296	mapping->last = eaddr;
2297	mapping->offset = offset;
2298	mapping->flags = flags;
2299
2300	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2301
2302	return 0;
2303}
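
/*
 * Illustrative contrast (hypothetical caller): where amdgpu_vm_bo_map()
 * fails with -EINVAL on an overlap, the replace variant lets the new
 * mapping win, e.g. when remapping a buffer in place:
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, va, 0,
 *				     amdgpu_bo_size(bo), flags);
 */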
2304
2305/**
2306 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2307 *
2308 * @adev: amdgpu_device pointer
2309 * @bo_va: bo_va to remove the address from
2310 * @saddr: where the BO is mapped
2311 *
2312 * Remove a mapping of the BO at the specified addr from the VM.
2313 *
2314 * Returns:
2315 * 0 for success, error for failure.
2316 *
2317 * Object has to be reserved and unreserved outside!
2318 */
2319int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2320		       struct amdgpu_bo_va *bo_va,
2321		       uint64_t saddr)
2322{
2323	struct amdgpu_bo_va_mapping *mapping;
2324	struct amdgpu_vm *vm = bo_va->base.vm;
2325	bool valid = true;
2326
2327	saddr /= AMDGPU_GPU_PAGE_SIZE;
2328
2329	list_for_each_entry(mapping, &bo_va->valids, list) {
2330		if (mapping->start == saddr)
2331			break;
2332	}
2333
2334	if (&mapping->list == &bo_va->valids) {
2335		valid = false;
2336
2337		list_for_each_entry(mapping, &bo_va->invalids, list) {
2338			if (mapping->start == saddr)
2339				break;
2340		}
2341
2342		if (&mapping->list == &bo_va->invalids)
2343			return -ENOENT;
2344	}
2345
2346	list_del(&mapping->list);
2347	amdgpu_vm_it_remove(mapping, &vm->va);
2348	mapping->bo_va = NULL;
2349	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2350
2351	if (valid)
2352		list_add(&mapping->list, &vm->freed);
2353	else
2354		amdgpu_vm_free_mapping(adev, vm, mapping,
2355				       bo_va->last_pt_update);
2356
2357	return 0;
2358}
2359
2360/**
2361 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2362 *
2363 * @adev: amdgpu_device pointer
2364 * @vm: VM structure to use
2365 * @saddr: start of the range
2366 * @size: size of the range
2367 *
2368 * Remove all mappings in a range, split them as appropriate.
2369 *
2370 * Returns:
2371 * 0 for success, error for failure.
2372 */
2373int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2374				struct amdgpu_vm *vm,
2375				uint64_t saddr, uint64_t size)
2376{
2377	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2378	LIST_HEAD(removed);
2379	uint64_t eaddr;
2380
2381	eaddr = saddr + size - 1;
2382	saddr /= AMDGPU_GPU_PAGE_SIZE;
2383	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2384
2385	/* Allocate all the needed memory */
2386	before = kzalloc(sizeof(*before), GFP_KERNEL);
2387	if (!before)
2388		return -ENOMEM;
2389	INIT_LIST_HEAD(&before->list);
2390
2391	after = kzalloc(sizeof(*after), GFP_KERNEL);
2392	if (!after) {
2393		kfree(before);
2394		return -ENOMEM;
2395	}
2396	INIT_LIST_HEAD(&after->list);
2397
2398	/* Now gather all removed mappings */
2399	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2400	while (tmp) {
2401		/* Remember mapping split at the start */
2402		if (tmp->start < saddr) {
2403			before->start = tmp->start;
2404			before->last = saddr - 1;
2405			before->offset = tmp->offset;
2406			before->flags = tmp->flags;
2407			before->bo_va = tmp->bo_va;
2408			list_add(&before->list, &tmp->bo_va->invalids);
2409		}
2410
2411		/* Remember mapping split at the end */
2412		if (tmp->last > eaddr) {
2413			after->start = eaddr + 1;
2414			after->last = tmp->last;
2415			after->offset = tmp->offset;
2416			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2417			after->flags = tmp->flags;
2418			after->bo_va = tmp->bo_va;
2419			list_add(&after->list, &tmp->bo_va->invalids);
2420		}
2421
2422		list_del(&tmp->list);
2423		list_add(&tmp->list, &removed);
2424
2425		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2426	}
2427
2428	/* And free them up */
2429	list_for_each_entry_safe(tmp, next, &removed, list) {
2430		amdgpu_vm_it_remove(tmp, &vm->va);
2431		list_del(&tmp->list);
2432
2433		if (tmp->start < saddr)
2434			tmp->start = saddr;
2435		if (tmp->last > eaddr)
2436			tmp->last = eaddr;
2437
2438		tmp->bo_va = NULL;
2439		list_add(&tmp->list, &vm->freed);
2440		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2441	}
2442
2443	/* Insert partial mapping before the range */
2444	if (!list_empty(&before->list)) {
2445		amdgpu_vm_it_insert(before, &vm->va);
2446		if (before->flags & AMDGPU_PTE_PRT)
2447			amdgpu_vm_prt_get(adev);
2448	} else {
2449		kfree(before);
2450	}
2451
2452	/* Insert partial mapping after the range */
2453	if (!list_empty(&after->list)) {
2454		amdgpu_vm_it_insert(after, &vm->va);
2455		if (after->flags & AMDGPU_PTE_PRT)
2456			amdgpu_vm_prt_get(adev);
2457	} else {
2458		kfree(after);
2459	}
2460
2461	return 0;
2462}
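
/*
 * Worked example of the splitting above (illustrative): clearing pages
 * [0x20, 0x2f] out of an existing mapping covering [0x10, 0x3f] leaves a
 * "before" remainder [0x10, 0x1f] and an "after" remainder [0x30, 0x3f],
 * with after->offset advanced by (0x30 - 0x10) << PAGE_SHIFT bytes into
 * the BO.
 */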
2463
2464/**
2465 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2466 *
2467 * @vm: the requested VM
2468 * @addr: the address
2469 *
2470 * Find a mapping by its address.
2471 *
2472 * Returns:
2473 * The amdgpu_bo_va_mapping matching for addr or NULL
2474 *
2475 */
2476struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2477							 uint64_t addr)
2478{
2479	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2480}
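
/*
 * Illustrative usage (hypothetical caller): the interval tree is indexed
 * in GPU pages, so byte addresses have to be scaled first:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm,
 *					      addr / AMDGPU_GPU_PAGE_SIZE);
 */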
2481
2482/**
2483 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2484 *
2485 * @vm: the requested vm
2486 * @ticket: CS ticket
2487 *
2488 * Trace all mappings of BOs reserved during a command submission.
2489 */
2490void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2491{
2492	struct amdgpu_bo_va_mapping *mapping;
2493
2494	if (!trace_amdgpu_vm_bo_cs_enabled())
2495		return;
2496
2497	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2498	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2499		if (mapping->bo_va && mapping->bo_va->base.bo) {
2500			struct amdgpu_bo *bo;
2501
2502			bo = mapping->bo_va->base.bo;
2503			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2504			    ticket)
2505				continue;
2506		}
2507
2508		trace_amdgpu_vm_bo_cs(mapping);
2509	}
2510}
2511
2512/**
2513 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2514 *
2515 * @adev: amdgpu_device pointer
2516 * @bo_va: requested bo_va
2517 *
2518 * Remove @bo_va->bo from the requested vm.
2519 *
2520 * Object has to be reserved!
2521 */
2522void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2523		      struct amdgpu_bo_va *bo_va)
2524{
2525	struct amdgpu_bo_va_mapping *mapping, *next;
2526	struct amdgpu_bo *bo = bo_va->base.bo;
2527	struct amdgpu_vm *vm = bo_va->base.vm;
2528	struct amdgpu_vm_bo_base **base;
2529
2530	if (bo) {
2531		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2532			vm->bulk_moveable = false;
2533
2534		for (base = &bo_va->base.bo->vm_bo; *base;
2535		     base = &(*base)->next) {
2536			if (*base != &bo_va->base)
2537				continue;
2538
2539			*base = bo_va->base.next;
2540			break;
2541		}
2542	}
2543
2544	spin_lock(&vm->invalidated_lock);
2545	list_del(&bo_va->base.vm_status);
2546	spin_unlock(&vm->invalidated_lock);
2547
2548	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2549		list_del(&mapping->list);
2550		amdgpu_vm_it_remove(mapping, &vm->va);
2551		mapping->bo_va = NULL;
2552		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2553		list_add(&mapping->list, &vm->freed);
2554	}
2555	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2556		list_del(&mapping->list);
2557		amdgpu_vm_it_remove(mapping, &vm->va);
2558		amdgpu_vm_free_mapping(adev, vm, mapping,
2559				       bo_va->last_pt_update);
2560	}
2561
2562	dma_fence_put(bo_va->last_pt_update);
2563
2564	if (bo && bo_va->is_xgmi)
2565		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2566
2567	kfree(bo_va);
2568}
2569
2570/**
2571 * amdgpu_vm_evictable - check if we can evict a VM
2572 *
2573 * @bo: A page table of the VM.
2574 *
2575 * Check if it is possible to evict a VM.
2576 */
2577bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2578{
2579	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2580
2581	/* Page tables of a destroyed VM can go away immediately */
2582	if (!bo_base || !bo_base->vm)
2583		return true;
2584
2585	/* Don't evict VM page tables while they are busy */
2586	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
2587		return false;
2588
2589	/* Try to block ongoing updates */
2590	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2591		return false;
2592
2593	/* Don't evict VM page tables while they are updated */
2594	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2595		amdgpu_vm_eviction_unlock(bo_base->vm);
2596		return false;
2597	}
2598
2599	bo_base->vm->evicting = true;
2600	amdgpu_vm_eviction_unlock(bo_base->vm);
2601	return true;
2602}
2603
2604/**
2605 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2606 *
2607 * @adev: amdgpu_device pointer
2608 * @bo: amdgpu buffer object
2609 * @evicted: is the BO evicted
2610 *
2611 * Mark @bo as invalid.
2612 */
2613void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2614			     struct amdgpu_bo *bo, bool evicted)
2615{
2616	struct amdgpu_vm_bo_base *bo_base;
2617
2618	/* shadow bo doesn't have bo base, its validation needs its parent */
2619	if (bo->parent && bo->parent->shadow == bo)
2620		bo = bo->parent;
2621
2622	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2623		struct amdgpu_vm *vm = bo_base->vm;
2624
2625		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
2626			amdgpu_vm_bo_evicted(bo_base);
2627			continue;
2628		}
2629
2630		if (bo_base->moved)
2631			continue;
2632		bo_base->moved = true;
2633
2634		if (bo->tbo.type == ttm_bo_type_kernel)
2635			amdgpu_vm_bo_relocated(bo_base);
2636		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2637			amdgpu_vm_bo_moved(bo_base);
2638		else
2639			amdgpu_vm_bo_invalidated(bo_base);
2640	}
2641}
2642
2643/**
2644 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2645 *
2646 * @vm_size: VM size
2647 *
2648 * Returns:
2649 * VM page table size as a power of two
2650 */
2651static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2652{
2653	/* Total bits covered by PD + PTs */
2654	unsigned bits = ilog2(vm_size) + 18;
2655
2656	/* Make sure the PD is 4K in size up to 8GB address space.
2657	   Above that, split equally between PD and PTs */
2658	if (vm_size <= 8)
2659		return (bits - 9);
2660	else
2661		return ((bits + 3) / 2);
2662}
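
/*
 * Worked example (illustrative): for an 8GB VM, bits = ilog2(8) + 18 = 21
 * and the result is 21 - 9 = 12 bits for the page tables, leaving 9 bits
 * (512 entries, a 4K PD). For a 256GB VM, bits = 26 and the split is
 * (26 + 3) / 2 = 14 bits for the page tables, 12 bits for the PD.
 */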
2663
2664/**
2665 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2666 *
2667 * @adev: amdgpu_device pointer
2668 * @min_vm_size: the minimum vm size in GB if it's set auto
2669 * @fragment_size_default: Default PTE fragment size
2670 * @max_level: max VMPT level
2671 * @max_bits: max address space size in bits
2672 *
2673 */
2674void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2675			   uint32_t fragment_size_default, unsigned max_level,
2676			   unsigned max_bits)
2677{
2678	unsigned int max_size = 1 << (max_bits - 30);
2679	unsigned int vm_size;
2680	uint64_t tmp;
2681
2682	/* adjust vm size first */
2683	if (amdgpu_vm_size != -1) {
2684		vm_size = amdgpu_vm_size;
2685		if (vm_size > max_size) {
2686			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2687				 amdgpu_vm_size, max_size);
2688			vm_size = max_size;
2689		}
2690	} else {
2691		struct sysinfo si;
2692		unsigned int phys_ram_gb;
2693
2694		/* Optimal VM size depends on the amount of physical
2695		 * RAM available. Underlying requirements and
2696		 * assumptions:
2697		 *
2698		 *  - Need to map system memory and VRAM from all GPUs
2699		 *     - VRAM from other GPUs not known here
2700		 *     - Assume VRAM <= system memory
2701		 *  - On GFX8 and older, VM space can be segmented for
2702		 *    different MTYPEs
2703		 *  - Need to allow room for fragmentation, guard pages etc.
2704		 *
2705		 * This adds up to a rough guess of system memory x3.
2706		 * Round up to power of two to maximize the available
2707		 * VM size with the given page table size.
2708		 */
2709		si_meminfo(&si);
2710		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2711			       (1 << 30) - 1) >> 30;
2712		vm_size = roundup_pow_of_two(
2713			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2714	}
2715
2716	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2717
2718	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2719	if (amdgpu_vm_block_size != -1)
2720		tmp >>= amdgpu_vm_block_size - 9;
2721	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2722	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2723	switch (adev->vm_manager.num_level) {
2724	case 3:
2725		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2726		break;
2727	case 2:
2728		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2729		break;
2730	case 1:
2731		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2732		break;
2733	default:
2734		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2735	}
2736	/* block size depends on vm size and hw setup */
2737	if (amdgpu_vm_block_size != -1)
2738		adev->vm_manager.block_size =
2739			min((unsigned)amdgpu_vm_block_size, max_bits
2740			    - AMDGPU_GPU_PAGE_SHIFT
2741			    - 9 * adev->vm_manager.num_level);
2742	else if (adev->vm_manager.num_level > 1)
2743		adev->vm_manager.block_size = 9;
2744	else
2745		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2746
2747	if (amdgpu_vm_fragment_size == -1)
2748		adev->vm_manager.fragment_size = fragment_size_default;
2749	else
2750		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2751
2752	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2753		 vm_size, adev->vm_manager.num_level + 1,
2754		 adev->vm_manager.block_size,
2755		 adev->vm_manager.fragment_size);
2756}
2757
2758/**
2759 * amdgpu_vm_wait_idle - wait for the VM to become idle
2760 *
2761 * @vm: VM object to wait for
2762 * @timeout: timeout to wait for VM to become idle
2763 */
2764long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2765{
2766	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
2767					true, true, timeout);
2768	if (timeout <= 0)
2769		return timeout;
2770
2771	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2772}
2773
2774/**
2775 * amdgpu_vm_init - initialize a vm instance
2776 *
2777 * @adev: amdgpu_device pointer
2778 * @vm: requested vm
2779 * @vm_context: Indicates if it is a GFX or Compute context
2780 * @pasid: Process address space identifier
2781 *
2782 * Init @vm fields.
2783 *
2784 * Returns:
2785 * 0 for success, error for failure.
2786 */
2787int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2788		   int vm_context, unsigned int pasid)
2789{
2790	struct amdgpu_bo_param bp;
2791	struct amdgpu_bo *root;
2792	int r, i;
2793
2794	vm->va = RB_ROOT_CACHED;
2795	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2796		vm->reserved_vmid[i] = NULL;
2797	INIT_LIST_HEAD(&vm->evicted);
2798	INIT_LIST_HEAD(&vm->relocated);
2799	INIT_LIST_HEAD(&vm->moved);
2800	INIT_LIST_HEAD(&vm->idle);
2801	INIT_LIST_HEAD(&vm->invalidated);
2802	spin_lock_init(&vm->invalidated_lock);
2803	INIT_LIST_HEAD(&vm->freed);
2804
2805
2806	/* create scheduler entities for page table updates */
2807	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
2808				  adev->vm_manager.vm_pte_scheds,
2809				  adev->vm_manager.vm_pte_num_scheds, NULL);
2810	if (r)
2811		return r;
2812
2813	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2814				  adev->vm_manager.vm_pte_scheds,
2815				  adev->vm_manager.vm_pte_num_scheds, NULL);
2816	if (r)
2817		goto error_free_immediate;
2818
2819	vm->pte_support_ats = false;
2820	vm->is_compute_context = false;
2821
2822	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2823		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2824						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2825
2826		if (adev->asic_type == CHIP_RAVEN)
2827			vm->pte_support_ats = true;
2828	} else {
2829		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2830						AMDGPU_VM_USE_CPU_FOR_GFX);
2831	}
2832	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2833			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2834	WARN_ONCE((vm->use_cpu_for_update &&
2835		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2836		  "CPU update of VM recommended only for large BAR system\n");
2837
2838	if (vm->use_cpu_for_update)
2839		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2840	else
2841		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2842	vm->last_update = NULL;
2843	vm->last_unlocked = dma_fence_get_stub();
2844
2845	mutex_init(&vm->eviction_lock);
2846	vm->evicting = false;
2847
2848	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
2849	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2850		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2851	r = amdgpu_bo_create(adev, &bp, &root);
2852	if (r)
2853		goto error_free_delayed;
2854
2855	r = amdgpu_bo_reserve(root, true);
2856	if (r)
2857		goto error_free_root;
2858
2859	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2860	if (r)
2861		goto error_unreserve;
2862
2863	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2864
2865	r = amdgpu_vm_clear_bo(adev, vm, root, false);
2866	if (r)
2867		goto error_unreserve;
2868
2869	amdgpu_bo_unreserve(vm->root.base.bo);
2870
2871	if (pasid) {
2872		unsigned long flags;
2873
2874		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2875		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2876			      GFP_ATOMIC);
2877		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2878		if (r < 0)
2879			goto error_free_root;
2880
2881		vm->pasid = pasid;
2882	}
2883
2884	INIT_KFIFO(vm->faults);
2885
2886	return 0;
2887
2888error_unreserve:
2889	amdgpu_bo_unreserve(vm->root.base.bo);
2890
2891error_free_root:
2892	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2893	amdgpu_bo_unref(&vm->root.base.bo);
2894	vm->root.base.bo = NULL;
2895
2896error_free_delayed:
2897	dma_fence_put(vm->last_unlocked);
2898	drm_sched_entity_destroy(&vm->delayed);
2899
2900error_free_immediate:
2901	drm_sched_entity_destroy(&vm->immediate);
2902
2903	return r;
2904}
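
/*
 * Illustrative sketch (not part of the driver): the per-open lifetime of
 * a GFX VM, roughly as the KMS open/close paths use it. example_open()
 * and example_close() are hypothetical, and passing pasid 0 skips the
 * PASID registration.
 */
static int example_open(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	return amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
}

static void example_close(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	amdgpu_vm_fini(adev, vm);
}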
2905
2906/**
2907 * amdgpu_vm_check_clean_reserved - check if a VM is clean
2908 *
2909 * @adev: amdgpu_device pointer
2910 * @vm: the VM to check
2911 *
2912 * Check all entries of the root PD. If any subsequent PDs are allocated,
2913 * page tables are being created and filled in, so the VM is not a clean
2914 * one.
2915 *
2916 * Returns:
2917 *	0 if this VM is clean
2918 */
2919static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2920	struct amdgpu_vm *vm)
2921{
2922	enum amdgpu_vm_level root = adev->vm_manager.root_level;
2923	unsigned int entries = amdgpu_vm_num_entries(adev, root);
2924	unsigned int i = 0;
2925
2926	if (!(vm->root.entries))
2927		return 0;
2928
2929	for (i = 0; i < entries; i++) {
2930		if (vm->root.entries[i].base.bo)
2931			return -EINVAL;
2932	}
2933
2934	return 0;
2935}
2936
2937/**
2938 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2939 *
2940 * @adev: amdgpu_device pointer
2941 * @vm: requested vm
2942 * @pasid: pasid to use
2943 *
2944 * This only works on GFX VMs that don't have any BOs added and no
2945 * page tables allocated yet.
2946 *
2947 * Changes the following VM parameters:
2948 * - use_cpu_for_update
2949 * - pte_support_ats
2950 * - pasid (old PASID is released, because compute manages its own PASIDs)
2951 *
2952 * Reinitializes the page directory to reflect the changed ATS
2953 * setting.
2954 *
2955 * Returns:
2956 * 0 for success, -errno for errors.
2957 */
2958int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2959			   unsigned int pasid)
2960{
2961	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2962	int r;
2963
2964	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2965	if (r)
2966		return r;
2967
2968	/* Sanity checks */
2969	r = amdgpu_vm_check_clean_reserved(adev, vm);
2970	if (r)
2971		goto unreserve_bo;
2972
2973	if (pasid) {
2974		unsigned long flags;
2975
2976		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2977		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2978			      GFP_ATOMIC);
2979		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2980
2981		if (r == -ENOSPC)
2982			goto unreserve_bo;
2983		r = 0;
2984	}
2985
2986	/* Check if PD needs to be reinitialized and do it before
2987	 * changing any other state, in case it fails.
2988	 */
2989	if (pte_support_ats != vm->pte_support_ats) {
2990		vm->pte_support_ats = pte_support_ats;
2991		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2992		if (r)
2993			goto free_idr;
2994	}
2995
2996	/* Update VM state */
2997	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2998				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2999	DRM_DEBUG_DRIVER("VM update mode is %s\n",
3000			 vm->use_cpu_for_update ? "CPU" : "SDMA");
3001	WARN_ONCE((vm->use_cpu_for_update &&
3002		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3003		  "CPU update of VM recommended only for large BAR system\n");
3004
3005	if (vm->use_cpu_for_update) {
3006		/* Sync with last SDMA update/clear before switching to CPU */
3007		r = amdgpu_bo_sync_wait(vm->root.base.bo,
3008					AMDGPU_FENCE_OWNER_UNDEFINED, true);
3009		if (r)
3010			goto free_idr;
3011
3012		vm->update_funcs = &amdgpu_vm_cpu_funcs;
3013	} else {
3014		vm->update_funcs = &amdgpu_vm_sdma_funcs;
3015	}
3016	dma_fence_put(vm->last_update);
3017	vm->last_update = NULL;
3018	vm->is_compute_context = true;
3019
3020	if (vm->pasid) {
3021		unsigned long flags;
3022
3023		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3024		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3025		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3026
3027		/* Free the original amdgpu allocated pasid
3028		 * Will be replaced with kfd allocated pasid
3029		 */
3030		amdgpu_pasid_free(vm->pasid);
3031		vm->pasid = 0;
3032	}
3033
3034	/* Free the shadow bo for compute VM */
3035	amdgpu_bo_unref(&vm->root.base.bo->shadow);
3036
3037	if (pasid)
3038		vm->pasid = pasid;
3039
3040	goto unreserve_bo;
3041
3042free_idr:
3043	if (pasid) {
3044		unsigned long flags;
3045
3046		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3047		idr_remove(&adev->vm_manager.pasid_idr, pasid);
3048		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3049	}
3050unreserve_bo:
3051	amdgpu_bo_unreserve(vm->root.base.bo);
3052	return r;
3053}
3054
3055/**
3056 * amdgpu_vm_release_compute - release a compute vm
3057 * @adev: amdgpu_device pointer
3058 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3059 *
3060 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
3061 * compute pasid from the vm. Compute should stop using the vm after this call.
3062 */
3063void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3064{
3065	if (vm->pasid) {
3066		unsigned long flags;
3067
3068		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3069		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3070		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3071	}
3072	vm->pasid = 0;
3073	vm->is_compute_context = false;
3074}
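
/*
 * Illustrative pairing (hypothetical caller, error handling trimmed):
 *
 *	r = amdgpu_vm_make_compute(adev, vm, kfd_pasid);
 *	...
 *	amdgpu_vm_release_compute(adev, vm);
 */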
3075
3076/**
3077 * amdgpu_vm_fini - tear down a vm instance
3078 *
3079 * @adev: amdgpu_device pointer
3080 * @vm: requested vm
3081 *
3082 * Tear down @vm.
3083 * Unbind the VM and remove all bos from the vm bo list
3084 */
3085void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3086{
3087	struct amdgpu_bo_va_mapping *mapping, *tmp;
3088	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
3089	struct amdgpu_bo *root;
3090	int i;
3091
3092	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
3093
3094	root = amdgpu_bo_ref(vm->root.base.bo);
3095	amdgpu_bo_reserve(root, true);
3096	if (vm->pasid) {
3097		unsigned long flags;
3098
3099		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3100		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3101		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3102		vm->pasid = 0;
3103	}
3104
3105	dma_fence_wait(vm->last_unlocked, false);
3106	dma_fence_put(vm->last_unlocked);
3107
3108	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
3109		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
3110			amdgpu_vm_prt_fini(adev, vm);
3111			prt_fini_needed = false;
3112		}
3113
3114		list_del(&mapping->list);
3115		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
3116	}
3117
3118	amdgpu_vm_free_pts(adev, vm, NULL);
3119	amdgpu_bo_unreserve(root);
3120	amdgpu_bo_unref(&root);
3121	WARN_ON(vm->root.base.bo);
3122
3123	drm_sched_entity_destroy(&vm->immediate);
3124	drm_sched_entity_destroy(&vm->delayed);
3125
3126	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3127		dev_err(adev->dev, "still active bo inside vm\n");
3128	}
3129	rbtree_postorder_for_each_entry_safe(mapping, tmp,
3130					     &vm->va.rb_root, rb) {
3131		/* Don't remove the mapping here, we don't want to trigger a
3132		 * rebalance and the tree is about to be destroyed anyway.
3133		 */
3134		list_del(&mapping->list);
3135		kfree(mapping);
3136	}
3137
3138	dma_fence_put(vm->last_update);
3139	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3140		amdgpu_vmid_free_reserved(adev, vm, i);
3141}
3142
3143/**
3144 * amdgpu_vm_manager_init - init the VM manager
3145 *
3146 * @adev: amdgpu_device pointer
3147 *
3148 * Initialize the VM manager structures
3149 */
3150void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3151{
3152	unsigned i;
3153
3154	amdgpu_vmid_mgr_init(adev);
3155
3156	adev->vm_manager.fence_context =
3157		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3158	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3159		adev->vm_manager.seqno[i] = 0;
3160
3161	spin_lock_init(&adev->vm_manager.prt_lock);
3162	atomic_set(&adev->vm_manager.num_prt_users, 0);
3163
3164	/* If not overridden by the user, by default, only in large BAR systems
3165	 * Compute VM tables will be updated by CPU
3166	 */
3167#ifdef CONFIG_X86_64
3168	if (amdgpu_vm_update_mode == -1) {
3169		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3170			adev->vm_manager.vm_update_mode =
3171				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3172		else
3173			adev->vm_manager.vm_update_mode = 0;
3174	} else
3175		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3176#else
3177	adev->vm_manager.vm_update_mode = 0;
3178#endif
3179
3180	idr_init(&adev->vm_manager.pasid_idr);
3181	spin_lock_init(&adev->vm_manager.pasid_lock);
3182}
3183
3184/**
3185 * amdgpu_vm_manager_fini - cleanup VM manager
3186 *
3187 * @adev: amdgpu_device pointer
3188 *
3189 * Cleanup the VM manager and free resources.
3190 */
3191void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3192{
3193	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3194	idr_destroy(&adev->vm_manager.pasid_idr);
3195
3196	amdgpu_vmid_mgr_fini(adev);
3197}
3198
3199/**
3200 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3201 *
3202 * @dev: drm device pointer
3203 * @data: drm_amdgpu_vm
3204 * @filp: drm file pointer
3205 *
3206 * Returns:
3207 * 0 for success, -errno for errors.
3208 */
3209int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3210{
3211	union drm_amdgpu_vm *args = data;
3212	struct amdgpu_device *adev = drm_to_adev(dev);
3213	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3214	long timeout = msecs_to_jiffies(2000);
3215	int r;
3216
3217	switch (args->in.op) {
3218	case AMDGPU_VM_OP_RESERVE_VMID:
3219		/* We only need to reserve a vmid from the gfxhub */
3220		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3221					       AMDGPU_GFXHUB_0);
3222		if (r)
3223			return r;
3224		break;
3225	case AMDGPU_VM_OP_UNRESERVE_VMID:
3226		if (amdgpu_sriov_runtime(adev))
3227			timeout = 8 * timeout;
3228
3229		/* Wait vm idle to make sure the vmid set in SPM_VMID is
3230		 * not referenced anymore.
3231		 */
3232		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
3233		if (r)
3234			return r;
3235
3236		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
3237		if (r < 0)
3238			return r;
3239
3240		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
3241		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3242		break;
3243	default:
3244		return -EINVAL;
3245	}
3246
3247	return 0;
3248}
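
/*
 * Illustrative userspace sketch (not part of the driver): reserving and
 * later unreserving a VMID through this ioctl via libdrm. The fd variable
 * and the surrounding program are hypothetical.
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *	...
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */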
3249
3250/**
3251 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3252 *
3253 * @adev: drm device pointer
3254 * @pasid: PASID identifier for VM
3255 * @task_info: task_info to fill.
3256 */
3257void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3258			 struct amdgpu_task_info *task_info)
3259{
3260	struct amdgpu_vm *vm;
3261	unsigned long flags;
3262
3263	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3264
3265	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3266	if (vm)
3267		*task_info = vm->task_info;
3268
3269	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3270}
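
/*
 * Illustrative usage (hypothetical caller): fault handlers resolve a
 * faulting PASID to a process roughly like this:
 *
 *	struct amdgpu_task_info task_info;
 *
 *	memset(&task_info, 0, sizeof(task_info));
 *	amdgpu_vm_get_task_info(adev, pasid, &task_info);
 *	DRM_ERROR("fault in process %s pid %d\n",
 *		  task_info.process_name, task_info.pid);
 */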
3271
3272/**
3273 * amdgpu_vm_set_task_info - Sets VMs task info.
3274 *
3275 * @vm: vm for which to set the info
3276 */
3277void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3278{
3279	if (vm->task_info.pid)
3280		return;
3281
3282	vm->task_info.pid = current->pid;
3283	get_task_comm(vm->task_info.task_name, current);
3284
3285	if (current->group_leader->mm != current->mm)
3286		return;
3287
3288	vm->task_info.tgid = current->group_leader->pid;
3289	get_task_comm(vm->task_info.process_name, current->group_leader);
3290}
3291
3292/**
3293 * amdgpu_vm_handle_fault - graceful handling of VM faults.
3294 * @adev: amdgpu device pointer
3295 * @pasid: PASID of the VM
3296 * @addr: Address of the fault
3297 *
3298 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3299 * shouldn't be reported any more.
3300 */
3301bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
3302			    uint64_t addr)
3303{
3304	struct amdgpu_bo *root;
3305	uint64_t value, flags;
3306	struct amdgpu_vm *vm;
3307	long r;
3308
3309	spin_lock(&adev->vm_manager.pasid_lock);
3310	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3311	if (vm)
3312		root = amdgpu_bo_ref(vm->root.base.bo);
3313	else
3314		root = NULL;
3315	spin_unlock(&adev->vm_manager.pasid_lock);
3316
3317	if (!root)
3318		return false;
3319
3320	r = amdgpu_bo_reserve(root, true);
3321	if (r)
3322		goto error_unref;
3323
3324	/* Double check that the VM still exists */
3325	spin_lock(&adev->vm_manager.pasid_lock);
3326	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3327	if (vm && vm->root.base.bo != root)
3328		vm = NULL;
3329	spin_unlock(&adev->vm_manager.pasid_lock);
3330	if (!vm)
3331		goto error_unlock;
3332
3333	addr /= AMDGPU_GPU_PAGE_SIZE;
3334	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3335		AMDGPU_PTE_SYSTEM;
3336
3337	if (vm->is_compute_context) {
3338		/* Intentionally setting invalid PTE flag
3339		 * combination to force a no-retry-fault
3340		 */
3341		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
3342			AMDGPU_PTE_TF;
3343		value = 0;
3344
3345	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3346		/* Redirect the access to the dummy page */
3347		value = adev->dummy_page_addr;
3348		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3349			AMDGPU_PTE_WRITEABLE;
3350
3351	} else {
3352		/* Let the hw retry silently on the PTE */
3353		value = 0;
3354	}
3355
3356	r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
3357					addr + 1, flags, value, NULL, NULL);
3358	if (r)
3359		goto error_unlock;
3360
3361	r = amdgpu_vm_update_pdes(adev, vm, true);
3362
3363error_unlock:
3364	amdgpu_bo_unreserve(root);
3365	if (r < 0)
3366		DRM_ERROR("Can't handle page fault (%ld)\n", r);
3367
3368error_unref:
3369	amdgpu_bo_unref(&root);
3370
3371	return false;
3372}