   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28
  29#include <linux/dma-fence-array.h>
  30#include <linux/interval_tree_generic.h>
  31#include <linux/idr.h>
  32#include <linux/dma-buf.h>
  33
  34#include <drm/amdgpu_drm.h>
  35#include <drm/drm_drv.h>
  36#include <drm/ttm/ttm_tt.h>
  37#include <drm/drm_exec.h>
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_amdkfd.h"
  41#include "amdgpu_gmc.h"
  42#include "amdgpu_xgmi.h"
  43#include "amdgpu_dma_buf.h"
  44#include "amdgpu_res_cursor.h"
  45#include "kfd_svm.h"
  46
  47/**
  48 * DOC: GPUVM
  49 *
  50 * GPUVM is the MMU functionality provided on the GPU.
  51 * GPUVM is similar to the legacy GART on older asics, however
  52 * rather than there being a single global GART table
  53 * for the entire GPU, there can be multiple GPUVM page tables active
   54 * at any given time.  The GPUVM page tables can contain a mix of
  55 * VRAM pages and system pages (both memory and MMIO) and system pages
  56 * can be mapped as snooped (cached system pages) or unsnooped
  57 * (uncached system pages).
  58 *
  59 * Each active GPUVM has an ID associated with it and there is a page table
  60 * linked with each VMID.  When executing a command buffer,
  61 * the kernel tells the engine what VMID to use for that command
  62 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  63 * The userspace drivers maintain their own address space and the kernel
   64 * sets up their page tables accordingly when they submit their
  65 * command buffers and a VMID is assigned.
  66 * The hardware supports up to 16 active GPUVMs at any given time.
  67 *
  68 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
  69 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
  70 * as other features such as encryption and caching attributes.
  71 *
  72 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
  73 * addition to an aperture managed by a page table, VMID 0 also has
  74 * several other apertures.  There is an aperture for direct access to VRAM
  75 * and there is a legacy AGP aperture which just forwards accesses directly
  76 * to the matching system physical addresses (or IOVAs when an IOMMU is
  77 * present).  These apertures provide direct access to these memories without
  78 * incurring the overhead of a page table.  VMID 0 is used by the kernel
  79 * driver for tasks like memory management.
  80 *
  81 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
  82 * For user applications, each application can have their own unique GPUVM
  83 * address space.  The application manages the address space and the kernel
   84 * driver manages the GPUVM page tables for each process.  If a GPU client
  85 * accesses an invalid page, it will generate a GPU page fault, similar to
  86 * accessing an invalid page on a CPU.
  87 */
  88
  89#define START(node) ((node)->start)
  90#define LAST(node) ((node)->last)
  91
  92INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
  93		     START, LAST, static, amdgpu_vm_it)
  94
  95#undef START
  96#undef LAST
  97
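/*
 * Illustrative sketch (not part of the original file): INTERVAL_TREE_DEFINE
 * above generates the amdgpu_vm_it_insert/remove/iter_first/iter_next helpers
 * used throughout this file.  The hypothetical helper below only demonstrates
 * the generated iterator API by counting the mappings that overlap a range of
 * GPU pages.
 */
static unsigned int example_count_overlapping_mappings(struct amdgpu_vm *vm,
							uint64_t start,
							uint64_t last)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned int count = 0;

	/* visit every mapping whose [start, last] interval overlaps the range */
	for (mapping = amdgpu_vm_it_iter_first(&vm->va, start, last); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, start, last))
		++count;

	return count;
}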
  98/**
  99 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 100 */
 101struct amdgpu_prt_cb {
 102
 103	/**
 104	 * @adev: amdgpu device
 105	 */
 106	struct amdgpu_device *adev;
 107
 108	/**
 109	 * @cb: callback
 110	 */
 111	struct dma_fence_cb cb;
 112};
 113
 114/**
 115 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 116 */
 117struct amdgpu_vm_tlb_seq_struct {
 118	/**
 119	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
 120	 */
 121	struct amdgpu_vm *vm;
 122
 123	/**
 124	 * @cb: callback
 125	 */
 126	struct dma_fence_cb cb;
 127};
 128
 129/**
 130 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 131 *
 132 * @adev: amdgpu_device pointer
 133 * @vm: amdgpu_vm pointer
 134 * @pasid: the pasid the VM is using on this GPU
 135 *
 136 * Set the pasid this VM is using on this GPU, can also be used to remove the
 137 * pasid by passing in zero.
 138 *
 139 */
 140int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 141			u32 pasid)
 142{
 143	int r;
 144
 145	if (vm->pasid == pasid)
 146		return 0;
 147
 148	if (vm->pasid) {
 149		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
 150		if (r < 0)
 151			return r;
 152
 153		vm->pasid = 0;
 154	}
 155
 156	if (pasid) {
 157		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
 158					GFP_KERNEL));
 159		if (r < 0)
 160			return r;
 161
 162		vm->pasid = pasid;
 163	}
 164
 165
 166	return 0;
 167}
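/*
 * Usage sketch (illustrative only): detaching a VM from its PASID is done by
 * passing 0, as the kernel-doc above describes; the surrounding context is
 * assumed.
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, 0);
 *	if (r)
 *		return r;
 */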
 168
 169/**
 170 * amdgpu_vm_bo_evicted - vm_bo is evicted
 171 *
 172 * @vm_bo: vm_bo which is evicted
 173 *
 174 * State for PDs/PTs and per VM BOs which are not at the location they should
 175 * be.
 176 */
 177static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
 178{
 179	struct amdgpu_vm *vm = vm_bo->vm;
 180	struct amdgpu_bo *bo = vm_bo->bo;
 181
 182	vm_bo->moved = true;
 183	spin_lock(&vm_bo->vm->status_lock);
 184	if (bo->tbo.type == ttm_bo_type_kernel)
 185		list_move(&vm_bo->vm_status, &vm->evicted);
 186	else
 187		list_move_tail(&vm_bo->vm_status, &vm->evicted);
 188	spin_unlock(&vm_bo->vm->status_lock);
 189}
 190/**
 191 * amdgpu_vm_bo_moved - vm_bo is moved
 192 *
 193 * @vm_bo: vm_bo which is moved
 194 *
 195 * State for per VM BOs which are moved, but that change is not yet reflected
 196 * in the page tables.
 197 */
 198static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
 199{
 200	spin_lock(&vm_bo->vm->status_lock);
 201	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 202	spin_unlock(&vm_bo->vm->status_lock);
 203}
 204
 205/**
 206 * amdgpu_vm_bo_idle - vm_bo is idle
 207 *
 208 * @vm_bo: vm_bo which is now idle
 209 *
 210 * State for PDs/PTs and per VM BOs which have gone through the state machine
 211 * and are now idle.
 212 */
 213static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
 214{
 215	spin_lock(&vm_bo->vm->status_lock);
 216	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
 217	spin_unlock(&vm_bo->vm->status_lock);
 218	vm_bo->moved = false;
 219}
 220
 221/**
 222 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 223 *
 224 * @vm_bo: vm_bo which is now invalidated
 225 *
  226 * State for normal BOs which are invalidated and that change is not yet reflected
 227 * in the PTs.
 228 */
 229static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
 230{
 231	spin_lock(&vm_bo->vm->status_lock);
 232	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
 233	spin_unlock(&vm_bo->vm->status_lock);
 234}
 235
 236/**
 237 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
 238 *
 239 * @vm_bo: vm_bo which is evicted
 240 *
 241 * State for BOs used by user mode queues which are not at the location they
 242 * should be.
 243 */
 244static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
 245{
 246	vm_bo->moved = true;
 247	spin_lock(&vm_bo->vm->status_lock);
 248	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
 249	spin_unlock(&vm_bo->vm->status_lock);
 250}
 251
 252/**
  253 * amdgpu_vm_bo_relocated - vm_bo is relocated
 254 *
 255 * @vm_bo: vm_bo which is relocated
 256 *
  257 * State for PDs/PTs which need to update their parent PD.
 258 * For the root PD, just move to idle state.
 259 */
 260static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
 261{
 262	if (vm_bo->bo->parent) {
 263		spin_lock(&vm_bo->vm->status_lock);
 264		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
 265		spin_unlock(&vm_bo->vm->status_lock);
 266	} else {
 267		amdgpu_vm_bo_idle(vm_bo);
 268	}
 269}
 270
 271/**
 272 * amdgpu_vm_bo_done - vm_bo is done
 273 *
 274 * @vm_bo: vm_bo which is now done
 275 *
 276 * State for normal BOs which are invalidated and that change has been updated
 277 * in the PTs.
 278 */
 279static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
 280{
 281	spin_lock(&vm_bo->vm->status_lock);
 282	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
 283	spin_unlock(&vm_bo->vm->status_lock);
 284}
 285
 286/**
 287 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 288 * @vm: the VM which state machine to reset
 289 *
  290 * Move all vm_bo objects in the VM into a state where they will be updated
 291 * again during validation.
 292 */
 293static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
 294{
 295	struct amdgpu_vm_bo_base *vm_bo, *tmp;
 296
 297	spin_lock(&vm->status_lock);
 298	list_splice_init(&vm->done, &vm->invalidated);
 299	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
 300		vm_bo->moved = true;
 301	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
 302		struct amdgpu_bo *bo = vm_bo->bo;
 303
 304		vm_bo->moved = true;
 305		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
 306			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 307		else if (bo->parent)
 308			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
 309	}
 310	spin_unlock(&vm->status_lock);
 311}
 312
 313/**
 314 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 315 *
 316 * @base: base structure for tracking BO usage in a VM
 317 * @vm: vm to which bo is to be added
 318 * @bo: amdgpu buffer object
 319 *
 320 * Initialize a bo_va_base structure and add it to the appropriate lists
 321 *
 322 */
 323void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 324			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
 325{
 326	base->vm = vm;
 327	base->bo = bo;
 328	base->next = NULL;
 329	INIT_LIST_HEAD(&base->vm_status);
 330
 331	if (!bo)
 332		return;
 333	base->next = bo->vm_bo;
 334	bo->vm_bo = base;
 335
 336	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 337		return;
 338
 339	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
 340
 341	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
 342	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
 343		amdgpu_vm_bo_relocated(base);
 344	else
 345		amdgpu_vm_bo_idle(base);
 346
 347	if (bo->preferred_domains &
 348	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 349		return;
 350
 351	/*
 352	 * we checked all the prerequisites, but it looks like this per vm bo
 353	 * is currently evicted. add the bo to the evicted list to make sure it
 354	 * is validated on next vm use to avoid fault.
  355	 */
 356	amdgpu_vm_bo_evicted(base);
 357}
 358
 359/**
 360 * amdgpu_vm_lock_pd - lock PD in drm_exec
 361 *
 362 * @vm: vm providing the BOs
 363 * @exec: drm execution context
 364 * @num_fences: number of extra fences to reserve
 365 *
 366 * Lock the VM root PD in the DRM execution context.
 367 */
 368int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
 369		      unsigned int num_fences)
 370{
 371	/* We need at least two fences for the VM PD/PT updates */
 372	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
 373				    2 + num_fences);
 374}
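/*
 * Usage sketch (illustrative, not from the original file): callers typically
 * take the root PD lock inside a drm_exec retry loop.  The helper name, the
 * flags and the number of extra fences are assumptions for the example.
 */
static int example_lock_vm_pd(struct amdgpu_vm *vm)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (r)
			break;
	}

	/* ... update the PD/PTs while the root PD is locked ... */

	drm_exec_fini(&exec);
	return r;
}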
 375
 376/**
 377 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 378 *
 379 * @adev: amdgpu device pointer
 380 * @vm: vm providing the BOs
 381 *
 382 * Move all BOs to the end of LRU and remember their positions to put them
 383 * together.
 384 */
 385void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 386				struct amdgpu_vm *vm)
 387{
 388	spin_lock(&adev->mman.bdev.lru_lock);
 389	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 390	spin_unlock(&adev->mman.bdev.lru_lock);
 391}
 392
 393/* Create scheduler entities for page table updates */
 394static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
 395				   struct amdgpu_vm *vm)
 396{
 397	int r;
 398
 399	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
 400				  adev->vm_manager.vm_pte_scheds,
 401				  adev->vm_manager.vm_pte_num_scheds, NULL);
 402	if (r)
 403		goto error;
 404
 405	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
 406				     adev->vm_manager.vm_pte_scheds,
 407				     adev->vm_manager.vm_pte_num_scheds, NULL);
 408
 409error:
 410	drm_sched_entity_destroy(&vm->immediate);
 411	return r;
 412}
 413
 414/* Destroy the entities for page table updates again */
 415static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
 416{
 417	drm_sched_entity_destroy(&vm->immediate);
 418	drm_sched_entity_destroy(&vm->delayed);
 419}
 420
 421/**
 422 * amdgpu_vm_generation - return the page table re-generation counter
 423 * @adev: the amdgpu_device
 424 * @vm: optional VM to check, might be NULL
 425 *
 426 * Returns a page table re-generation token to allow checking if submissions
 427 * are still valid to use this VM. The VM parameter might be NULL in which case
 428 * just the VRAM lost counter will be used.
 429 */
 430uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 431{
 432	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
 433
 434	if (!vm)
 435		return result;
 436
 437	result += vm->generation;
 438	/* Add one if the page tables will be re-generated on next CS */
 439	if (drm_sched_entity_error(&vm->delayed))
 440		++result;
 441
 442	return result;
 443}
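/*
 * Illustrative sketch (hypothetical helper): a submission path can remember
 * the token returned by amdgpu_vm_generation() and later detect that VRAM was
 * lost (upper 32 bits) or that the page tables were re-generated (lower bits)
 * simply by comparing tokens.
 */
static bool example_vm_generation_unchanged(struct amdgpu_device *adev,
					    struct amdgpu_vm *vm,
					    uint64_t saved_token)
{
	return amdgpu_vm_generation(adev, vm) == saved_token;
}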
 444
 445/**
 446 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
 447 *
 448 * @adev: amdgpu device pointer
 449 * @vm: vm providing the BOs
 450 * @ticket: optional reservation ticket used to reserve the VM
 451 * @validate: callback to do the validation
 452 * @param: parameter for the validation callback
 453 *
 454 * Validate the page table BOs and per-VM BOs on command submission if
 455 * necessary. If a ticket is given, also try to validate evicted user queue
 456 * BOs. They must already be reserved with the given ticket.
 457 *
 458 * Returns:
 459 * Validation result.
 460 */
 461int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 462		       struct ww_acquire_ctx *ticket,
 463		       int (*validate)(void *p, struct amdgpu_bo *bo),
 464		       void *param)
 465{
 466	struct amdgpu_vm_bo_base *bo_base;
 467	struct amdgpu_bo *shadow;
 468	struct amdgpu_bo *bo;
 469	int r;
 470
 471	if (drm_sched_entity_error(&vm->delayed)) {
 472		++vm->generation;
 473		amdgpu_vm_bo_reset_state_machine(vm);
 474		amdgpu_vm_fini_entities(vm);
 475		r = amdgpu_vm_init_entities(adev, vm);
 476		if (r)
 477			return r;
 478	}
 479
 480	spin_lock(&vm->status_lock);
 481	while (!list_empty(&vm->evicted)) {
 482		bo_base = list_first_entry(&vm->evicted,
 483					   struct amdgpu_vm_bo_base,
 484					   vm_status);
 485		spin_unlock(&vm->status_lock);
 486
 487		bo = bo_base->bo;
 488		shadow = amdgpu_bo_shadowed(bo);
 489
 490		r = validate(param, bo);
 491		if (r)
 492			return r;
 493		if (shadow) {
 494			r = validate(param, shadow);
 495			if (r)
 496				return r;
 497		}
 498
 499		if (bo->tbo.type != ttm_bo_type_kernel) {
 500			amdgpu_vm_bo_moved(bo_base);
 501		} else {
 502			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
 503			amdgpu_vm_bo_relocated(bo_base);
 504		}
 505		spin_lock(&vm->status_lock);
 506	}
 507	while (ticket && !list_empty(&vm->evicted_user)) {
 508		bo_base = list_first_entry(&vm->evicted_user,
 509					   struct amdgpu_vm_bo_base,
 510					   vm_status);
 511		spin_unlock(&vm->status_lock);
 512
 513		bo = bo_base->bo;
 514
 515		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
 516			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
 517
 518			pr_warn_ratelimited("Evicted user BO is not reserved\n");
 519			if (ti) {
 520				pr_warn_ratelimited("pid %d\n", ti->pid);
 521				amdgpu_vm_put_task_info(ti);
 522			}
 523
 524			return -EINVAL;
 525		}
 526
 527		r = validate(param, bo);
 528		if (r)
 529			return r;
 530
 531		amdgpu_vm_bo_invalidated(bo_base);
 532
 533		spin_lock(&vm->status_lock);
 534	}
 535	spin_unlock(&vm->status_lock);
 536
 537	amdgpu_vm_eviction_lock(vm);
 538	vm->evicting = false;
 539	amdgpu_vm_eviction_unlock(vm);
 540
 541	return 0;
 542}
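/*
 * Callback sketch (hypothetical, not the driver's real callback): the
 * @validate callback passed to amdgpu_vm_validate() is invoked for every
 * evicted BO.  A minimal implementation just re-validates the BO into its
 * allowed domains; real callers add their own placement policy on top.
 */
static int example_validate_cb(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}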
 543
 544/**
 545 * amdgpu_vm_ready - check VM is ready for updates
 546 *
 547 * @vm: VM to check
 548 *
 549 * Check if all VM PDs/PTs are ready for updates
 550 *
 551 * Returns:
 552 * True if VM is not evicting.
 553 */
 554bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 555{
 556	bool empty;
 557	bool ret;
 558
 559	amdgpu_vm_eviction_lock(vm);
 560	ret = !vm->evicting;
 561	amdgpu_vm_eviction_unlock(vm);
 562
 563	spin_lock(&vm->status_lock);
 564	empty = list_empty(&vm->evicted);
 565	spin_unlock(&vm->status_lock);
 566
 567	return ret && empty;
 568}
 569
 570/**
 571 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 572 *
 573 * @adev: amdgpu_device pointer
 574 */
 575void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
 576{
 577	const struct amdgpu_ip_block *ip_block;
 578	bool has_compute_vm_bug;
 579	struct amdgpu_ring *ring;
 580	int i;
 581
 582	has_compute_vm_bug = false;
 583
 584	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 585	if (ip_block) {
 586		/* Compute has a VM bug for GFX version < 7.
 587		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
 588		if (ip_block->version->major <= 7)
 589			has_compute_vm_bug = true;
 590		else if (ip_block->version->major == 8)
 591			if (adev->gfx.mec_fw_version < 673)
 592				has_compute_vm_bug = true;
 593	}
 594
 595	for (i = 0; i < adev->num_rings; i++) {
 596		ring = adev->rings[i];
 597		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
 598			/* only compute rings */
 599			ring->has_compute_vm_bug = has_compute_vm_bug;
 600		else
 601			ring->has_compute_vm_bug = false;
 602	}
 603}
 604
 605/**
 606 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 607 *
 608 * @ring: ring on which the job will be submitted
 609 * @job: job to submit
 610 *
 611 * Returns:
 612 * True if sync is needed.
 613 */
 614bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 615				  struct amdgpu_job *job)
 616{
 617	struct amdgpu_device *adev = ring->adev;
 618	unsigned vmhub = ring->vm_hub;
 619	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 620
 621	if (job->vmid == 0)
 622		return false;
 623
 624	if (job->vm_needs_flush || ring->has_compute_vm_bug)
 625		return true;
 626
 627	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
 628		return true;
 629
 630	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
 631		return true;
 632
 633	return false;
 634}
 635
 636/**
 637 * amdgpu_vm_flush - hardware flush the vm
 638 *
 639 * @ring: ring to use for flush
 640 * @job:  related job
 641 * @need_pipe_sync: is pipe sync needed
 642 *
 643 * Emit a VM flush when it is necessary.
 644 *
 645 * Returns:
 646 * 0 on success, errno otherwise.
 647 */
 648int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 649		    bool need_pipe_sync)
 650{
 651	struct amdgpu_device *adev = ring->adev;
 652	unsigned vmhub = ring->vm_hub;
 653	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 654	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 655	bool spm_update_needed = job->spm_update_needed;
 656	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
 657		job->gds_switch_needed;
 658	bool vm_flush_needed = job->vm_needs_flush;
 659	struct dma_fence *fence = NULL;
 660	bool pasid_mapping_needed = false;
 661	unsigned int patch;
 662	int r;
 663
 664	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 665		gds_switch_needed = true;
 666		vm_flush_needed = true;
 667		pasid_mapping_needed = true;
 668		spm_update_needed = true;
 669	}
 670
 671	mutex_lock(&id_mgr->lock);
 672	if (id->pasid != job->pasid || !id->pasid_mapping ||
 673	    !dma_fence_is_signaled(id->pasid_mapping))
 674		pasid_mapping_needed = true;
 675	mutex_unlock(&id_mgr->lock);
 676
 677	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
 678	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
 679			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
 680	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
 681		ring->funcs->emit_wreg;
 682
 683	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 684		return 0;
 685
 686	amdgpu_ring_ib_begin(ring);
 687	if (ring->funcs->init_cond_exec)
 688		patch = amdgpu_ring_init_cond_exec(ring,
 689						   ring->cond_exe_gpu_addr);
 690
 691	if (need_pipe_sync)
 692		amdgpu_ring_emit_pipeline_sync(ring);
 693
 694	if (vm_flush_needed) {
 695		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
 696		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
 697	}
 698
 699	if (pasid_mapping_needed)
 700		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 701
 702	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
 703		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
 704
 705	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
 706	    gds_switch_needed) {
 707		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
 708					    job->gds_size, job->gws_base,
 709					    job->gws_size, job->oa_base,
 710					    job->oa_size);
 711	}
 712
 713	if (vm_flush_needed || pasid_mapping_needed) {
 714		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 715		if (r)
 716			return r;
 717	}
 718
 719	if (vm_flush_needed) {
 720		mutex_lock(&id_mgr->lock);
 721		dma_fence_put(id->last_flush);
 722		id->last_flush = dma_fence_get(fence);
 723		id->current_gpu_reset_count =
 724			atomic_read(&adev->gpu_reset_counter);
 725		mutex_unlock(&id_mgr->lock);
 726	}
 727
 728	if (pasid_mapping_needed) {
 729		mutex_lock(&id_mgr->lock);
 730		id->pasid = job->pasid;
 731		dma_fence_put(id->pasid_mapping);
 732		id->pasid_mapping = dma_fence_get(fence);
 733		mutex_unlock(&id_mgr->lock);
 734	}
 735	dma_fence_put(fence);
 736
 737	amdgpu_ring_patch_cond_exec(ring, patch);
 738
 739	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
 740	if (ring->funcs->emit_switch_buffer) {
 741		amdgpu_ring_emit_switch_buffer(ring);
 742		amdgpu_ring_emit_switch_buffer(ring);
 743	}
 744	amdgpu_ring_ib_end(ring);
 745	return 0;
 746}
 747
 748/**
 749 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 750 *
 751 * @vm: requested vm
 752 * @bo: requested buffer object
 753 *
 754 * Find @bo inside the requested vm.
  755 * Search inside the @bo's vm list for the requested vm.
 756 * Returns the found bo_va or NULL if none is found
 757 *
 758 * Object has to be reserved!
 759 *
 760 * Returns:
 761 * Found bo_va or NULL.
 762 */
 763struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 764				       struct amdgpu_bo *bo)
 765{
 766	struct amdgpu_vm_bo_base *base;
 767
 768	for (base = bo->vm_bo; base; base = base->next) {
 769		if (base->vm != vm)
 770			continue;
 771
 772		return container_of(base, struct amdgpu_bo_va, base);
 773	}
 774	return NULL;
 775}
 776
 777/**
 778 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 779 *
 780 * @pages_addr: optional DMA address to use for lookup
 781 * @addr: the unmapped addr
 782 *
 783 * Look up the physical address of the page that the pte resolves
 784 * to.
 785 *
 786 * Returns:
 787 * The pointer for the page table entry.
 788 */
 789uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 790{
 791	uint64_t result;
 792
 793	/* page table offset */
 794	result = pages_addr[addr >> PAGE_SHIFT];
 795
 796	/* in case cpu page size != gpu page size*/
 797	result |= addr & (~PAGE_MASK);
 798
 799	result &= 0xFFFFFFFFFFFFF000ULL;
 800
 801	return result;
 802}
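/*
 * Worked example (illustrative): with 64 KiB CPU pages and 4 KiB GPU pages,
 * resolving addr 0x12345 reads pages_addr[0x12345 >> 16] == pages_addr[1],
 * ORs back the in-page offset 0x2345 and then masks the result to 4 KiB
 * alignment, yielding the DMA address of CPU page 1 plus 0x2000.
 */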
 803
 804/**
 805 * amdgpu_vm_update_pdes - make sure that all directories are valid
 806 *
 807 * @adev: amdgpu_device pointer
 808 * @vm: requested vm
 809 * @immediate: submit immediately to the paging queue
 810 *
 811 * Makes sure all directories are up to date.
 812 *
 813 * Returns:
 814 * 0 for success, error for failure.
 815 */
 816int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 817			  struct amdgpu_vm *vm, bool immediate)
 818{
 819	struct amdgpu_vm_update_params params;
 820	struct amdgpu_vm_bo_base *entry;
 821	bool flush_tlb_needed = false;
 822	LIST_HEAD(relocated);
 823	int r, idx;
 824
 825	spin_lock(&vm->status_lock);
 826	list_splice_init(&vm->relocated, &relocated);
 827	spin_unlock(&vm->status_lock);
 828
 829	if (list_empty(&relocated))
 830		return 0;
 831
 832	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 833		return -ENODEV;
 834
 835	memset(&params, 0, sizeof(params));
 836	params.adev = adev;
 837	params.vm = vm;
 838	params.immediate = immediate;
 839
 840	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 841	if (r)
 842		goto error;
 843
 844	list_for_each_entry(entry, &relocated, vm_status) {
 845		/* vm_flush_needed after updating moved PDEs */
 846		flush_tlb_needed |= entry->moved;
 847
 848		r = amdgpu_vm_pde_update(&params, entry);
 849		if (r)
 850			goto error;
 851	}
 852
 853	r = vm->update_funcs->commit(&params, &vm->last_update);
 854	if (r)
 855		goto error;
 856
 857	if (flush_tlb_needed)
 858		atomic64_inc(&vm->tlb_seq);
 859
 860	while (!list_empty(&relocated)) {
 861		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
 862					 vm_status);
 863		amdgpu_vm_bo_idle(entry);
 864	}
 865
 866error:
 867	drm_dev_exit(idx);
 868	return r;
 869}
 870
 871/**
 872 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 873 * @fence: unused
 874 * @cb: the callback structure
 875 *
 876 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 877 */
 878static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
 879				 struct dma_fence_cb *cb)
 880{
 881	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
 882
 883	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
 884	atomic64_inc(&tlb_cb->vm->tlb_seq);
 885	kfree(tlb_cb);
 886}
 887
 888/**
 889 * amdgpu_vm_update_range - update a range in the vm page table
 890 *
 891 * @adev: amdgpu_device pointer to use for commands
 892 * @vm: the VM to update the range
 893 * @immediate: immediate submission in a page fault
 894 * @unlocked: unlocked invalidation during MM callback
 895 * @flush_tlb: trigger tlb invalidation after update completed
 896 * @allow_override: change MTYPE for local NUMA nodes
 897 * @resv: fences we need to sync to
 898 * @start: start of mapped range
 899 * @last: last mapped entry
 900 * @flags: flags for the entries
 901 * @offset: offset into nodes and pages_addr
 902 * @vram_base: base for vram mappings
 903 * @res: ttm_resource to map
 904 * @pages_addr: DMA addresses to use for mapping
 905 * @fence: optional resulting fence
 906 *
 907 * Fill in the page table entries between @start and @last.
 908 *
 909 * Returns:
  910 * 0 for success, negative error code for failure.
 911 */
 912int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 913			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
 914			   struct dma_resv *resv, uint64_t start, uint64_t last,
 915			   uint64_t flags, uint64_t offset, uint64_t vram_base,
 916			   struct ttm_resource *res, dma_addr_t *pages_addr,
 917			   struct dma_fence **fence)
 918{
 919	struct amdgpu_vm_update_params params;
 920	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
 921	struct amdgpu_res_cursor cursor;
 922	enum amdgpu_sync_mode sync_mode;
 923	int r, idx;
 924
 925	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 926		return -ENODEV;
 927
 928	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
 929	if (!tlb_cb) {
 930		r = -ENOMEM;
 931		goto error_unlock;
 932	}
 933
 934	/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
 935	 * heavy-weight flush TLB unconditionally.
 936	 */
 937	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
 938		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
 939
 940	/*
 941	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
 942	 */
 943	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
 944
 945	memset(&params, 0, sizeof(params));
 946	params.adev = adev;
 947	params.vm = vm;
 948	params.immediate = immediate;
 949	params.pages_addr = pages_addr;
 950	params.unlocked = unlocked;
 951	params.allow_override = allow_override;
 952
 953	/* Implicitly sync to command submissions in the same VM before
 954	 * unmapping. Sync to moving fences before mapping.
 955	 */
 956	if (!(flags & AMDGPU_PTE_VALID))
 957		sync_mode = AMDGPU_SYNC_EQ_OWNER;
 958	else
 959		sync_mode = AMDGPU_SYNC_EXPLICIT;
 960
 961	amdgpu_vm_eviction_lock(vm);
 962	if (vm->evicting) {
 963		r = -EBUSY;
 964		goto error_free;
 965	}
 966
 967	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
 968		struct dma_fence *tmp = dma_fence_get_stub();
 969
 970		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
 971		swap(vm->last_unlocked, tmp);
 972		dma_fence_put(tmp);
 973	}
 974
 975	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 976	if (r)
 977		goto error_free;
 978
 979	amdgpu_res_first(pages_addr ? NULL : res, offset,
 980			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
 981	while (cursor.remaining) {
 982		uint64_t tmp, num_entries, addr;
 983
 984		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
 985		if (pages_addr) {
 986			bool contiguous = true;
 987
 988			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
 989				uint64_t pfn = cursor.start >> PAGE_SHIFT;
 990				uint64_t count;
 991
 992				contiguous = pages_addr[pfn + 1] ==
 993					pages_addr[pfn] + PAGE_SIZE;
 994
 995				tmp = num_entries /
 996					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 997				for (count = 2; count < tmp; ++count) {
 998					uint64_t idx = pfn + count;
 999
1000					if (contiguous != (pages_addr[idx] ==
1001					    pages_addr[idx - 1] + PAGE_SIZE))
1002						break;
1003				}
1004				if (!contiguous)
1005					count--;
1006				num_entries = count *
1007					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1008			}
1009
1010			if (!contiguous) {
1011				addr = cursor.start;
1012				params.pages_addr = pages_addr;
1013			} else {
1014				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1015				params.pages_addr = NULL;
1016			}
1017
1018		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
1019			addr = vram_base + cursor.start;
1020		} else {
1021			addr = 0;
1022		}
1023
1024		tmp = start + num_entries;
1025		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1026		if (r)
1027			goto error_free;
1028
1029		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1030		start = tmp;
1031	}
1032
1033	r = vm->update_funcs->commit(&params, fence);
1034
1035	if (flush_tlb || params.table_freed) {
1036		tlb_cb->vm = vm;
1037		if (fence && *fence &&
1038		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
1039					   amdgpu_vm_tlb_seq_cb)) {
1040			dma_fence_put(vm->last_tlb_flush);
1041			vm->last_tlb_flush = dma_fence_get(*fence);
1042		} else {
1043			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1044		}
1045		tlb_cb = NULL;
1046	}
1047
1048error_free:
1049	kfree(tlb_cb);
1050
1051error_unlock:
1052	amdgpu_vm_eviction_unlock(vm);
1053	drm_dev_exit(idx);
1054	return r;
1055}
1056
1057static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1058				    struct amdgpu_mem_stats *stats)
1059{
1060	struct amdgpu_vm *vm = bo_va->base.vm;
1061	struct amdgpu_bo *bo = bo_va->base.bo;
1062
1063	if (!bo)
1064		return;
1065
1066	/*
1067	 * For now ignore BOs which are currently locked and potentially
1068	 * changing their location.
1069	 */
1070	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1071	    !dma_resv_trylock(bo->tbo.base.resv))
1072		return;
1073
1074	amdgpu_bo_get_memory(bo, stats);
1075	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1076	    dma_resv_unlock(bo->tbo.base.resv);
1077}
1078
1079void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1080			  struct amdgpu_mem_stats *stats)
1081{
1082	struct amdgpu_bo_va *bo_va, *tmp;
1083
1084	spin_lock(&vm->status_lock);
1085	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1086		amdgpu_vm_bo_get_memory(bo_va, stats);
1087
1088	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1089		amdgpu_vm_bo_get_memory(bo_va, stats);
1090
1091	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1092		amdgpu_vm_bo_get_memory(bo_va, stats);
1093
1094	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1095		amdgpu_vm_bo_get_memory(bo_va, stats);
1096
1097	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1098		amdgpu_vm_bo_get_memory(bo_va, stats);
1099
1100	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1101		amdgpu_vm_bo_get_memory(bo_va, stats);
1102	spin_unlock(&vm->status_lock);
1103}
1104
1105/**
1106 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1107 *
1108 * @adev: amdgpu_device pointer
1109 * @bo_va: requested BO and VM object
1110 * @clear: if true clear the entries
1111 *
1112 * Fill in the page table entries for @bo_va.
1113 *
1114 * Returns:
1115 * 0 for success, -EINVAL for failure.
1116 */
1117int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1118			bool clear)
1119{
1120	struct amdgpu_bo *bo = bo_va->base.bo;
1121	struct amdgpu_vm *vm = bo_va->base.vm;
1122	struct amdgpu_bo_va_mapping *mapping;
1123	dma_addr_t *pages_addr = NULL;
1124	struct ttm_resource *mem;
1125	struct dma_fence **last_update;
1126	bool flush_tlb = clear;
1127	bool uncached;
1128	struct dma_resv *resv;
1129	uint64_t vram_base;
1130	uint64_t flags;
1131	int r;
1132
1133	if (clear || !bo) {
1134		mem = NULL;
1135		resv = vm->root.bo->tbo.base.resv;
1136	} else {
1137		struct drm_gem_object *obj = &bo->tbo.base;
1138
1139		resv = bo->tbo.base.resv;
1140		if (obj->import_attach && bo_va->is_xgmi) {
1141			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1142			struct drm_gem_object *gobj = dma_buf->priv;
1143			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1144
1145			if (abo->tbo.resource &&
1146			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1147				bo = gem_to_amdgpu_bo(gobj);
1148		}
1149		mem = bo->tbo.resource;
1150		if (mem && (mem->mem_type == TTM_PL_TT ||
1151			    mem->mem_type == AMDGPU_PL_PREEMPT))
1152			pages_addr = bo->tbo.ttm->dma_address;
1153	}
1154
1155	if (bo) {
1156		struct amdgpu_device *bo_adev;
1157
1158		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1159
1160		if (amdgpu_bo_encrypted(bo))
1161			flags |= AMDGPU_PTE_TMZ;
1162
1163		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1164		vram_base = bo_adev->vm_manager.vram_base_offset;
1165		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1166	} else {
1167		flags = 0x0;
1168		vram_base = 0;
1169		uncached = false;
1170	}
1171
1172	if (clear || (bo && bo->tbo.base.resv ==
1173		      vm->root.bo->tbo.base.resv))
1174		last_update = &vm->last_update;
1175	else
1176		last_update = &bo_va->last_pt_update;
1177
1178	if (!clear && bo_va->base.moved) {
1179		flush_tlb = true;
1180		list_splice_init(&bo_va->valids, &bo_va->invalids);
1181
1182	} else if (bo_va->cleared != clear) {
1183		list_splice_init(&bo_va->valids, &bo_va->invalids);
1184	}
1185
1186	list_for_each_entry(mapping, &bo_va->invalids, list) {
1187		uint64_t update_flags = flags;
1188
 1189		/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
 1190		 * but filter the flags here just in case.
1191		 */
1192		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1193			update_flags &= ~AMDGPU_PTE_READABLE;
1194		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1195			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1196
1197		/* Apply ASIC specific mapping flags */
1198		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1199
1200		trace_amdgpu_vm_bo_update(mapping);
1201
1202		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1203					   !uncached, resv, mapping->start, mapping->last,
1204					   update_flags, mapping->offset,
1205					   vram_base, mem, pages_addr,
1206					   last_update);
1207		if (r)
1208			return r;
1209	}
1210
1211	/* If the BO is not in its preferred location add it back to
1212	 * the evicted list so that it gets validated again on the
1213	 * next command submission.
1214	 */
1215	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1216		uint32_t mem_type = bo->tbo.resource->mem_type;
1217
1218		if (!(bo->preferred_domains &
1219		      amdgpu_mem_type_to_domain(mem_type)))
1220			amdgpu_vm_bo_evicted(&bo_va->base);
1221		else
1222			amdgpu_vm_bo_idle(&bo_va->base);
1223	} else {
1224		amdgpu_vm_bo_done(&bo_va->base);
1225	}
1226
1227	list_splice_init(&bo_va->invalids, &bo_va->valids);
1228	bo_va->cleared = clear;
1229	bo_va->base.moved = false;
1230
1231	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1232		list_for_each_entry(mapping, &bo_va->valids, list)
1233			trace_amdgpu_vm_bo_mapping(mapping);
1234	}
1235
1236	return 0;
1237}
1238
1239/**
1240 * amdgpu_vm_update_prt_state - update the global PRT state
1241 *
1242 * @adev: amdgpu_device pointer
1243 */
1244static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1245{
1246	unsigned long flags;
1247	bool enable;
1248
1249	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1250	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1251	adev->gmc.gmc_funcs->set_prt(adev, enable);
1252	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1253}
1254
1255/**
1256 * amdgpu_vm_prt_get - add a PRT user
1257 *
1258 * @adev: amdgpu_device pointer
1259 */
1260static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1261{
1262	if (!adev->gmc.gmc_funcs->set_prt)
1263		return;
1264
1265	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1266		amdgpu_vm_update_prt_state(adev);
1267}
1268
1269/**
1270 * amdgpu_vm_prt_put - drop a PRT user
1271 *
1272 * @adev: amdgpu_device pointer
1273 */
1274static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1275{
1276	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1277		amdgpu_vm_update_prt_state(adev);
1278}
1279
1280/**
1281 * amdgpu_vm_prt_cb - callback for updating the PRT status
1282 *
1283 * @fence: fence for the callback
1284 * @_cb: the callback function
1285 */
1286static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1287{
1288	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1289
1290	amdgpu_vm_prt_put(cb->adev);
1291	kfree(cb);
1292}
1293
1294/**
1295 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1296 *
1297 * @adev: amdgpu_device pointer
1298 * @fence: fence for the callback
1299 */
1300static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1301				 struct dma_fence *fence)
1302{
1303	struct amdgpu_prt_cb *cb;
1304
1305	if (!adev->gmc.gmc_funcs->set_prt)
1306		return;
1307
1308	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1309	if (!cb) {
1310		/* Last resort when we are OOM */
1311		if (fence)
1312			dma_fence_wait(fence, false);
1313
1314		amdgpu_vm_prt_put(adev);
1315	} else {
1316		cb->adev = adev;
1317		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1318						     amdgpu_vm_prt_cb))
1319			amdgpu_vm_prt_cb(fence, &cb->cb);
1320	}
1321}
1322
1323/**
1324 * amdgpu_vm_free_mapping - free a mapping
1325 *
1326 * @adev: amdgpu_device pointer
1327 * @vm: requested vm
1328 * @mapping: mapping to be freed
1329 * @fence: fence of the unmap operation
1330 *
1331 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1332 */
1333static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1334				   struct amdgpu_vm *vm,
1335				   struct amdgpu_bo_va_mapping *mapping,
1336				   struct dma_fence *fence)
1337{
1338	if (mapping->flags & AMDGPU_PTE_PRT)
1339		amdgpu_vm_add_prt_cb(adev, fence);
1340	kfree(mapping);
1341}
1342
1343/**
1344 * amdgpu_vm_prt_fini - finish all prt mappings
1345 *
1346 * @adev: amdgpu_device pointer
1347 * @vm: requested vm
1348 *
1349 * Register a cleanup callback to disable PRT support after VM dies.
1350 */
1351static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1352{
1353	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1354	struct dma_resv_iter cursor;
1355	struct dma_fence *fence;
1356
1357	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1358		/* Add a callback for each fence in the reservation object */
1359		amdgpu_vm_prt_get(adev);
1360		amdgpu_vm_add_prt_cb(adev, fence);
1361	}
1362}
1363
1364/**
1365 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1366 *
1367 * @adev: amdgpu_device pointer
1368 * @vm: requested vm
1369 * @fence: optional resulting fence (unchanged if no work needed to be done
1370 * or if an error occurred)
1371 *
1372 * Make sure all freed BOs are cleared in the PT.
1373 * PTs have to be reserved and mutex must be locked!
1374 *
1375 * Returns:
1376 * 0 for success.
1377 *
1378 */
1379int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1380			  struct amdgpu_vm *vm,
1381			  struct dma_fence **fence)
1382{
1383	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1384	struct amdgpu_bo_va_mapping *mapping;
1385	uint64_t init_pte_value = 0;
1386	struct dma_fence *f = NULL;
1387	int r;
1388
1389	while (!list_empty(&vm->freed)) {
1390		mapping = list_first_entry(&vm->freed,
1391			struct amdgpu_bo_va_mapping, list);
1392		list_del(&mapping->list);
1393
1394		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1395					   resv, mapping->start, mapping->last,
1396					   init_pte_value, 0, 0, NULL, NULL,
1397					   &f);
1398		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1399		if (r) {
1400			dma_fence_put(f);
1401			return r;
1402		}
1403	}
1404
1405	if (fence && f) {
1406		dma_fence_put(*fence);
1407		*fence = f;
1408	} else {
1409		dma_fence_put(f);
1410	}
1411
1412	return 0;
1413
1414}
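/*
 * Usage sketch (illustrative): after amdgpu_vm_bo_unmap() has queued mappings
 * on vm->freed, a caller flushes them as below.  Reserving the page tables
 * beforehand is assumed, as the kernel-doc above requires; the helper name is
 * hypothetical.
 */
static int example_flush_freed_mappings(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r)
		return r;

	/* the fence, if any, signals once the PTEs are actually cleared */
	dma_fence_put(fence);
	return 0;
}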
1415
1416/**
1417 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1418 *
1419 * @adev: amdgpu_device pointer
1420 * @vm: requested vm
1421 * @ticket: optional reservation ticket used to reserve the VM
1422 *
1423 * Make sure all BOs which are moved are updated in the PTs.
1424 *
1425 * Returns:
1426 * 0 for success.
1427 *
1428 * PTs have to be reserved!
1429 */
1430int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1431			   struct amdgpu_vm *vm,
1432			   struct ww_acquire_ctx *ticket)
1433{
1434	struct amdgpu_bo_va *bo_va;
1435	struct dma_resv *resv;
1436	bool clear, unlock;
1437	int r;
1438
1439	spin_lock(&vm->status_lock);
1440	while (!list_empty(&vm->moved)) {
1441		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1442					 base.vm_status);
1443		spin_unlock(&vm->status_lock);
1444
 1445		/* Per VM BOs never need to be cleared in the page tables */
1446		r = amdgpu_vm_bo_update(adev, bo_va, false);
1447		if (r)
1448			return r;
1449		spin_lock(&vm->status_lock);
1450	}
1451
1452	while (!list_empty(&vm->invalidated)) {
1453		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1454					 base.vm_status);
1455		resv = bo_va->base.bo->tbo.base.resv;
1456		spin_unlock(&vm->status_lock);
1457
1458		/* Try to reserve the BO to avoid clearing its ptes */
1459		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1460			clear = false;
1461			unlock = true;
1462		/* The caller is already holding the reservation lock */
1463		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1464			clear = false;
1465			unlock = false;
1466		/* Somebody else is using the BO right now */
1467		} else {
1468			clear = true;
1469			unlock = false;
1470		}
1471
1472		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1473
1474		if (unlock)
1475			dma_resv_unlock(resv);
1476		if (r)
1477			return r;
1478
1479		/* Remember evicted DMABuf imports in compute VMs for later
1480		 * validation
1481		 */
1482		if (vm->is_compute_context &&
1483		    bo_va->base.bo->tbo.base.import_attach &&
1484		    (!bo_va->base.bo->tbo.resource ||
1485		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1486			amdgpu_vm_bo_evicted_user(&bo_va->base);
1487
1488		spin_lock(&vm->status_lock);
1489	}
1490	spin_unlock(&vm->status_lock);
1491
1492	return 0;
1493}
1494
1495/**
1496 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1497 *
1498 * @adev: amdgpu_device pointer
1499 * @vm: requested vm
1500 * @flush_type: flush type
1501 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1502 *
1503 * Flush TLB if needed for a compute VM.
1504 *
1505 * Returns:
1506 * 0 for success.
1507 */
1508int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1509				struct amdgpu_vm *vm,
1510				uint32_t flush_type,
1511				uint32_t xcc_mask)
1512{
1513	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1514	bool all_hub = false;
1515	int xcc = 0, r = 0;
1516
1517	WARN_ON_ONCE(!vm->is_compute_context);
1518
1519	/*
1520	 * It can be that we race and lose here, but that is extremely unlikely
1521	 * and the worst thing which could happen is that we flush the changes
1522	 * into the TLB once more which is harmless.
1523	 */
1524	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1525		return 0;
1526
1527	if (adev->family == AMDGPU_FAMILY_AI ||
1528	    adev->family == AMDGPU_FAMILY_RV)
1529		all_hub = true;
1530
1531	for_each_inst(xcc, xcc_mask) {
1532		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1533						   all_hub, xcc);
1534		if (r)
1535			break;
1536	}
1537	return r;
1538}
1539
1540/**
1541 * amdgpu_vm_bo_add - add a bo to a specific vm
1542 *
1543 * @adev: amdgpu_device pointer
1544 * @vm: requested vm
1545 * @bo: amdgpu buffer object
1546 *
1547 * Add @bo into the requested vm.
1548 * Add @bo to the list of bos associated with the vm
1549 *
1550 * Returns:
1551 * Newly added bo_va or NULL for failure
1552 *
1553 * Object has to be reserved!
1554 */
1555struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1556				      struct amdgpu_vm *vm,
1557				      struct amdgpu_bo *bo)
1558{
1559	struct amdgpu_bo_va *bo_va;
1560
1561	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1562	if (bo_va == NULL) {
1563		return NULL;
1564	}
1565	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1566
1567	bo_va->ref_count = 1;
1568	bo_va->last_pt_update = dma_fence_get_stub();
1569	INIT_LIST_HEAD(&bo_va->valids);
1570	INIT_LIST_HEAD(&bo_va->invalids);
1571
1572	if (!bo)
1573		return bo_va;
1574
1575	dma_resv_assert_held(bo->tbo.base.resv);
1576	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1577		bo_va->is_xgmi = true;
1578		/* Power up XGMI if it can be potentially used */
1579		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1580	}
1581
1582	return bo_va;
1583}
1584
1585
1586/**
1587 * amdgpu_vm_bo_insert_map - insert a new mapping
1588 *
1589 * @adev: amdgpu_device pointer
1590 * @bo_va: bo_va to store the address
1591 * @mapping: the mapping to insert
1592 *
1593 * Insert a new mapping into all structures.
1594 */
1595static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1596				    struct amdgpu_bo_va *bo_va,
1597				    struct amdgpu_bo_va_mapping *mapping)
1598{
1599	struct amdgpu_vm *vm = bo_va->base.vm;
1600	struct amdgpu_bo *bo = bo_va->base.bo;
1601
1602	mapping->bo_va = bo_va;
1603	list_add(&mapping->list, &bo_va->invalids);
1604	amdgpu_vm_it_insert(mapping, &vm->va);
1605
1606	if (mapping->flags & AMDGPU_PTE_PRT)
1607		amdgpu_vm_prt_get(adev);
1608
1609	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1610	    !bo_va->base.moved) {
1611		amdgpu_vm_bo_moved(&bo_va->base);
1612	}
1613	trace_amdgpu_vm_bo_map(bo_va, mapping);
1614}
1615
1616/* Validate operation parameters to prevent potential abuse */
1617static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1618					  struct amdgpu_bo *bo,
1619					  uint64_t saddr,
1620					  uint64_t offset,
1621					  uint64_t size)
1622{
1623	uint64_t tmp, lpfn;
1624
1625	if (saddr & AMDGPU_GPU_PAGE_MASK
1626	    || offset & AMDGPU_GPU_PAGE_MASK
1627	    || size & AMDGPU_GPU_PAGE_MASK)
1628		return -EINVAL;
1629
1630	if (check_add_overflow(saddr, size, &tmp)
1631	    || check_add_overflow(offset, size, &tmp)
1632	    || size == 0 /* which also leads to end < begin */)
1633		return -EINVAL;
1634
1635	/* make sure object fit at this offset */
1636	if (bo && offset + size > amdgpu_bo_size(bo))
1637		return -EINVAL;
1638
1639	/* Ensure last pfn not exceed max_pfn */
1640	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1641	if (lpfn >= adev->vm_manager.max_pfn)
1642		return -EINVAL;
1643
1644	return 0;
1645}
1646
1647/**
1648 * amdgpu_vm_bo_map - map bo inside a vm
1649 *
1650 * @adev: amdgpu_device pointer
1651 * @bo_va: bo_va to store the address
1652 * @saddr: where to map the BO
1653 * @offset: requested offset in the BO
1654 * @size: BO size in bytes
1655 * @flags: attributes of pages (read/write/valid/etc.)
1656 *
 1657 * Add a mapping of the BO at the specified addr into the VM.
1658 *
1659 * Returns:
1660 * 0 for success, error for failure.
1661 *
1662 * Object has to be reserved and unreserved outside!
1663 */
1664int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1665		     struct amdgpu_bo_va *bo_va,
1666		     uint64_t saddr, uint64_t offset,
1667		     uint64_t size, uint64_t flags)
1668{
1669	struct amdgpu_bo_va_mapping *mapping, *tmp;
1670	struct amdgpu_bo *bo = bo_va->base.bo;
1671	struct amdgpu_vm *vm = bo_va->base.vm;
1672	uint64_t eaddr;
1673	int r;
1674
1675	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1676	if (r)
1677		return r;
1678
1679	saddr /= AMDGPU_GPU_PAGE_SIZE;
1680	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1681
1682	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1683	if (tmp) {
1684		/* bo and tmp overlap, invalid addr */
1685		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1686			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1687			tmp->start, tmp->last + 1);
1688		return -EINVAL;
1689	}
1690
1691	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1692	if (!mapping)
1693		return -ENOMEM;
1694
1695	mapping->start = saddr;
1696	mapping->last = eaddr;
1697	mapping->offset = offset;
1698	mapping->flags = flags;
1699
1700	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1701
1702	return 0;
1703}
1704
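/*
 * Usage sketch (illustrative): map a reserved BO at a fixed GPU virtual
 * address.  The address and flags are example values, and the helper name is
 * hypothetical; error handling is trimmed for brevity.
 */
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	/* the BO must already be reserved, see the kernel-doc above */
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	return amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, amdgpu_bo_size(bo),
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}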
1705/**
1706 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1707 *
1708 * @adev: amdgpu_device pointer
1709 * @bo_va: bo_va to store the address
1710 * @saddr: where to map the BO
1711 * @offset: requested offset in the BO
1712 * @size: BO size in bytes
1713 * @flags: attributes of pages (read/write/valid/etc.)
1714 *
 1715 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1716 * mappings as we do so.
1717 *
1718 * Returns:
1719 * 0 for success, error for failure.
1720 *
1721 * Object has to be reserved and unreserved outside!
1722 */
1723int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1724			     struct amdgpu_bo_va *bo_va,
1725			     uint64_t saddr, uint64_t offset,
1726			     uint64_t size, uint64_t flags)
1727{
1728	struct amdgpu_bo_va_mapping *mapping;
1729	struct amdgpu_bo *bo = bo_va->base.bo;
1730	uint64_t eaddr;
1731	int r;
1732
1733	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1734	if (r)
1735		return r;
1736
1737	/* Allocate all the needed memory */
1738	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1739	if (!mapping)
1740		return -ENOMEM;
1741
1742	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1743	if (r) {
1744		kfree(mapping);
1745		return r;
1746	}
1747
1748	saddr /= AMDGPU_GPU_PAGE_SIZE;
1749	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1750
1751	mapping->start = saddr;
1752	mapping->last = eaddr;
1753	mapping->offset = offset;
1754	mapping->flags = flags;
1755
1756	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1757
1758	return 0;
1759}
1760
1761/**
1762 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1763 *
1764 * @adev: amdgpu_device pointer
1765 * @bo_va: bo_va to remove the address from
 1766 * @saddr: where the BO is mapped
1767 *
 1768 * Remove a mapping of the BO at the specified addr from the VM.
1769 *
1770 * Returns:
1771 * 0 for success, error for failure.
1772 *
1773 * Object has to be reserved and unreserved outside!
1774 */
1775int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1776		       struct amdgpu_bo_va *bo_va,
1777		       uint64_t saddr)
1778{
1779	struct amdgpu_bo_va_mapping *mapping;
1780	struct amdgpu_vm *vm = bo_va->base.vm;
1781	bool valid = true;
1782
1783	saddr /= AMDGPU_GPU_PAGE_SIZE;
1784
1785	list_for_each_entry(mapping, &bo_va->valids, list) {
1786		if (mapping->start == saddr)
1787			break;
1788	}
1789
1790	if (&mapping->list == &bo_va->valids) {
1791		valid = false;
1792
1793		list_for_each_entry(mapping, &bo_va->invalids, list) {
1794			if (mapping->start == saddr)
1795				break;
1796		}
1797
1798		if (&mapping->list == &bo_va->invalids)
1799			return -ENOENT;
1800	}
1801
1802	list_del(&mapping->list);
1803	amdgpu_vm_it_remove(mapping, &vm->va);
1804	mapping->bo_va = NULL;
1805	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1806
1807	if (valid)
1808		list_add(&mapping->list, &vm->freed);
1809	else
1810		amdgpu_vm_free_mapping(adev, vm, mapping,
1811				       bo_va->last_pt_update);
1812
1813	return 0;
1814}
1815
1816/**
1817 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1818 *
1819 * @adev: amdgpu_device pointer
1820 * @vm: VM structure to use
1821 * @saddr: start of the range
1822 * @size: size of the range
1823 *
1824 * Remove all mappings in a range, split them as appropriate.
1825 *
1826 * Returns:
1827 * 0 for success, error for failure.
1828 */
1829int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1830				struct amdgpu_vm *vm,
1831				uint64_t saddr, uint64_t size)
1832{
1833	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1834	LIST_HEAD(removed);
1835	uint64_t eaddr;
1836	int r;
1837
1838	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1839	if (r)
1840		return r;
1841
1842	saddr /= AMDGPU_GPU_PAGE_SIZE;
1843	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1844
1845	/* Allocate all the needed memory */
1846	before = kzalloc(sizeof(*before), GFP_KERNEL);
1847	if (!before)
1848		return -ENOMEM;
1849	INIT_LIST_HEAD(&before->list);
1850
1851	after = kzalloc(sizeof(*after), GFP_KERNEL);
1852	if (!after) {
1853		kfree(before);
1854		return -ENOMEM;
1855	}
1856	INIT_LIST_HEAD(&after->list);
1857
1858	/* Now gather all removed mappings */
1859	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1860	while (tmp) {
1861		/* Remember mapping split at the start */
1862		if (tmp->start < saddr) {
1863			before->start = tmp->start;
1864			before->last = saddr - 1;
1865			before->offset = tmp->offset;
1866			before->flags = tmp->flags;
1867			before->bo_va = tmp->bo_va;
1868			list_add(&before->list, &tmp->bo_va->invalids);
1869		}
1870
1871		/* Remember mapping split at the end */
1872		if (tmp->last > eaddr) {
1873			after->start = eaddr + 1;
1874			after->last = tmp->last;
1875			after->offset = tmp->offset;
1876			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1877			after->flags = tmp->flags;
1878			after->bo_va = tmp->bo_va;
1879			list_add(&after->list, &tmp->bo_va->invalids);
1880		}
1881
1882		list_del(&tmp->list);
1883		list_add(&tmp->list, &removed);
1884
1885		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1886	}
1887
1888	/* And free them up */
1889	list_for_each_entry_safe(tmp, next, &removed, list) {
1890		amdgpu_vm_it_remove(tmp, &vm->va);
1891		list_del(&tmp->list);
1892
1893		if (tmp->start < saddr)
1894		    tmp->start = saddr;
1895		if (tmp->last > eaddr)
1896		    tmp->last = eaddr;
1897
1898		tmp->bo_va = NULL;
1899		list_add(&tmp->list, &vm->freed);
1900		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1901	}
1902
1903	/* Insert partial mapping before the range */
1904	if (!list_empty(&before->list)) {
1905		struct amdgpu_bo *bo = before->bo_va->base.bo;
1906
1907		amdgpu_vm_it_insert(before, &vm->va);
1908		if (before->flags & AMDGPU_PTE_PRT)
1909			amdgpu_vm_prt_get(adev);
1910
1911		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1912		    !before->bo_va->base.moved)
1913			amdgpu_vm_bo_moved(&before->bo_va->base);
1914	} else {
1915		kfree(before);
1916	}
1917
1918	/* Insert partial mapping after the range */
1919	if (!list_empty(&after->list)) {
1920		struct amdgpu_bo *bo = after->bo_va->base.bo;
1921
1922		amdgpu_vm_it_insert(after, &vm->va);
1923		if (after->flags & AMDGPU_PTE_PRT)
1924			amdgpu_vm_prt_get(adev);
1925
1926		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1927		    !after->bo_va->base.moved)
1928			amdgpu_vm_bo_moved(&after->bo_va->base);
1929	} else {
1930		kfree(after);
1931	}
1932
1933	return 0;
1934}
1935
1936/**
1937 * amdgpu_vm_bo_lookup_mapping - find mapping by address
1938 *
1939 * @vm: the requested VM
1940 * @addr: the address
1941 *
1942 * Find a mapping by its address.
1943 *
1944 * Returns:
1945 * The amdgpu_bo_va_mapping matching @addr or NULL
1946 *
1947 */
1948struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1949							 uint64_t addr)
1950{
1951	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1952}
1953
1954/**
1955 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1956 *
1957 * @vm: the requested vm
1958 * @ticket: CS ticket
1959 *
1960 * Trace all mappings of BOs reserved during a command submission.
1961 */
1962void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1963{
1964	struct amdgpu_bo_va_mapping *mapping;
1965
1966	if (!trace_amdgpu_vm_bo_cs_enabled())
1967		return;
1968
1969	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1970	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1971		if (mapping->bo_va && mapping->bo_va->base.bo) {
1972			struct amdgpu_bo *bo;
1973
1974			bo = mapping->bo_va->base.bo;
1975			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1976			    ticket)
1977				continue;
1978		}
1979
1980		trace_amdgpu_vm_bo_cs(mapping);
1981	}
1982}
1983
1984/**
1985 * amdgpu_vm_bo_del - remove a bo from a specific vm
1986 *
1987 * @adev: amdgpu_device pointer
1988 * @bo_va: requested bo_va
1989 *
1990 * Remove @bo_va->bo from the requested vm.
1991 *
1992 * Object has to be reserved!
1993 */
1994void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1995		      struct amdgpu_bo_va *bo_va)
1996{
1997	struct amdgpu_bo_va_mapping *mapping, *next;
1998	struct amdgpu_bo *bo = bo_va->base.bo;
1999	struct amdgpu_vm *vm = bo_va->base.vm;
2000	struct amdgpu_vm_bo_base **base;
2001
2002	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2003
2004	if (bo) {
2005		dma_resv_assert_held(bo->tbo.base.resv);
2006		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2007			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2008
2009		for (base = &bo_va->base.bo->vm_bo; *base;
2010		     base = &(*base)->next) {
2011			if (*base != &bo_va->base)
2012				continue;
2013
2014			*base = bo_va->base.next;
2015			break;
2016		}
2017	}
2018
2019	spin_lock(&vm->status_lock);
2020	list_del(&bo_va->base.vm_status);
2021	spin_unlock(&vm->status_lock);
2022
2023	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2024		list_del(&mapping->list);
2025		amdgpu_vm_it_remove(mapping, &vm->va);
2026		mapping->bo_va = NULL;
2027		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2028		list_add(&mapping->list, &vm->freed);
2029	}
2030	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2031		list_del(&mapping->list);
2032		amdgpu_vm_it_remove(mapping, &vm->va);
2033		amdgpu_vm_free_mapping(adev, vm, mapping,
2034				       bo_va->last_pt_update);
2035	}
2036
2037	dma_fence_put(bo_va->last_pt_update);
2038
2039	if (bo && bo_va->is_xgmi)
2040		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2041
2042	kfree(bo_va);
2043}
2044
2045/**
2046 * amdgpu_vm_evictable - check if we can evict a VM
2047 *
2048 * @bo: A page table of the VM.
2049 *
2050 * Check if it is possible to evict a VM.
2051 */
2052bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2053{
2054	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2055
2056	/* Page tables of a destroyed VM can go away immediately */
2057	if (!bo_base || !bo_base->vm)
2058		return true;
2059
2060	/* Don't evict VM page tables while they are busy */
2061	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2062		return false;
2063
2064	/* Try to block ongoing updates */
2065	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2066		return false;
2067
2068	/* Don't evict VM page tables while they are updated */
2069	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2070		amdgpu_vm_eviction_unlock(bo_base->vm);
2071		return false;
2072	}
2073
2074	bo_base->vm->evicting = true;
2075	amdgpu_vm_eviction_unlock(bo_base->vm);
2076	return true;
2077}
2078
2079/**
2080 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2081 *
2082 * @adev: amdgpu_device pointer
2083 * @bo: amdgpu buffer object
2084 * @evicted: is the BO evicted
2085 *
2086 * Mark @bo as invalid.
2087 */
2088void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2089			     struct amdgpu_bo *bo, bool evicted)
2090{
2091	struct amdgpu_vm_bo_base *bo_base;
2092
2093	/* shadow bo doesn't have bo base, its validation needs its parent */
2094	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2095		bo = bo->parent;
2096
2097	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2098		struct amdgpu_vm *vm = bo_base->vm;
2099
2100		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2101			amdgpu_vm_bo_evicted(bo_base);
2102			continue;
2103		}
2104
2105		if (bo_base->moved)
2106			continue;
2107		bo_base->moved = true;
2108
2109		if (bo->tbo.type == ttm_bo_type_kernel)
2110			amdgpu_vm_bo_relocated(bo_base);
2111		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2112			amdgpu_vm_bo_moved(bo_base);
2113		else
2114			amdgpu_vm_bo_invalidated(bo_base);
2115	}
2116}
2117
2118/**
2119 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2120 *
2121 * @vm_size: VM size
2122 *
2123 * Returns:
2124 * VM page table as power of two
2125 */
2126static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2127{
2128	/* Total bits covered by PD + PTs */
2129	unsigned bits = ilog2(vm_size) + 18;
2130
2131	/* Make sure the PD is 4K in size up to an 8GB address space.
2132	 * Above that, split the bits equally between PD and PTs. */
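	/* Worked example (illustrative): a 64GB VM gives bits = ilog2(64) + 18
	 * = 24, so the block size becomes (24 + 3) / 2 = 13, i.e. each PT
	 * covers 2^13 pages and the PD covers the remaining 24 - 13 = 11 bits.
	 */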
2133	if (vm_size <= 8)
2134		return (bits - 9);
2135	else
2136		return ((bits + 3) / 2);
2137}
2138
2139/**
2140 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2141 *
2142 * @adev: amdgpu_device pointer
2143 * @min_vm_size: the minimum vm size in GB if it is set to auto
2144 * @fragment_size_default: Default PTE fragment size
2145 * @max_level: max VMPT level
2146 * @max_bits: max address space size in bits
2147 *
2148 */
2149void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2150			   uint32_t fragment_size_default, unsigned max_level,
2151			   unsigned max_bits)
2152{
2153	unsigned int max_size = 1 << (max_bits - 30);
2154	unsigned int vm_size;
2155	uint64_t tmp;
2156
2157	/* adjust vm size first */
2158	if (amdgpu_vm_size != -1) {
2159		vm_size = amdgpu_vm_size;
2160		if (vm_size > max_size) {
2161			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2162				 amdgpu_vm_size, max_size);
2163			vm_size = max_size;
2164		}
2165	} else {
2166		struct sysinfo si;
2167		unsigned int phys_ram_gb;
2168
2169		/* Optimal VM size depends on the amount of physical
2170		 * RAM available. Underlying requirements and
2171		 * assumptions:
2172		 *
2173		 *  - Need to map system memory and VRAM from all GPUs
2174		 *     - VRAM from other GPUs not known here
2175		 *     - Assume VRAM <= system memory
2176		 *  - On GFX8 and older, VM space can be segmented for
2177		 *    different MTYPEs
2178		 *  - Need to allow room for fragmentation, guard pages etc.
2179		 *
2180		 * This adds up to a rough guess of system memory x3.
2181		 * Round up to power of two to maximize the available
2182		 * VM size with the given page table size.
2183		 */
2184		si_meminfo(&si);
2185		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2186			       (1 << 30) - 1) >> 30;
2187		vm_size = roundup_pow_of_two(
2188			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2189	}
2190
2191	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2192
2193	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2194	if (amdgpu_vm_block_size != -1)
2195		tmp >>= amdgpu_vm_block_size - 9;
2196	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2197	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
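	/* Worked example (illustrative): with amdgpu_vm_block_size left at -1,
	 * a 256GB VM gives max_pfn = 256 << 18 = 2^26, fls64(2^26) - 1 = 26 and
	 * DIV_ROUND_UP(26, 9) - 1 = 2 levels above the leaf page tables.
	 */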
2198	switch (adev->vm_manager.num_level) {
2199	case 3:
2200		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2201		break;
2202	case 2:
2203		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2204		break;
2205	case 1:
2206		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2207		break;
2208	default:
2209		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2210	}
2211	/* block size depends on vm size and hw setup*/
2212	if (amdgpu_vm_block_size != -1)
2213		adev->vm_manager.block_size =
2214			min((unsigned)amdgpu_vm_block_size, max_bits
2215			    - AMDGPU_GPU_PAGE_SHIFT
2216			    - 9 * adev->vm_manager.num_level);
2217	else if (adev->vm_manager.num_level > 1)
2218		adev->vm_manager.block_size = 9;
2219	else
2220		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2221
2222	if (amdgpu_vm_fragment_size == -1)
2223		adev->vm_manager.fragment_size = fragment_size_default;
2224	else
2225		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2226
2227	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2228		 vm_size, adev->vm_manager.num_level + 1,
2229		 adev->vm_manager.block_size,
2230		 adev->vm_manager.fragment_size);
2231}
2232
2233/**
2234 * amdgpu_vm_wait_idle - wait for the VM to become idle
2235 *
2236 * @vm: VM object to wait for
2237 * @timeout: timeout to wait for VM to become idle
2238 */
2239long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2240{
2241	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2242					DMA_RESV_USAGE_BOOKKEEP,
2243					true, timeout);
2244	if (timeout <= 0)
2245		return timeout;
2246
2247	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2248}
2249
2250static void amdgpu_vm_destroy_task_info(struct kref *kref)
2251{
2252	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2253
2254	kfree(ti);
2255}
2256
2257static inline struct amdgpu_vm *
2258amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2259{
2260	struct amdgpu_vm *vm;
2261	unsigned long flags;
2262
2263	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2264	vm = xa_load(&adev->vm_manager.pasids, pasid);
2265	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2266
2267	return vm;
2268}
2269
2270/**
2271 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2272 *
2273 * @task_info: task_info struct under discussion.
2274 *
2275 * frees the vm task_info ptr at the last put
2276 */
2277void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2278{
2279	kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2280}
2281
2282/**
2283 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2284 *
2285 * @vm: VM to get info from
2286 *
2287 * Returns the reference counted task_info structure, which must be
2288 * referenced down with amdgpu_vm_put_task_info.
2289 */
2290struct amdgpu_task_info *
2291amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2292{
2293	struct amdgpu_task_info *ti = NULL;
2294
2295	if (vm) {
2296		ti = vm->task_info;
2297		kref_get(&vm->task_info->refcount);
2298	}
2299
2300	return ti;
2301}
2302
2303/**
2304 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2305 *
2306 * @adev: drm device pointer
2307 * @pasid: PASID identifier for VM
2308 *
2309 * Returns the reference counted task_info structure, which must be
2310 * referenced down with amdgpu_vm_put_task_info.
2311 */
2312struct amdgpu_task_info *
2313amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2314{
2315	return amdgpu_vm_get_task_info_vm(
2316			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2317}
2318
2319static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2320{
2321	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2322	if (!vm->task_info)
2323		return -ENOMEM;
2324
2325	kref_init(&vm->task_info->refcount);
2326	return 0;
2327}
2328
2329/**
2330 * amdgpu_vm_set_task_info - Sets VMs task info.
2331 *
2332 * @vm: vm for which to set the info
2333 */
2334void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2335{
2336	if (!vm->task_info)
2337		return;
2338
2339	if (vm->task_info->pid == current->pid)
2340		return;
2341
2342	vm->task_info->pid = current->pid;
2343	get_task_comm(vm->task_info->task_name, current);
2344
2345	if (current->group_leader->mm != current->mm)
2346		return;
2347
2348	vm->task_info->tgid = current->group_leader->pid;
2349	get_task_comm(vm->task_info->process_name, current->group_leader);
2350}
2351
2352/**
2353 * amdgpu_vm_init - initialize a vm instance
2354 *
2355 * @adev: amdgpu_device pointer
2356 * @vm: requested vm
2357 * @xcp_id: GPU partition selection id
2358 *
2359 * Init @vm fields.
2360 *
2361 * Returns:
2362 * 0 for success, error for failure.
2363 */
2364int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2365		   int32_t xcp_id)
2366{
2367	struct amdgpu_bo *root_bo;
2368	struct amdgpu_bo_vm *root;
2369	int r, i;
2370
2371	vm->va = RB_ROOT_CACHED;
2372	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2373		vm->reserved_vmid[i] = NULL;
2374	INIT_LIST_HEAD(&vm->evicted);
2375	INIT_LIST_HEAD(&vm->evicted_user);
2376	INIT_LIST_HEAD(&vm->relocated);
2377	INIT_LIST_HEAD(&vm->moved);
2378	INIT_LIST_HEAD(&vm->idle);
2379	INIT_LIST_HEAD(&vm->invalidated);
2380	spin_lock_init(&vm->status_lock);
2381	INIT_LIST_HEAD(&vm->freed);
2382	INIT_LIST_HEAD(&vm->done);
2383	INIT_LIST_HEAD(&vm->pt_freed);
2384	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2385	INIT_KFIFO(vm->faults);
2386
2387	r = amdgpu_vm_init_entities(adev, vm);
2388	if (r)
2389		return r;
2390
2391	vm->is_compute_context = false;
2392
2393	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2394				    AMDGPU_VM_USE_CPU_FOR_GFX);
2395
2396	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2397			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2398	WARN_ONCE((vm->use_cpu_for_update &&
2399		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2400		  "CPU update of VM recommended only for large BAR system\n");
2401
2402	if (vm->use_cpu_for_update)
2403		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2404	else
2405		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2406
2407	vm->last_update = dma_fence_get_stub();
2408	vm->last_unlocked = dma_fence_get_stub();
2409	vm->last_tlb_flush = dma_fence_get_stub();
2410	vm->generation = 0;
2411
2412	mutex_init(&vm->eviction_lock);
2413	vm->evicting = false;
2414
2415	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2416				false, &root, xcp_id);
2417	if (r)
2418		goto error_free_delayed;
2419
2420	root_bo = amdgpu_bo_ref(&root->bo);
2421	r = amdgpu_bo_reserve(root_bo, true);
2422	if (r) {
2423		amdgpu_bo_unref(&root->shadow);
2424		amdgpu_bo_unref(&root_bo);
2425		goto error_free_delayed;
2426	}
2427
2428	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2429	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2430	if (r)
2431		goto error_free_root;
2432
2433	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2434	if (r)
2435		goto error_free_root;
2436
2437	r = amdgpu_vm_create_task_info(vm);
2438	if (r)
2439		DRM_DEBUG("Failed to create task info for VM\n");
2440
2441	amdgpu_bo_unreserve(vm->root.bo);
2442	amdgpu_bo_unref(&root_bo);
2443
2444	return 0;
2445
2446error_free_root:
2447	amdgpu_vm_pt_free_root(adev, vm);
2448	amdgpu_bo_unreserve(vm->root.bo);
2449	amdgpu_bo_unref(&root_bo);
2450
2451error_free_delayed:
2452	dma_fence_put(vm->last_tlb_flush);
2453	dma_fence_put(vm->last_unlocked);
2454	amdgpu_vm_fini_entities(vm);
2455
2456	return r;
2457}
2458
2459/**
2460 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2461 *
2462 * @adev: amdgpu_device pointer
2463 * @vm: requested vm
2464 *
2465 * This only works on GFX VMs that don't have any BOs added and no
2466 * page tables allocated yet.
2467 *
2468 * Changes the following VM parameters:
2469 * - use_cpu_for_update
2470 * - pte_supports_ats
2471 *
2472 * Reinitializes the page directory to reflect the changed ATS
2473 * setting.
2474 *
2475 * Returns:
2476 * 0 for success, -errno for errors.
2477 */
2478int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2479{
2480	int r;
2481
2482	r = amdgpu_bo_reserve(vm->root.bo, true);
2483	if (r)
2484		return r;
2485
2486	/* Update VM state */
2487	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2488				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2489	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2490			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2491	WARN_ONCE((vm->use_cpu_for_update &&
2492		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2493		  "CPU update of VM recommended only for large BAR system\n");
2494
2495	if (vm->use_cpu_for_update) {
2496		/* Sync with last SDMA update/clear before switching to CPU */
2497		r = amdgpu_bo_sync_wait(vm->root.bo,
2498					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2499		if (r)
2500			goto unreserve_bo;
2501
2502		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2503		r = amdgpu_vm_pt_map_tables(adev, vm);
2504		if (r)
2505			goto unreserve_bo;
2506
2507	} else {
2508		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2509	}
2510
2511	dma_fence_put(vm->last_update);
2512	vm->last_update = dma_fence_get_stub();
2513	vm->is_compute_context = true;
2514
2515	/* Free the shadow bo for compute VM */
2516	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2517
2518	goto unreserve_bo;
2519
2520unreserve_bo:
2521	amdgpu_bo_unreserve(vm->root.bo);
2522	return r;
2523}
2524
2525/**
2526 * amdgpu_vm_release_compute - release a compute vm
2527 * @adev: amdgpu_device pointer
2528 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2529 *
2530 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
2531 * pasid from the vm. Compute should stop using the vm after this call.
2532 */
2533void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2534{
2535	amdgpu_vm_set_pasid(adev, vm, 0);
2536	vm->is_compute_context = false;
2537}
2538
2539/**
2540 * amdgpu_vm_fini - tear down a vm instance
2541 *
2542 * @adev: amdgpu_device pointer
2543 * @vm: requested vm
2544 *
2545 * Tear down @vm.
2546 * Unbind the VM and remove all bos from the vm bo list
2547 */
2548void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2549{
2550	struct amdgpu_bo_va_mapping *mapping, *tmp;
2551	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2552	struct amdgpu_bo *root;
2553	unsigned long flags;
2554	int i;
2555
2556	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2557
2558	flush_work(&vm->pt_free_work);
2559
2560	root = amdgpu_bo_ref(vm->root.bo);
2561	amdgpu_bo_reserve(root, true);
2562	amdgpu_vm_put_task_info(vm->task_info);
2563	amdgpu_vm_set_pasid(adev, vm, 0);
2564	dma_fence_wait(vm->last_unlocked, false);
2565	dma_fence_put(vm->last_unlocked);
2566	dma_fence_wait(vm->last_tlb_flush, false);
2567	/* Make sure that all fence callbacks have completed */
2568	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2569	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2570	dma_fence_put(vm->last_tlb_flush);
2571
2572	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2573		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2574			amdgpu_vm_prt_fini(adev, vm);
2575			prt_fini_needed = false;
2576		}
2577
2578		list_del(&mapping->list);
2579		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2580	}
2581
2582	amdgpu_vm_pt_free_root(adev, vm);
2583	amdgpu_bo_unreserve(root);
2584	amdgpu_bo_unref(&root);
2585	WARN_ON(vm->root.bo);
2586
2587	amdgpu_vm_fini_entities(vm);
2588
2589	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2590		dev_err(adev->dev, "still active bo inside vm\n");
2591	}
2592	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2593					     &vm->va.rb_root, rb) {
2594		/* Don't remove the mapping here, we don't want to trigger a
2595		 * rebalance and the tree is about to be destroyed anyway.
2596		 */
2597		list_del(&mapping->list);
2598		kfree(mapping);
2599	}
2600
2601	dma_fence_put(vm->last_update);
2602
2603	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2604		if (vm->reserved_vmid[i]) {
2605			amdgpu_vmid_free_reserved(adev, i);
2606			vm->reserved_vmid[i] = false;
2607		}
2608	}
2609
2610}
2611
2612/**
2613 * amdgpu_vm_manager_init - init the VM manager
2614 *
2615 * @adev: amdgpu_device pointer
2616 *
2617 * Initialize the VM manager structures
2618 */
2619void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2620{
2621	unsigned i;
2622
2623	/* Concurrent flushes are only possible starting with Vega10 and
2624	 * are broken on Navi10 and Navi14.
2625	 */
2626	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2627					      adev->asic_type == CHIP_NAVI10 ||
2628					      adev->asic_type == CHIP_NAVI14);
2629	amdgpu_vmid_mgr_init(adev);
2630
2631	adev->vm_manager.fence_context =
2632		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2633	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2634		adev->vm_manager.seqno[i] = 0;
2635
2636	spin_lock_init(&adev->vm_manager.prt_lock);
2637	atomic_set(&adev->vm_manager.num_prt_users, 0);
2638
2639	/* Unless overridden by the user, compute VM tables are by default
2640	 * updated by the CPU only on large BAR systems.
2641	 */
2642#ifdef CONFIG_X86_64
2643	if (amdgpu_vm_update_mode == -1) {
2644		/* For asic with VF MMIO access protection
2645		 * avoid using CPU for VM table updates
2646		 */
2647		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2648		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2649			adev->vm_manager.vm_update_mode =
2650				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2651		else
2652			adev->vm_manager.vm_update_mode = 0;
2653	} else
2654		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2655#else
2656	adev->vm_manager.vm_update_mode = 0;
2657#endif
2658
2659	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2660}
2661
2662/**
2663 * amdgpu_vm_manager_fini - cleanup VM manager
2664 *
2665 * @adev: amdgpu_device pointer
2666 *
2667 * Cleanup the VM manager and free resources.
2668 */
2669void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2670{
2671	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2672	xa_destroy(&adev->vm_manager.pasids);
2673
2674	amdgpu_vmid_mgr_fini(adev);
2675}
2676
2677/**
2678 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2679 *
2680 * @dev: drm device pointer
2681 * @data: drm_amdgpu_vm
2682 * @filp: drm file pointer
2683 *
2684 * Returns:
2685 * 0 for success, -errno for errors.
2686 */
2687int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2688{
2689	union drm_amdgpu_vm *args = data;
2690	struct amdgpu_device *adev = drm_to_adev(dev);
2691	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2692
2693	/* No valid flags defined yet */
2694	if (args->in.flags)
2695		return -EINVAL;
2696
2697	switch (args->in.op) {
2698	case AMDGPU_VM_OP_RESERVE_VMID:
2699		/* We only need to reserve a vmid from the gfxhub */
2700		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2701			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2702			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2703		}
2704
2705		break;
2706	case AMDGPU_VM_OP_UNRESERVE_VMID:
2707		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2708			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2709			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2710		}
2711		break;
2712	default:
2713		return -EINVAL;
2714	}
2715
2716	return 0;
2717}
2718
2719/**
2720 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2721 * @adev: amdgpu device pointer
2722 * @pasid: PASID of the VM
2723 * @vmid: VMID, only used for GFX 9.4.3.
2724 * @node_id: Node_id received in IH cookie. Only applicable for
2725 *           GFX 9.4.3.
2726 * @addr: Address of the fault
2727 * @write_fault: true if write fault, false if read fault
2728 *
2729 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2730 * shouldn't be reported any more.
2731 */
2732bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2733			    u32 vmid, u32 node_id, uint64_t addr,
2734			    bool write_fault)
2735{
2736	bool is_compute_context = false;
2737	struct amdgpu_bo *root;
2738	unsigned long irqflags;
2739	uint64_t value, flags;
2740	struct amdgpu_vm *vm;
2741	int r;
2742
2743	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2744	vm = xa_load(&adev->vm_manager.pasids, pasid);
2745	if (vm) {
2746		root = amdgpu_bo_ref(vm->root.bo);
2747		is_compute_context = vm->is_compute_context;
2748	} else {
2749		root = NULL;
2750	}
2751	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2752
2753	if (!root)
2754		return false;
2755
2756	addr /= AMDGPU_GPU_PAGE_SIZE;
2757
2758	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2759	    node_id, addr, write_fault)) {
2760		amdgpu_bo_unref(&root);
2761		return true;
2762	}
2763
2764	r = amdgpu_bo_reserve(root, true);
2765	if (r)
2766		goto error_unref;
2767
2768	/* Double check that the VM still exists */
2769	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2770	vm = xa_load(&adev->vm_manager.pasids, pasid);
2771	if (vm && vm->root.bo != root)
2772		vm = NULL;
2773	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2774	if (!vm)
2775		goto error_unlock;
2776
2777	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2778		AMDGPU_PTE_SYSTEM;
2779
2780	if (is_compute_context) {
2781		/* Intentionally setting invalid PTE flag
2782		 * combination to force a no-retry-fault
2783		 */
2784		flags = AMDGPU_VM_NORETRY_FLAGS;
2785		value = 0;
2786	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2787		/* Redirect the access to the dummy page */
2788		value = adev->dummy_page_addr;
2789		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2790			AMDGPU_PTE_WRITEABLE;
2791
2792	} else {
2793		/* Let the hw retry silently on the PTE */
2794		value = 0;
2795	}
2796
2797	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2798	if (r) {
2799		pr_debug("failed %d to reserve fence slot\n", r);
2800		goto error_unlock;
2801	}
2802
2803	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2804				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2805	if (r)
2806		goto error_unlock;
2807
2808	r = amdgpu_vm_update_pdes(adev, vm, true);
2809
2810error_unlock:
2811	amdgpu_bo_unreserve(root);
2812	if (r < 0)
2813		DRM_ERROR("Can't handle page fault (%d)\n", r);
2814
2815error_unref:
2816	amdgpu_bo_unref(&root);
2817
2818	return false;
2819}
2820
2821#if defined(CONFIG_DEBUG_FS)
2822/**
2823 * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2824 *
2825 * @vm: Requested VM for printing BO info
2826 * @m: debugfs file
2827 *
2828 * Print BO information in debugfs file for the VM
2829 */
2830void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2831{
2832	struct amdgpu_bo_va *bo_va, *tmp;
2833	u64 total_idle = 0;
2834	u64 total_evicted = 0;
2835	u64 total_relocated = 0;
2836	u64 total_moved = 0;
2837	u64 total_invalidated = 0;
2838	u64 total_done = 0;
2839	unsigned int total_idle_objs = 0;
2840	unsigned int total_evicted_objs = 0;
2841	unsigned int total_relocated_objs = 0;
2842	unsigned int total_moved_objs = 0;
2843	unsigned int total_invalidated_objs = 0;
2844	unsigned int total_done_objs = 0;
2845	unsigned int id = 0;
2846
2847	spin_lock(&vm->status_lock);
2848	seq_puts(m, "\tIdle BOs:\n");
2849	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2850		if (!bo_va->base.bo)
2851			continue;
2852		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2853	}
2854	total_idle_objs = id;
2855	id = 0;
2856
2857	seq_puts(m, "\tEvicted BOs:\n");
2858	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2859		if (!bo_va->base.bo)
2860			continue;
2861		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2862	}
2863	total_evicted_objs = id;
2864	id = 0;
2865
2866	seq_puts(m, "\tRelocated BOs:\n");
2867	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2868		if (!bo_va->base.bo)
2869			continue;
2870		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2871	}
2872	total_relocated_objs = id;
2873	id = 0;
2874
2875	seq_puts(m, "\tMoved BOs:\n");
2876	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2877		if (!bo_va->base.bo)
2878			continue;
2879		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2880	}
2881	total_moved_objs = id;
2882	id = 0;
2883
2884	seq_puts(m, "\tInvalidated BOs:\n");
2885	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2886		if (!bo_va->base.bo)
2887			continue;
2888		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
2889	}
2890	total_invalidated_objs = id;
2891	id = 0;
2892
2893	seq_puts(m, "\tDone BOs:\n");
2894	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2895		if (!bo_va->base.bo)
2896			continue;
2897		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2898	}
2899	spin_unlock(&vm->status_lock);
2900	total_done_objs = id;
2901
2902	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2903		   total_idle_objs);
2904	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2905		   total_evicted_objs);
2906	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2907		   total_relocated_objs);
2908	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2909		   total_moved_objs);
2910	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2911		   total_invalidated_objs);
2912	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2913		   total_done_objs);
2914}
2915#endif
2916
2917/**
2918 * amdgpu_vm_update_fault_cache - update cached fault info.
2919 * @adev: amdgpu device pointer
2920 * @pasid: PASID of the VM
2921 * @addr: Address of the fault
2922 * @status: GPUVM fault status register
2923 * @vmhub: which vmhub got the fault
2924 *
2925 * Cache the fault info for later use by userspace in debugging.
2926 */
2927void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2928				  unsigned int pasid,
2929				  uint64_t addr,
2930				  uint32_t status,
2931				  unsigned int vmhub)
2932{
2933	struct amdgpu_vm *vm;
2934	unsigned long flags;
2935
2936	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2937
2938	vm = xa_load(&adev->vm_manager.pasids, pasid);
2939	/* Don't update the fault cache if status is 0.  In the multiple
2940	 * fault case, subsequent faults will return a 0 status which is
2941	 * useless for userspace and replaces the useful fault status, so
2942	 * only update if status is non-0.
2943	 */
2944	if (vm && status) {
2945		vm->fault_info.addr = addr;
2946		vm->fault_info.status = status;
2947		if (AMDGPU_IS_GFXHUB(vmhub)) {
2948			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
2949			vm->fault_info.vmhub |=
2950				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
2951		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
2952			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
2953			vm->fault_info.vmhub |=
2954				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
2955		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
2956			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
2957			vm->fault_info.vmhub |=
2958				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
2959		} else {
2960			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
2961		}
2962	}
2963	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2964}
2965
v4.10.11
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
  28#include <linux/dma-fence-array.h>
  29#include <drm/drmP.h>
 
 
 
  30#include <drm/amdgpu_drm.h>
 
 
 
  31#include "amdgpu.h"
  32#include "amdgpu_trace.h"
  33
  34/*
  35 * GPUVM
  36 * GPUVM is similar to the legacy gart on older asics, however
  37 * rather than there being a single global gart table
  38 * for the entire GPU, there are multiple VM page tables active
  39 * at any given time.  The VM page tables can contain a mix
  40 * of vram pages and system memory pages, and system memory pages
  41 * can be mapped as snooped (cached system pages) or unsnooped
  42 * (uncached system pages).
  43 * Each VM has an ID associated with it and there is a page table
  44 * associated with each VMID.  When executing a command buffer,
  45 * the kernel tells the ring what VMID to use for that command
 
  46 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  47 * The userspace drivers maintain their own address space and the kernel
  48 * sets up their pages tables accordingly when they submit their
  49 * command buffers and a VMID is assigned.
  50 * Cayman/Trinity support up to 8 active VMs at any given time;
  51 * SI supports 16.
  52 */
  53
  54/* Local structure. Encapsulate some VM table update parameters to reduce
  55 * the number of function parameters
  56 */
  57struct amdgpu_pte_update_params {
  58	/* amdgpu device we do this update for */
 
 
 
  59	struct amdgpu_device *adev;
  60	/* address where to copy page table entries from */
  61	uint64_t src;
  62	/* indirect buffer to fill with commands */
  63	struct amdgpu_ib *ib;
  64	/* Function which actually does the update */
  65	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
  66		     uint64_t addr, unsigned count, uint32_t incr,
  67		     uint32_t flags);
  68	/* indicate update pt or its shadow */
  69	bool shadow;
  70};
  71
  72/**
  73 * amdgpu_vm_num_pde - return the number of page directory entries
  74 *
  75 * @adev: amdgpu_device pointer
 
 
 
 
 
  76 *
  77 * Calculate the number of page directory entries.
  78 */
  79static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
 
  80{
  81	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
  82}
  83
  84/**
  85 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 
 
  86 *
  87 * @adev: amdgpu_device pointer
  88 *
  89 * Calculate the size of the page directory in bytes.
 
  90 */
  91static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
  92{
  93	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
 
 
 
  94}
  95
  96/**
  97 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
  98 *
  99 * @vm: vm providing the BOs
 100 * @validated: head of validation list
 101 * @entry: entry to add
 102 *
 103 * Add the page directory to the list of BOs to
 104 * validate for command submission.
 105 */
 106void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 107			 struct list_head *validated,
 108			 struct amdgpu_bo_list_entry *entry)
 109{
 110	entry->robj = vm->page_directory;
 111	entry->priority = 0;
 112	entry->tv.bo = &vm->page_directory->tbo;
 113	entry->tv.shared = true;
 114	entry->user_pages = NULL;
 115	list_add(&entry->tv.head, validated);
 116}
 117
 118/**
 119 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 120 *
 121 * @adev: amdgpu device pointer
 122 * @vm: vm providing the BOs
 123 * @validate: callback to do the validation
 124 * @param: parameter for the validation callback
 125 *
 126 * Validate the page table BOs on command submission if necessary.
 
 127 */
 128int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 129			      int (*validate)(void *p, struct amdgpu_bo *bo),
 130			      void *param)
 131{
 132	uint64_t num_evictions;
 133	unsigned i;
 134	int r;
 
 
 135
 136	/* We only need to validate the page tables
 137	 * if they aren't already valid.
 138	 */
 139	num_evictions = atomic64_read(&adev->num_evictions);
 140	if (num_evictions == vm->last_eviction_counter)
 141		return 0;
 142
 143	/* add the vm page table to the list */
 144	for (i = 0; i <= vm->max_pde_used; ++i) {
 145		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 146
 147		if (!bo)
 148			continue;
 149
 150		r = validate(param, bo);
 151		if (r)
 152			return r;
 153	}
 154
 155	return 0;
 156}
 157
 158/**
 159 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 
 
 
 
 160 *
 161 * @adev: amdgpu device instance
 162 * @vm: vm providing the BOs
 163 *
 164 * Move the PT BOs to the tail of the LRU.
 165 */
 166void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 167				  struct amdgpu_vm *vm)
 168{
 169	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 170	unsigned i;
 171
 172	spin_lock(&glob->lru_lock);
 173	for (i = 0; i <= vm->max_pde_used; ++i) {
 174		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
 
 175
 176		if (!bo)
 177			continue;
 
 178
 179		ttm_bo_move_to_lru_tail(&bo->tbo);
 180	}
 181	spin_unlock(&glob->lru_lock);
 
 
 
 182}
 183
 184static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
 185			      struct amdgpu_vm_id *id)
 186{
 187	return id->current_gpu_reset_count !=
 188		atomic_read(&adev->gpu_reset_counter) ? true : false;
 
 189}
 190
 191/**
 192 * amdgpu_vm_grab_id - allocate the next free VMID
 193 *
 194 * @vm: vm to allocate id for
 195 * @ring: ring we want to submit job to
 196 * @sync: sync object where we add dependencies
 197 * @fence: fence protecting ID from reuse
 198 *
 199 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 
 200 */
 201int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 202		      struct amdgpu_sync *sync, struct dma_fence *fence,
 203		      struct amdgpu_job *job)
 204{
 205	struct amdgpu_device *adev = ring->adev;
 206	uint64_t fence_context = adev->fence_context + ring->idx;
 207	struct dma_fence *updates = sync->last_vm_update;
 208	struct amdgpu_vm_id *id, *idle;
 209	struct dma_fence **fences;
 210	unsigned i;
 211	int r = 0;
 
 
 
 
 212
 213	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
 214			       GFP_KERNEL);
 215	if (!fences)
 216		return -ENOMEM;
 217
 218	mutex_lock(&adev->vm_manager.lock);
 
 
 
 
 
 219
 220	/* Check if we have an idle VMID */
 221	i = 0;
 222	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
 223		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
 224		if (!fences[i])
 225			break;
 226		++i;
 227	}
 
 
 
 
 228
 229	/* If we can't find an idle VMID to use, wait until one becomes available */
 230	if (&idle->list == &adev->vm_manager.ids_lru) {
 231		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 232		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
 233		struct dma_fence_array *array;
 234		unsigned j;
 235
 236		for (j = 0; j < i; ++j)
 237			dma_fence_get(fences[j]);
 238
 239		array = dma_fence_array_create(i, fences, fence_context,
 240					   seqno, true);
 241		if (!array) {
 242			for (j = 0; j < i; ++j)
 243				dma_fence_put(fences[j]);
 244			kfree(fences);
 245			r = -ENOMEM;
 246			goto error;
 247		}
 248
 
 
 
 
 249
 250		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
 251		dma_fence_put(&array->base);
 252		if (r)
 253			goto error;
 254
 255		mutex_unlock(&adev->vm_manager.lock);
 256		return 0;
 257
 258	}
 259	kfree(fences);
 260
 261	job->vm_needs_flush = true;
 262	/* Check if we can use a VMID already assigned to this VM */
 263	i = ring->idx;
 264	do {
 265		struct dma_fence *flushed;
 266
 267		id = vm->ids[i++];
 268		if (i == AMDGPU_MAX_RINGS)
 269			i = 0;
 270
 271		/* Check all the prerequisites to using this VMID */
 272		if (!id)
 273			continue;
 274		if (amdgpu_vm_is_gpu_reset(adev, id))
 275			continue;
 
 
 
 276
 277		if (atomic64_read(&id->owner) != vm->client_id)
 278			continue;
 279
 280		if (job->vm_pd_addr != id->pd_gpu_addr)
 281			continue;
 282
 283		if (!id->last_flush)
 284			continue;
 285
 286		if (id->last_flush->context != fence_context &&
 287		    !dma_fence_is_signaled(id->last_flush))
 288			continue;
 
 
 289
 290		flushed  = id->flushed_updates;
 291		if (updates &&
 292		    (!flushed || dma_fence_is_later(updates, flushed)))
 293			continue;
 294
 295		/* Good we can use this VMID. Remember this submission as
 296		 * user of the VMID.
 297		 */
 298		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 299		if (r)
 300			goto error;
 301
 302		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 303		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 304		vm->ids[ring->idx] = id;
 305
 306		job->vm_id = id - adev->vm_manager.ids;
 307		job->vm_needs_flush = false;
 308		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 309
 310		mutex_unlock(&adev->vm_manager.lock);
 311		return 0;
 
 312
 313	} while (i != ring->idx);
 
 
 314
 315	/* Still no ID to use? Then use the idle one found earlier */
 316	id = idle;
 317
 318	/* Remember this submission as user of the VMID */
 319	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 320	if (r)
 321		goto error;
 322
 323	dma_fence_put(id->first);
 324	id->first = dma_fence_get(fence);
 
 325
 326	dma_fence_put(id->last_flush);
 327	id->last_flush = NULL;
 
 328
 329	dma_fence_put(id->flushed_updates);
 330	id->flushed_updates = dma_fence_get(updates);
 331
 332	id->pd_gpu_addr = job->vm_pd_addr;
 333	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 334	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 335	atomic64_set(&id->owner, vm->client_id);
 336	vm->ids[ring->idx] = id;
 
 
 
 
 
 
 337
 338	job->vm_id = id - adev->vm_manager.ids;
 339	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 340
 341error:
 342	mutex_unlock(&adev->vm_manager.lock);
 343	return r;
 344}
 345
 346static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 347{
 348	struct amdgpu_device *adev = ring->adev;
 349	const struct amdgpu_ip_block *ip_block;
 
 350
 351	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
 352		/* only compute rings */
 353		return false;
 354
 355	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 356	if (!ip_block)
 357		return false;
 
 
 358
 359	if (ip_block->version->major <= 7) {
 360		/* gfx7 has no workaround */
 361		return true;
 362	} else if (ip_block->version->major == 8) {
 363		if (adev->gfx.mec_fw_version >= 673)
 364			/* gfx8 is fixed in MEC firmware 673 */
 365			return false;
 366		else
 367			return true;
 368	}
 369	return false;
 370}
 371
 372/**
 373 * amdgpu_vm_flush - hardware flush the vm
 374 *
 375 * @ring: ring to use for flush
 376 * @vm_id: vmid number to use
 377 * @pd_addr: address of the page directory
 378 *
 379 * Emit a VM flush when it is necessary.
 
 
 
 380 */
 381int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 
 382{
 383	struct amdgpu_device *adev = ring->adev;
 384	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
 385	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 386		id->gds_base != job->gds_base ||
 387		id->gds_size != job->gds_size ||
 388		id->gws_base != job->gws_base ||
 389		id->gws_size != job->gws_size ||
 390		id->oa_base != job->oa_base ||
 391		id->oa_size != job->oa_size);
 
 
 392	int r;
 393
 394	if (ring->funcs->emit_pipeline_sync && (
 395	    job->vm_needs_flush || gds_switch_needed ||
 396	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
 397		amdgpu_ring_emit_pipeline_sync(ring);
 398
 399	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
 400	    amdgpu_vm_is_gpu_reset(adev, id))) {
 401		struct dma_fence *fence;
 
 
 
 
 402
 403		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
 404		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
 405
 406		r = amdgpu_fence_emit(ring, &fence);
 
 407		if (r)
 408			return r;
 
 409
 410		mutex_lock(&adev->vm_manager.lock);
 
 411		dma_fence_put(id->last_flush);
 412		id->last_flush = fence;
 413		mutex_unlock(&adev->vm_manager.lock);
 
 
 414	}
 415
 416	if (gds_switch_needed) {
 417		id->gds_base = job->gds_base;
 418		id->gds_size = job->gds_size;
 419		id->gws_base = job->gws_base;
 420		id->gws_size = job->gws_size;
 421		id->oa_base = job->oa_base;
 422		id->oa_size = job->oa_size;
 423		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
 424					    job->gds_base, job->gds_size,
 425					    job->gws_base, job->gws_size,
 426					    job->oa_base, job->oa_size);
 427	}
 
 428
 429	return 0;
 430}
 431
 432/**
 433 * amdgpu_vm_reset_id - reset VMID to zero
 434 *
 435 * @adev: amdgpu device structure
 436 * @vm_id: vmid number to use
 437 *
 438 * Reset saved GDS, GWS and OA to force switch on next flush.
 439 */
 440void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 441{
 442	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 443
 444	id->gds_base = 0;
 445	id->gds_size = 0;
 446	id->gws_base = 0;
 447	id->gws_size = 0;
 448	id->oa_base = 0;
 449	id->oa_size = 0;
 450}
 451
 452/**
 453 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 454 *
 455 * @vm: requested vm
 456 * @bo: requested buffer object
 457 *
 458 * Find @bo inside the requested vm.
 459 * Search inside the @bo's vm list for the requested vm
 460 * Returns the found bo_va or NULL if none is found
 461 *
 462 * Object has to be reserved!
 
 
 
 463 */
 464struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 465				       struct amdgpu_bo *bo)
 466{
 467	struct amdgpu_bo_va *bo_va;
 
 
 
 
 468
 469	list_for_each_entry(bo_va, &bo->va, bo_list) {
 470		if (bo_va->vm == vm) {
 471			return bo_va;
 472		}
 473	}
 474	return NULL;
 475}
 476
 477/**
 478 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 479 *
 480 * @params: see amdgpu_pte_update_params definition
 481 * @pe: addr of the page entry
 482 * @addr: dst addr to write into pe
 483 * @count: number of page entries to update
 484 * @incr: increase next addr by incr bytes
 485 * @flags: hw access flags
 486 *
 487 * Traces the parameters and calls the right asic functions
 488 * to setup the page table using the DMA.
 489 */
 490static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 491				  uint64_t pe, uint64_t addr,
 492				  unsigned count, uint32_t incr,
 493				  uint32_t flags)
 494{
 495	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 496
 497	if (count < 3) {
 498		amdgpu_vm_write_pte(params->adev, params->ib, pe,
 499				    addr | flags, count, incr);
 500
 501	} else {
 502		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
 503				      count, incr, flags);
 504	}
 505}
 506
 507/**
 508 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 509 *
 510 * @params: see amdgpu_pte_update_params definition
 511 * @pe: addr of the page entry
 512 * @addr: dst addr to write into pe
 513 * @count: number of page entries to update
 514 * @incr: increase next addr by incr bytes
 515 * @flags: hw access flags
 516 *
 517 * Traces the parameters and calls the DMA function to copy the PTEs.
 518 */
 519static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 520				   uint64_t pe, uint64_t addr,
 521				   unsigned count, uint32_t incr,
 522				   uint32_t flags)
 523{
 524	uint64_t src = (params->src + (addr >> 12) * 8);
 525
 526
 527	trace_amdgpu_vm_copy_ptes(pe, src, count);
 528
 529	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 530}
 531
 532/**
 533 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 534 *
 535 * @pages_addr: optional DMA address to use for lookup
 536 * @addr: the unmapped addr
 537 *
 538 * Look up the physical address of the page that the pte resolves
 539 * to and return the pointer for the page table entry.
 
 
 
 540 */
 541static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 542{
 543	uint64_t result;
 544
 545	/* page table offset */
 546	result = pages_addr[addr >> PAGE_SHIFT];
 547
 548	/* in case cpu page size != gpu page size*/
 549	result |= addr & (~PAGE_MASK);
 550
 551	result &= 0xFFFFFFFFFFFFF000ULL;
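	/* Illustrative note (hypothetical page sizes): with 64KB CPU pages and
	 * 4KB GPU pages, addr = 0x13000 looks up pages_addr[1], ORs in the
	 * 0x3000 offset within that CPU page, and the final mask keeps the
	 * result aligned to the 4KB GPU page size.
	 */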
 552
 553	return result;
 554}
 555
 556/*
 557 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 558 *
 559 * @adev: amdgpu_device pointer
 560 * @vm: requested vm
 561 * @start: start of GPU address range
 562 * @end: end of GPU address range
 
 563 *
 564 * Allocates new page tables if necessary
 565 * and updates the page directory.
 566 * Returns 0 for success, error for failure.
 567 */
 568int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 569				    struct amdgpu_vm *vm)
 570{
 571	struct amdgpu_bo *shadow;
 572	struct amdgpu_ring *ring;
 573	uint64_t pd_addr, shadow_addr;
 574	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 575	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 576	unsigned count = 0, pt_idx, ndw;
 577	struct amdgpu_job *job;
 578	struct amdgpu_pte_update_params params;
 579	struct dma_fence *fence = NULL;
 580
 581	int r;
 
 
 582
 583	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 584	shadow = vm->page_directory->shadow;
 585
 586	/* padding, etc. */
 587	ndw = 64;
 588
 589	/* assume the worst case */
 590	ndw += vm->max_pde_used * 6;
 591
 592	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 593	if (shadow) {
 594		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
 595		if (r)
 596			return r;
 597		shadow_addr = amdgpu_bo_gpu_offset(shadow);
 598		ndw *= 2;
 599	} else {
 600		shadow_addr = 0;
 601	}
 602
 603	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 604	if (r)
 605		return r;
 606
 607	memset(&params, 0, sizeof(params));
 608	params.adev = adev;
 609	params.ib = &job->ibs[0];
 
 610
 611	/* walk over the address space and update the page directory */
 612	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
 613		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
 614		uint64_t pde, pt;
 615
 616		if (bo == NULL)
 617			continue;
 
 618
 619		if (bo->shadow) {
 620			struct amdgpu_bo *pt_shadow = bo->shadow;
 621
 622			r = amdgpu_ttm_bind(&pt_shadow->tbo,
 623					    &pt_shadow->tbo.mem);
 624			if (r)
 625				return r;
 626		}
 627
 628		pt = amdgpu_bo_gpu_offset(bo);
 629		if (vm->page_tables[pt_idx].addr == pt)
 630			continue;
 631
 632		vm->page_tables[pt_idx].addr = pt;
 633
 634		pde = pd_addr + pt_idx * 8;
 635		if (((last_pde + 8 * count) != pde) ||
 636		    ((last_pt + incr * count) != pt) ||
 637		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 638
 639			if (count) {
 640				if (shadow)
 641					amdgpu_vm_do_set_ptes(&params,
 642							      last_shadow,
 643							      last_pt, count,
 644							      incr,
 645							      AMDGPU_PTE_VALID);
 646
 647				amdgpu_vm_do_set_ptes(&params, last_pde,
 648						      last_pt, count, incr,
 649						      AMDGPU_PTE_VALID);
 650			}
 651
 652			count = 1;
 653			last_pde = pde;
 654			last_shadow = shadow_addr + pt_idx * 8;
 655			last_pt = pt;
 656		} else {
 657			++count;
 658		}
 659	}
 660
 661	if (count) {
 662		if (vm->page_directory->shadow)
 663			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
 664					      count, incr, AMDGPU_PTE_VALID);
 665
 666		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
 667				      count, incr, AMDGPU_PTE_VALID);
 668	}
 669
 670	if (params.ib->length_dw == 0) {
 671		amdgpu_job_free(job);
 672		return 0;
 
 673	}
 674
 675	amdgpu_ring_pad_ib(ring, params.ib);
 676	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 677			 AMDGPU_FENCE_OWNER_VM);
 678	if (shadow)
 679		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
 680				 AMDGPU_FENCE_OWNER_VM);
 681
 682	WARN_ON(params.ib->length_dw > ndw);
 683	r = amdgpu_job_submit(job, ring, &vm->entity,
 684			      AMDGPU_FENCE_OWNER_VM, &fence);
 685	if (r)
 686		goto error_free;
 687
 688	amdgpu_bo_fence(vm->page_directory, fence, true);
 689	dma_fence_put(vm->page_directory_fence);
 690	vm->page_directory_fence = dma_fence_get(fence);
 691	dma_fence_put(fence);
 692
 693	return 0;
 694
 695error_free:
 696	amdgpu_job_free(job);
 697	return r;
 698}
 699
 700/**
 701 * amdgpu_vm_update_ptes - make sure that page tables are valid
 702 *
 703 * @params: see amdgpu_pte_update_params definition
 704 * @vm: requested vm
 705 * @start: start of GPU address range
 706 * @end: end of GPU address range
 707 * @dst: destination address to map to, the next dst inside the function
 708 * @flags: mapping flags
 709 *
 710 * Update the page tables in the range @start - @end.
 711 */
 712static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 713				  struct amdgpu_vm *vm,
 714				  uint64_t start, uint64_t end,
 715				  uint64_t dst, uint32_t flags)
 716{
 717	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 718
 719	uint64_t cur_pe_start, cur_nptes, cur_dst;
 720	uint64_t addr; /* next GPU address to be updated */
 721	uint64_t pt_idx;
 722	struct amdgpu_bo *pt;
 723	unsigned nptes; /* next number of ptes to be updated */
 724	uint64_t next_pe_start;
 725
 726	/* initialize the variables */
 727	addr = start;
 728	pt_idx = addr >> amdgpu_vm_block_size;
 729	pt = vm->page_tables[pt_idx].bo;
 730	if (params->shadow) {
 731		if (!pt->shadow)
 732			return;
 733		pt = pt->shadow;
 734	}
 735	if ((addr & ~mask) == (end & ~mask))
 736		nptes = end - addr;
 737	else
 738		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 739
 740	cur_pe_start = amdgpu_bo_gpu_offset(pt);
 741	cur_pe_start += (addr & mask) * 8;
 742	cur_nptes = nptes;
 743	cur_dst = dst;
 744
 745	/* for next ptb */
 746	addr += nptes;
 747	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 748
 749	/* walk over the address space and update the page tables */
 750	while (addr < end) {
 751		pt_idx = addr >> amdgpu_vm_block_size;
 752		pt = vm->page_tables[pt_idx].bo;
 753		if (params->shadow) {
 754			if (!pt->shadow)
 755				return;
 756			pt = pt->shadow;
 757		}
 758
 759		if ((addr & ~mask) == (end & ~mask))
 760			nptes = end - addr;
 761		else
 762			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 763
 764		next_pe_start = amdgpu_bo_gpu_offset(pt);
 765		next_pe_start += (addr & mask) * 8;
 766
 767		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
 768		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
 769			/* The next ptb is consecutive with the current ptb,
 770			 * so don't call the update function yet; both ptbs
 771			 * will be updated together in one call later.
 772			 */
 773			cur_nptes += nptes;
 774		} else {
 775			params->func(params, cur_pe_start, cur_dst, cur_nptes,
 776				     AMDGPU_GPU_PAGE_SIZE, flags);
 777
 778			cur_pe_start = next_pe_start;
 779			cur_nptes = nptes;
 780			cur_dst = dst;
 781		}
 782
 783		/* for next ptb */
 784		addr += nptes;
 785		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 786	}
 787
 788	params->func(params, cur_pe_start, cur_dst, cur_nptes,
 789		     AMDGPU_GPU_PAGE_SIZE, flags);
 790}
 791
 792/**
 793 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 794 *
 795 * @params: see amdgpu_pte_update_params definition
 796 * @vm: requested vm
 797 * @start: first PTE to handle
 798 * @end: last PTE to handle
 799 * @dst: addr those PTEs should point to
 800 * @flags: hw mapping flags
 801 */
 802static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
 803				struct amdgpu_vm *vm,
 804				uint64_t start, uint64_t end,
 805				uint64_t dst, uint32_t flags)
 806{
 807	/**
 808	 * The MC L1 TLB supports variable sized pages, based on a fragment
 809	 * field in the PTE. When this field is set to a non-zero value, page
 810	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
 811	 * flags are considered valid for all PTEs within the fragment range
 812	 * and corresponding mappings are assumed to be physically contiguous.
 813	 *
 814	 * The L1 TLB can store a single PTE for the whole fragment,
 815	 * significantly increasing the space available for translation
 816	 * caching. This leads to large improvements in throughput when the
 817	 * TLB is under pressure.
 818	 *
 819	 * The L2 TLB distributes small and large fragments into two
 820	 * asymmetric partitions. The large fragment cache is significantly
 821	 * larger. Thus, we try to use large fragments wherever possible.
 822	 * Userspace can support this by aligning virtual base address and
 823	 * allocation size to the fragment size.
 824	 */
 825
 826	/* SI and newer are optimized for 64KB */
 827	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
 828	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
 829
 830	uint64_t frag_start = ALIGN(start, frag_align);
 831	uint64_t frag_end = end & ~(frag_align - 1);
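	/*
	 * Illustrative example, assuming AMDGPU_LOG2_PAGES_PER_FRAG == 4
	 * (64KB fragments): for start = 3 and end = 100, frag_start = 16 and
	 * frag_end = 96, so [3, 16) and [96, 100) get plain 4KB PTEs below
	 * while [16, 96) is written with the fragment flag set.
	 */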
 832
 833	/* system pages are non contiguous */
 834	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
 835	    (frag_start >= frag_end)) {
 836
 837		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
 838		return;
 839	}
 840
 841	/* handle the 4K area at the beginning */
 842	if (start != frag_start) {
 843		amdgpu_vm_update_ptes(params, vm, start, frag_start,
 844				      dst, flags);
 845		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
 846	}
 847
 848	/* handle the area in the middle */
 849	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
 850			      flags | frag_flags);
 851
 852	/* handle the 4K area at the end */
 853	if (frag_end != end) {
 854		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
 855		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
 856	}
 857}
 858
 859/**
 860 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 861 *
 862 * @adev: amdgpu_device pointer
 863 * @exclusive: fence we need to sync to
 864 * @src: address where to copy page table entries from
 865 * @pages_addr: DMA addresses to use for mapping
 866 * @vm: requested vm
 867 * @start: start of mapped range
 868 * @last: last mapped entry
 869 * @flags: flags for the entries
 870 * @addr: addr to set the area to
 871 * @fence: optional resulting fence
 872 *
 873 * Fill in the page table entries between @start and @last.
 874 * Returns 0 for success, -EINVAL for failure.
 875 */
 876static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 877				       struct dma_fence *exclusive,
 878				       uint64_t src,
 879				       dma_addr_t *pages_addr,
 880				       struct amdgpu_vm *vm,
 881				       uint64_t start, uint64_t last,
 882				       uint32_t flags, uint64_t addr,
 883				       struct dma_fence **fence)
 884{
 885	struct amdgpu_ring *ring;
 886	void *owner = AMDGPU_FENCE_OWNER_VM;
 887	unsigned nptes, ncmds, ndw;
 888	struct amdgpu_job *job;
 889	struct amdgpu_pte_update_params params;
 890	struct dma_fence *f = NULL;
 891	int r;
 892
 893	memset(&params, 0, sizeof(params));
 894	params.adev = adev;
 895	params.src = src;
 896
 897	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 898
 903	/* sync to everything on unmapping */
 904	if (!(flags & AMDGPU_PTE_VALID))
 905		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
 906
 907	nptes = last - start + 1;
 908
 909	/*
 910	 * reserve space for one command every (1 << BLOCK_SIZE)
 911	 *  entries or 2k dwords (whichever is smaller)
 912	 */
 913	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
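	/* e.g. with a VM block size of 11 or more this is nptes / 2048 + 1 */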
 914
 915	/* padding, etc. */
 916	ndw = 64;
 917
 918	if (src) {
 919		/* only copy commands needed */
 920		ndw += ncmds * 7;
 921
 922		params.func = amdgpu_vm_do_copy_ptes;
 923
 924	} else if (pages_addr) {
 925		/* copy commands needed */
 926		ndw += ncmds * 7;
 927
 928		/* and also PTEs */
 929		ndw += nptes * 2;
 930
 931		params.func = amdgpu_vm_do_copy_ptes;
 932
 933	} else {
 934		/* set page commands needed */
 935		ndw += ncmds * 10;
 936
 937		/* two extra commands for begin/end of fragment */
 938		ndw += 2 * 10;
 939
 940		params.func = amdgpu_vm_do_set_ptes;
 941	}
 942
 943	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 944	if (r)
 945		return r;
 946
 947	params.ib = &job->ibs[0];
 948
 949	if (!src && pages_addr) {
 950		uint64_t *pte;
 951		unsigned i;
 952
 953		/* Put the PTEs at the end of the IB. */
 954		i = ndw - nptes * 2;
 955		pte = (uint64_t *)&(job->ibs->ptr[i]);
 956		params.src = job->ibs->gpu_addr + i * 4;
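		/*
		 * params.src now holds the GPU address of the PTE array built
		 * below, so the copy function reads the entries straight out
		 * of this IB.
		 */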
 957
 958		for (i = 0; i < nptes; ++i) {
 959			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
 960						    AMDGPU_GPU_PAGE_SIZE);
 961			pte[i] |= flags;
 962		}
 963		addr = 0;
 964	}
 965
 966	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
 967	if (r)
 968		goto error_free;
 969
 970	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 971			     owner);
 972	if (r)
 973		goto error_free;
 974
 975	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
 976	if (r)
 977		goto error_free;
 978
 979	params.shadow = true;
 980	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 981	params.shadow = false;
 982	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 983
 984	amdgpu_ring_pad_ib(ring, params.ib);
 985	WARN_ON(params.ib->length_dw > ndw);
 986	r = amdgpu_job_submit(job, ring, &vm->entity,
 987			      AMDGPU_FENCE_OWNER_VM, &f);
 988	if (r)
 989		goto error_free;
 990
 991	amdgpu_bo_fence(vm->page_directory, f, true);
 992	if (fence) {
 993		dma_fence_put(*fence);
 994		*fence = dma_fence_get(f);
 995	}
 996	dma_fence_put(f);
 997	return 0;
 998
 999error_free:
1000	amdgpu_job_free(job);
1001	return r;
1002}
1003
1004/**
1005 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1006 *
1007 * @adev: amdgpu_device pointer
1008 * @exclusive: fence we need to sync to
1009 * @gtt_flags: flags as they are used for GTT
1010 * @pages_addr: DMA addresses to use for mapping
1011 * @vm: requested vm
1012 * @mapping: mapped range and flags to use for the update
1013 * @flags: HW flags for the mapping
1014 * @nodes: array of drm_mm_nodes with the MC addresses
1015 * @fence: optional resulting fence
1016 *
1017 * Split the mapping into smaller chunks so that each update fits
1018 * into a SDMA IB.
1019 * Returns 0 for success, -EINVAL for failure.
1020 */
1021static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1022				      struct dma_fence *exclusive,
1023				      uint32_t gtt_flags,
1024				      dma_addr_t *pages_addr,
1025				      struct amdgpu_vm *vm,
1026				      struct amdgpu_bo_va_mapping *mapping,
1027				      uint32_t flags,
1028				      struct drm_mm_node *nodes,
1029				      struct dma_fence **fence)
1030{
1031	uint64_t pfn, src = 0, start = mapping->it.start;
1032	int r;
1033
1034	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1035	 * but just in case we filter the flags here first
1036	 */
1037	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1038		flags &= ~AMDGPU_PTE_READABLE;
1039	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1040		flags &= ~AMDGPU_PTE_WRITEABLE;
1041
1042	trace_amdgpu_vm_bo_update(mapping);
1043
1044	pfn = mapping->offset >> PAGE_SHIFT;
1045	if (nodes) {
1046		while (pfn >= nodes->size) {
1047			pfn -= nodes->size;
1048			++nodes;
1049		}
1050	}
1051
1052	do {
1053		uint64_t max_entries;
1054		uint64_t addr, last;
1055
1056		if (nodes) {
1057			addr = nodes->start << PAGE_SHIFT;
1058			max_entries = (nodes->size - pfn) *
1059				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1060		} else {
1061			addr = 0;
1062			max_entries = S64_MAX;
1063		}
1064
1065		if (pages_addr) {
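			/*
			 * If the pages are already bound through this device's
			 * GART with identical flags, copy the entries straight
			 * from the GART table; otherwise the PTEs have to be
			 * built inline in the IB, so cap the batch size.
			 */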
1066			if (flags == gtt_flags)
1067				src = adev->gart.table_addr +
1068					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1069			else
1070				max_entries = min(max_entries, 16ull * 1024ull);
1071			addr = 0;
1072		} else if (flags & AMDGPU_PTE_VALID) {
1073			addr += adev->vm_manager.vram_base_offset;
1074		}
1075		addr += pfn << PAGE_SHIFT;
1076
1077		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
1078		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1079						src, pages_addr, vm,
1080						start, last, flags, addr,
1081						fence);
1082		if (r)
1083			return r;
1084
1085		pfn += last - start + 1;
1086		if (nodes && nodes->size == pfn) {
1087			pfn = 0;
1088			++nodes;
1089		}
1090		start = last + 1;
1091
1092	} while (unlikely(start != mapping->it.last + 1));
1093
1094	return 0;
1095}
1096
1097/**
1098 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1099 *
1100 * @adev: amdgpu_device pointer
1101 * @bo_va: requested BO and VM object
1102 * @clear: if true clear the entries
1103 *
1104 * Fill in the page table entries for @bo_va.
1105 * Returns 0 for success, -EINVAL for failure.
1106 */
1107int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1108			struct amdgpu_bo_va *bo_va,
1109			bool clear)
1110{
1111	struct amdgpu_vm *vm = bo_va->vm;
1112	struct amdgpu_bo_va_mapping *mapping;
1113	dma_addr_t *pages_addr = NULL;
1114	uint32_t gtt_flags, flags;
1115	struct ttm_mem_reg *mem;
1116	struct drm_mm_node *nodes;
1117	struct dma_fence *exclusive;
1118	int r;
1119
1120	if (clear) {
1121		mem = NULL;
1122		nodes = NULL;
1123		exclusive = NULL;
1124	} else {
1125		struct ttm_dma_tt *ttm;
1126
1127		mem = &bo_va->bo->tbo.mem;
1128		nodes = mem->mm_node;
1129		if (mem->mem_type == TTM_PL_TT) {
1130			ttm = container_of(bo_va->bo->tbo.ttm, struct
1131					   ttm_dma_tt, ttm);
1132			pages_addr = ttm->dma_address;
1133		}
1134		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1135	}
1136
1137	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1138	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1139		adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
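	/*
	 * gtt_flags matches flags only when the BO is already bound through
	 * this device's GART; amdgpu_vm_bo_split_mapping() uses that to copy
	 * entries directly from the GART table instead of rebuilding them.
	 */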
1140
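	/*
	 * A bo_va still linked on a vm_status list has mappings that need to
	 * be (re)committed; splice its previously valid mappings back onto
	 * the invalids list so they are rewritten below.
	 */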
1141	spin_lock(&vm->status_lock);
1142	if (!list_empty(&bo_va->vm_status))
1143		list_splice_init(&bo_va->valids, &bo_va->invalids);
1144	spin_unlock(&vm->status_lock);
1145
1146	list_for_each_entry(mapping, &bo_va->invalids, list) {
1147		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1148					       gtt_flags, pages_addr, vm,
1149					       mapping, flags, nodes,
1150					       &bo_va->last_pt_update);
1151		if (r)
1152			return r;
1153	}
1154
1155	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1156		list_for_each_entry(mapping, &bo_va->valids, list)
1157			trace_amdgpu_vm_bo_mapping(mapping);
1158
1159		list_for_each_entry(mapping, &bo_va->invalids, list)
1160			trace_amdgpu_vm_bo_mapping(mapping);
1161	}
1162
1163	spin_lock(&vm->status_lock);
1164	list_splice_init(&bo_va->invalids, &bo_va->valids);
1165	list_del_init(&bo_va->vm_status);
1166	if (clear)
1167		list_add(&bo_va->vm_status, &vm->cleared);
1168	spin_unlock(&vm->status_lock);
1169
1170	return 0;
1171}
1172
1173/**
1174 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1175 *
1176 * @adev: amdgpu_device pointer
1177 * @vm: requested vm
1178 *
1179 * Make sure all freed BOs are cleared in the PT.
1180 * Returns 0 for success.
1181 *
1182 * PTs have to be reserved and mutex must be locked!
1183 */
1184int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1185			  struct amdgpu_vm *vm)
1186{
1187	struct amdgpu_bo_va_mapping *mapping;
1188	int r;
1189
1190	while (!list_empty(&vm->freed)) {
1191		mapping = list_first_entry(&vm->freed,
1192			struct amdgpu_bo_va_mapping, list);
1193		list_del(&mapping->list);
1194
1195		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1196					       0, 0, NULL);
1197		kfree(mapping);
1198		if (r)
1199			return r;
1200
1201	}
1202	return 0;
1203
1204}
1205
1206/**
1207 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1208 *
1209 * @adev: amdgpu_device pointer
1210 * @vm: requested vm
1211 *
1212 * Make sure all invalidated BOs are cleared in the PT.
1213 * Returns 0 for success.
1214 *
1215 * PTs have to be reserved and mutex must be locked!
1216 */
1217int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1218			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1219{
1220	struct amdgpu_bo_va *bo_va = NULL;
1221	int r = 0;
1222
1223	spin_lock(&vm->status_lock);
1224	while (!list_empty(&vm->invalidated)) {
1225		bo_va = list_first_entry(&vm->invalidated,
1226			struct amdgpu_bo_va, vm_status);
1227		spin_unlock(&vm->status_lock);
1228
1229		r = amdgpu_vm_bo_update(adev, bo_va, true);
1230		if (r)
1231			return r;
1232
1233		spin_lock(&vm->status_lock);
1234	}
1235	spin_unlock(&vm->status_lock);
1236
1237	if (bo_va)
1238		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1239
1240	return r;
1241}
1242
1243/**
1244 * amdgpu_vm_bo_add - add a bo to a specific vm
1245 *
1246 * @adev: amdgpu_device pointer
1247 * @vm: requested vm
1248 * @bo: amdgpu buffer object
1249 *
1250 * Add @bo into the requested vm.
1251 * Add @bo to the list of bos associated with the vm
1252 * Returns newly added bo_va or NULL for failure
1253 *
1254 * Object has to be reserved!
1255 */
1256struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1257				      struct amdgpu_vm *vm,
1258				      struct amdgpu_bo *bo)
1259{
1260	struct amdgpu_bo_va *bo_va;
1261
1262	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1263	if (bo_va == NULL) {
1264		return NULL;
1265	}
1266	bo_va->vm = vm;
1267	bo_va->bo = bo;
1268	bo_va->ref_count = 1;
1269	INIT_LIST_HEAD(&bo_va->bo_list);
1270	INIT_LIST_HEAD(&bo_va->valids);
1271	INIT_LIST_HEAD(&bo_va->invalids);
1272	INIT_LIST_HEAD(&bo_va->vm_status);
1273
1274	list_add_tail(&bo_va->bo_list, &bo->va);
1275
1276	return bo_va;
1277}
1278
1279/**
1280 * amdgpu_vm_bo_map - map bo inside a vm
1281 *
1282 * @adev: amdgpu_device pointer
1283 * @bo_va: bo_va to store the address
1284 * @saddr: where to map the BO
1285 * @offset: requested offset in the BO
1286 * @flags: attributes of pages (read/write/valid/etc.)
1287 *
1288 * Add a mapping of the BO at the specified addr into the VM.
1289 * Returns 0 for success, error for failure.
1290 *
1291 * Object has to be reserved and unreserved outside!
1292 */
1293int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1294		     struct amdgpu_bo_va *bo_va,
1295		     uint64_t saddr, uint64_t offset,
1296		     uint64_t size, uint32_t flags)
1297{
1298	struct amdgpu_bo_va_mapping *mapping;
1299	struct amdgpu_vm *vm = bo_va->vm;
1300	struct interval_tree_node *it;
1301	unsigned last_pfn, pt_idx;
1302	uint64_t eaddr;
1303	int r;
1304
1305	/* validate the parameters */
1306	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1307	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1308		return -EINVAL;
1309
1310	/* make sure object fit at this offset */
1311	eaddr = saddr + size - 1;
1312	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1313		return -EINVAL;
1314
1315	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1316	if (last_pfn >= adev->vm_manager.max_pfn) {
1317		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1318			last_pfn, adev->vm_manager.max_pfn);
1319		return -EINVAL;
1320	}
1321
1322	saddr /= AMDGPU_GPU_PAGE_SIZE;
1323	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1324
1325	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1326	if (it) {
1327		struct amdgpu_bo_va_mapping *tmp;
1328		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1329		/* bo and tmp overlap, invalid addr */
1330		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1331			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1332			tmp->it.start, tmp->it.last + 1);
1333		r = -EINVAL;
1334		goto error;
1335	}
1336
1337	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1338	if (!mapping) {
1339		r = -ENOMEM;
1340		goto error;
1341	}
1342
1343	INIT_LIST_HEAD(&mapping->list);
1344	mapping->it.start = saddr;
1345	mapping->it.last = eaddr;
1346	mapping->offset = offset;
1347	mapping->flags = flags;
1348
1349	list_add(&mapping->list, &bo_va->invalids);
1350	interval_tree_insert(&mapping->it, &vm->va);
1351
1352	/* Make sure the page tables are allocated */
1353	saddr >>= amdgpu_vm_block_size;
1354	eaddr >>= amdgpu_vm_block_size;
1355
1356	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1357
1358	if (eaddr > vm->max_pde_used)
1359		vm->max_pde_used = eaddr;
1360
1361	/* walk over the address space and allocate the page tables */
1362	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1363		struct reservation_object *resv = vm->page_directory->tbo.resv;
1364		struct amdgpu_bo *pt;
1365
1366		if (vm->page_tables[pt_idx].bo)
1367			continue;
1368
1369		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1370				     AMDGPU_GPU_PAGE_SIZE, true,
1371				     AMDGPU_GEM_DOMAIN_VRAM,
1372				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1373				     AMDGPU_GEM_CREATE_SHADOW |
1374				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1375				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
1376				     NULL, resv, &pt);
1377		if (r)
1378			goto error_free;
1379
1380		/* Keep a reference from the page table to the page directory
1381		 * so they are not freed in the wrong order.
1382		 */
1383		pt->parent = amdgpu_bo_ref(vm->page_directory);
1384
1385		vm->page_tables[pt_idx].bo = pt;
1386		vm->page_tables[pt_idx].addr = 0;
1387	}
1388
1389	return 0;
1390
1391error_free:
1392	list_del(&mapping->list);
1393	interval_tree_remove(&mapping->it, &vm->va);
1394	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1395	kfree(mapping);
1396
1397error:
1398	return r;
1399}
1400
1401/**
1402 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1403 *
1404 * @adev: amdgpu_device pointer
1405 * @bo_va: bo_va to remove the address from
1406 * @saddr: where the BO is mapped
1407 *
1408 * Remove a mapping of the BO at the specified addr from the VM.
1409 * Returns 0 for success, error for failure.
1410 *
1411 * Object has to be reserved and unreserved outside!
1412 */
1413int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1414		       struct amdgpu_bo_va *bo_va,
1415		       uint64_t saddr)
1416{
1417	struct amdgpu_bo_va_mapping *mapping;
1418	struct amdgpu_vm *vm = bo_va->vm;
1419	bool valid = true;
1420
1421	saddr /= AMDGPU_GPU_PAGE_SIZE;
1422
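	/*
	 * The mapping can sit on either the valids or the invalids list,
	 * depending on whether it has already been committed to the page
	 * tables; search the valids first and fall back to the invalids.
	 */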
1423	list_for_each_entry(mapping, &bo_va->valids, list) {
1424		if (mapping->it.start == saddr)
1425			break;
1426	}
1427
1428	if (&mapping->list == &bo_va->valids) {
1429		valid = false;
1430
1431		list_for_each_entry(mapping, &bo_va->invalids, list) {
1432			if (mapping->it.start == saddr)
1433				break;
1434		}
1435
1436		if (&mapping->list == &bo_va->invalids)
1437			return -ENOENT;
1438	}
1439
1440	list_del(&mapping->list);
1441	interval_tree_remove(&mapping->it, &vm->va);
1442	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1443
1444	if (valid)
1445		list_add(&mapping->list, &vm->freed);
1446	else
1447		kfree(mapping);
1448
1449	return 0;
1450}
1451
1452/**
1453 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1454 *
1455 * @adev: amdgpu_device pointer
1456 * @bo_va: requested bo_va
1457 *
1458 * Remove @bo_va->bo from the requested vm.
1459 *
1460 * Object has to be reserved!
1461 */
1462void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1463		      struct amdgpu_bo_va *bo_va)
1464{
1465	struct amdgpu_bo_va_mapping *mapping, *next;
1466	struct amdgpu_vm *vm = bo_va->vm;
1467
1468	list_del(&bo_va->bo_list);
1469
1470	spin_lock(&vm->status_lock);
1471	list_del(&bo_va->vm_status);
1472	spin_unlock(&vm->status_lock);
1473
1474	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1475		list_del(&mapping->list);
1476		interval_tree_remove(&mapping->it, &vm->va);
1477		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1478		list_add(&mapping->list, &vm->freed);
1479	}
1480	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1481		list_del(&mapping->list);
1482		interval_tree_remove(&mapping->it, &vm->va);
1483		kfree(mapping);
1484	}
1485
1486	dma_fence_put(bo_va->last_pt_update);
1487	kfree(bo_va);
1488}
1489
1490/**
1491 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1492 *
1493 * @adev: amdgpu_device pointer
1494 * @vm: requested vm
1495 * @bo: amdgpu buffer object
1496 *
1497 * Mark @bo as invalid.
1498 */
1499void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1500			     struct amdgpu_bo *bo)
1501{
1502	struct amdgpu_bo_va *bo_va;
1503
1504	list_for_each_entry(bo_va, &bo->va, bo_list) {
1505		spin_lock(&bo_va->vm->status_lock);
1506		if (list_empty(&bo_va->vm_status))
1507			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1508		spin_unlock(&bo_va->vm->status_lock);
1509	}
1510}
1511
1512/**
1513 * amdgpu_vm_init - initialize a vm instance
1514 *
1515 * @adev: amdgpu_device pointer
1516 * @vm: requested vm
1517 *
1518 * Init @vm fields.
1519 */
1520int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1521{
1522	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1523		AMDGPU_VM_PTE_COUNT * 8);
1524	unsigned pd_size, pd_entries;
1525	unsigned ring_instance;
1526	struct amdgpu_ring *ring;
1527	struct amd_sched_rq *rq;
1528	int i, r;
1529
1530	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1531		vm->ids[i] = NULL;
1532	vm->va = RB_ROOT;
1533	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1534	spin_lock_init(&vm->status_lock);
1535	INIT_LIST_HEAD(&vm->invalidated);
1536	INIT_LIST_HEAD(&vm->cleared);
1537	INIT_LIST_HEAD(&vm->freed);
1538
1539	pd_size = amdgpu_vm_directory_size(adev);
1540	pd_entries = amdgpu_vm_num_pdes(adev);
1541
1542	/* allocate page table array */
1543	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1544	if (vm->page_tables == NULL) {
1545		DRM_ERROR("Cannot allocate memory for page table array\n");
1546		return -ENOMEM;
1547	}
1548
1549	/* create scheduler entity for page table updates */
1550
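	/*
	 * Page table updates for this VM go through a dedicated scheduler
	 * entity that is bound round-robin to one of the vm_pte rings, so
	 * updates from different VMs are spread across the rings.
	 */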
1551	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1552	ring_instance %= adev->vm_manager.vm_pte_num_rings;
1553	ring = adev->vm_manager.vm_pte_rings[ring_instance];
1554	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1555	r = amd_sched_entity_init(&ring->sched, &vm->entity,
1556				  rq, amdgpu_sched_jobs);
1557	if (r)
1558		goto err;
1559
1560	vm->page_directory_fence = NULL;
1561
1562	r = amdgpu_bo_create(adev, pd_size, align, true,
1563			     AMDGPU_GEM_DOMAIN_VRAM,
1564			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1565			     AMDGPU_GEM_CREATE_SHADOW |
1566			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1567			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
1568			     NULL, NULL, &vm->page_directory);
1569	if (r)
1570		goto error_free_sched_entity;
1571
1572	r = amdgpu_bo_reserve(vm->page_directory, false);
1573	if (r)
1574		goto error_free_page_directory;
1575
1576	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1577	amdgpu_bo_unreserve(vm->page_directory);
1578
1579	return 0;
1580
1581error_free_page_directory:
1582	amdgpu_bo_unref(&vm->page_directory->shadow);
1583	amdgpu_bo_unref(&vm->page_directory);
1584	vm->page_directory = NULL;
1585
1586error_free_sched_entity:
1587	amd_sched_entity_fini(&ring->sched, &vm->entity);
1588
1589err:
1590	drm_free_large(vm->page_tables);
1591
1592	return r;
1593}
1594
1595/**
1596 * amdgpu_vm_fini - tear down a vm instance
1597 *
1598 * @adev: amdgpu_device pointer
1599 * @vm: requested vm
1600 *
1601 * Tear down @vm.
1602 * Unbind the VM and remove all bos from the vm bo list
1603 */
1604void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1605{
1606	struct amdgpu_bo_va_mapping *mapping, *tmp;
1607	int i;
1608
1609	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1610
1611	if (!RB_EMPTY_ROOT(&vm->va)) {
1612		dev_err(adev->dev, "still active bo inside vm\n");
1613	}
1614	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1615		list_del(&mapping->list);
1616		interval_tree_remove(&mapping->it, &vm->va);
1617		kfree(mapping);
1618	}
1619	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1620		list_del(&mapping->list);
1621		kfree(mapping);
1622	}
1623
1624	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1625		struct amdgpu_bo *pt = vm->page_tables[i].bo;
1626
1627		if (!pt)
1628			continue;
1629
1630		amdgpu_bo_unref(&pt->shadow);
1631		amdgpu_bo_unref(&pt);
1632	}
1633	drm_free_large(vm->page_tables);
1634
1635	amdgpu_bo_unref(&vm->page_directory->shadow);
1636	amdgpu_bo_unref(&vm->page_directory);
1637	dma_fence_put(vm->page_directory_fence);
1638}
1639
1640/**
1641 * amdgpu_vm_manager_init - init the VM manager
1642 *
1643 * @adev: amdgpu_device pointer
1644 *
1645 * Initialize the VM manager structures
1646 */
1647void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1648{
1649	unsigned i;
1650
1651	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1652
1653	/* skip over VMID 0, since it is the system VM */
1654	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1655		amdgpu_vm_reset_id(adev, i);
1656		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1657		list_add_tail(&adev->vm_manager.ids[i].list,
1658			      &adev->vm_manager.ids_lru);
1659	}
1660
1661	adev->vm_manager.fence_context =
1662		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1663	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1664		adev->vm_manager.seqno[i] = 0;
1665
1666	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1667	atomic64_set(&adev->vm_manager.client_counter, 0);
1668}
1669
1670/**
1671 * amdgpu_vm_manager_fini - cleanup VM manager
1672 *
1673 * @adev: amdgpu_device pointer
1674 *
1675 * Cleanup the VM manager and free resources.
1676 */
1677void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1678{
1679	unsigned i;
1680
1681	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1682		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1683
1684		dma_fence_put(adev->vm_manager.ids[i].first);
1685		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1686		dma_fence_put(id->flushed_updates);
1687		dma_fence_put(id->last_flush);
1688	}
1689}