/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is the MMU functionality provided on the GPU.
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there can be multiple GPUVM page tables active
 * at any given time.  The GPUVM page tables can contain a mix of
 * VRAM pages and system pages (both memory and MMIO) and system pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each active GPUVM has an ID associated with it and there is a page table
 * linked with each VMID.  When executing a command buffer,
 * the kernel tells the engine what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * The hardware supports up to 16 active GPUVMs at any given time.
 *
 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
 * as other features such as encryption and caching attributes.
 *
 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
 * addition to an aperture managed by a page table, VMID 0 also has
 * several other apertures.  There is an aperture for direct access to VRAM
 * and there is a legacy AGP aperture which just forwards accesses directly
 * to the matching system physical addresses (or IOVAs when an IOMMU is
 * present).  These apertures provide direct access to these memories without
 * incurring the overhead of a page table.  VMID 0 is used by the kernel
 * driver for tasks like memory management.
 *
 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
 * For user applications, each application can have their own unique GPUVM
 * address space.  The application manages the address space and the kernel
 * driver manages the GPUVM page tables for each process.  If a GPU client
 * accesses an invalid page, it will generate a GPU page fault, similar to
 * accessing an invalid page on a CPU.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

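/*
 * Instantiate the interval tree helpers (amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first(), ...) used to look up
 * mappings by their [start, last] GPU page range.
 */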
INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 */
struct amdgpu_vm_tlb_seq_struct {
	/**
	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
	 */
	struct amdgpu_vm *vm;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU, can also be used to remove the
 * pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	if (vm->pasid) {
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;

		vm->pasid = 0;
	}

	if (pasid) {
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;

		vm->pasid = pasid;
	}

	return 0;
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	spin_lock(&vm_bo->vm->status_lock);
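	/* Page table BOs are validated before per-VM BOs, so keep them at
	 * the head of the evicted list while everything else goes to the
	 * tail.
	 */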
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	spin_unlock(&vm_bo->vm->status_lock);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and that change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for BOs used by user mode queues which are not at the location they
 * should be.
 */
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
	vm_bo->moved = true;
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent) {
		spin_lock(&vm_bo->vm->status_lock);
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
		spin_unlock(&vm_bo->vm->status_lock);
	} else {
		amdgpu_vm_bo_idle(vm_bo);
	}
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 * @vm: the VM which state machine to reset
 *
 * Move all vm_bo object in the VM into a state where they will be updated
 * again during validation.
 */
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *vm_bo, *tmp;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->done, &vm->invalidated);
	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
		vm_bo->moved = true;
	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = vm_bo->bo;

		vm_bo->moved = true;
		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
		else if (bo->parent)
			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	}
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
		return;

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);

	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_lock_pd - lock PD in drm_exec
 *
 * @vm: vm providing the BOs
 * @exec: drm execution context
 * @num_fences: number of extra fences to reserve
 *
 * Lock the VM root PD in the DRM execution context.
 */
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences)
{
	/* We need at least two fences for the VM PD/PT updates */
	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
				    2 + num_fences);
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	spin_lock(&adev->mman.bdev.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&adev->mman.bdev.lru_lock);
}

/* Create scheduler entities for page table updates */
static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm)
{
	int r;

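	/* The immediate entity serves page table updates which can't wait
	 * (e.g. page fault handling), the delayed one serves updates
	 * scheduled alongside normal command submission.
	 */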
	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error;

	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				     adev->vm_manager.vm_pte_scheds,
				     adev->vm_manager.vm_pte_num_scheds, NULL);

error:
	drm_sched_entity_destroy(&vm->immediate);
	return r;
}

/* Destroy the entities for page table updates again */
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
{
	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);
}

/**
 * amdgpu_vm_generation - return the page table re-generation counter
 * @adev: the amdgpu_device
 * @vm: optional VM to check, might be NULL
 *
 * Returns a page table re-generation token to allow checking if submissions
 * are still valid to use this VM. The VM parameter might be NULL in which case
 * just the VRAM lost counter will be used.
 */
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
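	/* The upper 32 bits count VRAM lost events, the lower 32 bits
	 * mirror the VM's own generation counter.
	 */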
	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;

	if (!vm)
		return result;

	result += lower_32_bits(vm->generation);
	/* Add one if the page tables will be re-generated on next CS */
	if (drm_sched_entity_error(&vm->delayed))
		++result;

	return result;
}

/**
 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @ticket: optional reservation ticket used to reserve the VM
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs and per-VM BOs on command submission if
 * necessary. If a ticket is given, also try to validate evicted user queue
 * BOs. They must already be reserved with the given ticket.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*validate)(void *p, struct amdgpu_bo *bo),
		       void *param)
{
	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
	struct amdgpu_vm_bo_base *bo_base;
	struct amdgpu_bo *bo;
	int r;

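	/* A changed generation means the page tables were invalidated (e.g.
	 * by a GPU reset), so restart the state machine and re-create the
	 * scheduler entities.
	 */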
	if (vm->generation != new_vm_generation) {
		vm->generation = new_vm_generation;
		amdgpu_vm_bo_reset_state_machine(vm);
		amdgpu_vm_fini_entities(vm);
		r = amdgpu_vm_init_entities(adev, vm);
		if (r)
			return r;
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			return r;

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
			amdgpu_vm_bo_relocated(bo_base);
		}
		spin_lock(&vm->status_lock);
	}
	while (ticket && !list_empty(&vm->evicted_user)) {
		bo_base = list_first_entry(&vm->evicted_user,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;

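		/* User queue BOs must already be reserved by the caller with
		 * the given ticket, bail out if they are not.
		 */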
		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

			pr_warn_ratelimited("Evicted user BO is not reserved\n");
			if (ti) {
				pr_warn_ratelimited("pid %d\n", ti->pid);
				amdgpu_vm_put_task_info(ti);
			}

			return -EINVAL;
		}

		r = validate(param, bo);
		if (r)
			return r;

		amdgpu_vm_bo_invalidated(bo_base);

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;
	amdgpu_vm_eviction_unlock(vm);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if VM is not evicting.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool empty;
	bool ret;

	amdgpu_vm_eviction_lock(vm);
	ret = !vm->evicting;
	amdgpu_vm_eviction_unlock(vm);

	spin_lock(&vm->status_lock);
	empty = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ret && empty;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	if (job->vmid == 0)
		return false;

	if (job->vm_needs_flush || ring->has_compute_vm_bug)
		return true;

	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
		return true;

	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
		return true;

	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job:  related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool spm_update_needed = job->spm_update_needed;
	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
		job->gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned int patch;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
		spm_update_needed = true;
	}

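	/* The pasid mapping must be (re)emitted when the VMID was last used
	 * with a different pasid or the previous mapping hasn't signaled yet.
	 */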
	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
	    !(job->enforce_isolation && !job->vmid))
		return 0;

	amdgpu_ring_ib_begin(ring);
	if (ring->funcs->init_cond_exec)
		patch = amdgpu_ring_init_cond_exec(ring,
						   ring->cond_exe_gpu_addr);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (adev->gfx.enable_cleaner_shader &&
	    ring->funcs->emit_cleaner_shader &&
	    job->enforce_isolation)
		ring->funcs->emit_cleaner_shader(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);

	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
	    gds_switch_needed) {
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
	dma_fence_put(fence);

	amdgpu_ring_patch_cond_exec(ring, patch);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}

	amdgpu_ring_ib_end(ring);
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

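	/* keep the address 4KB aligned, dropping any in-page offset bits */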
	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_vm_bo_base *entry;
	bool flush_tlb_needed = false;
	LIST_HEAD(relocated);
	int r, idx;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->relocated, &relocated);
	spin_unlock(&vm->status_lock);

	if (list_empty(&relocated))
		return 0;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL);
	if (r)
		goto error;

	list_for_each_entry(entry, &relocated, vm_status) {
		/* vm_flush_needed after updating moved PDEs */
		flush_tlb_needed |= entry->moved;

		r = amdgpu_vm_pde_update(&params, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;

	if (flush_tlb_needed)
		atomic64_inc(&vm->tlb_seq);

	while (!list_empty(&relocated)) {
		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);
	}

error:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @fence: unused
 * @cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
				 struct dma_fence_cb *cb)
{
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;

	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
	atomic64_inc(&tlb_cb->vm->tlb_seq);
	kfree(tlb_cb);
}

/**
 * amdgpu_vm_tlb_flush - prepare TLB flush
 *
 * @params: parameters for update
 * @fence: input fence to sync TLB flush with
 * @tlb_cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void
amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
		    struct dma_fence **fence,
		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
{
	struct amdgpu_vm *vm = params->vm;

	tlb_cb->vm = vm;
	if (!fence || !*fence) {
		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
		return;
	}

	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
				    amdgpu_vm_tlb_seq_cb)) {
		dma_fence_put(vm->last_tlb_flush);
		vm->last_tlb_flush = dma_fence_get(*fence);
	} else {
		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
	}

	/* Prepare a TLB flush fence to be attached to PTs */
	if (!params->unlocked && vm->is_compute_context) {
		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);

		/* Makes sure no PD/PT is freed before the flush */
		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
				   DMA_RESV_USAGE_BOOKKEEP);
	}
}

/**
 * amdgpu_vm_update_range - update a range in the vm page table
 *
 * @adev: amdgpu_device pointer to use for commands
 * @vm: the VM to update the range
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @flush_tlb: trigger tlb invalidation after update completed
 * @allow_override: change MTYPE for local NUMA nodes
 * @sync: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @vram_base: base for vram mappings
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code for failure.
 */
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   bool allow_override, struct amdgpu_sync *sync,
			   uint64_t start, uint64_t last, uint64_t flags,
			   uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence)
{
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
	struct amdgpu_vm_update_params params;
	struct amdgpu_res_cursor cursor;
	int r, idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
	if (!tlb_cb) {
		drm_dev_exit(idx);
		return -ENOMEM;
	}

	/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
	 * heavy-weight flush TLB unconditionally.
	 */
	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);

	/*
	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
	 */
	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;
	params.pages_addr = pages_addr;
	params.unlocked = unlocked;
	params.needs_flush = flush_tlb;
	params.allow_override = allow_override;
	INIT_LIST_HEAD(&params.tlb_flush_waitlist);

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_free;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

	r = vm->update_funcs->prepare(&params, sync);
	if (r)
		goto error_free;

	amdgpu_res_first(pages_addr ? NULL : res, offset,
			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
	while (cursor.remaining) {
		uint64_t tmp, num_entries, addr;

		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
		if (pages_addr) {
			bool contiguous = true;

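			/* Check how many of the CPU pages backing this range
			 * are physically contiguous, contiguous runs can be
			 * mapped directly without walking pages_addr for
			 * every PTE.
			 */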
			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
				uint64_t pfn = cursor.start >> PAGE_SHIFT;
				uint64_t count;

				contiguous = pages_addr[pfn + 1] ==
					pages_addr[pfn] + PAGE_SIZE;

				tmp = num_entries /
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
				for (count = 2; count < tmp; ++count) {
					uint64_t idx = pfn + count;

					if (contiguous != (pages_addr[idx] ==
					    pages_addr[idx - 1] + PAGE_SIZE))
						break;
				}
				if (!contiguous)
					count--;
				num_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

			if (!contiguous) {
				addr = cursor.start;
				params.pages_addr = pages_addr;
			} else {
				addr = pages_addr[cursor.start >> PAGE_SHIFT];
				params.pages_addr = NULL;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
			addr = vram_base + cursor.start;
		} else {
			addr = 0;
		}

		tmp = start + num_entries;
		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
		if (r)
			goto error_free;

		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
		start = tmp;
	}

	r = vm->update_funcs->commit(&params, fence);
	if (r)
		goto error_free;

	if (params.needs_flush) {
		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
		tlb_cb = NULL;
	}

	amdgpu_vm_pt_free_list(adev, &params);

error_free:
	kfree(tlb_cb);
	amdgpu_vm_eviction_unlock(vm);
	drm_dev_exit(idx);
	return r;
}

static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
				    struct amdgpu_mem_stats *stats,
				    unsigned int size)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	if (!bo)
		return;

	/*
	 * For now ignore BOs which are currently locked and potentially
	 * changing their location.
	 */
	if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
	    !dma_resv_trylock(bo->tbo.base.resv))
		return;

	amdgpu_bo_get_memory(bo, stats, size);
	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
		dma_resv_unlock(bo->tbo.base.resv);
}

void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats,
			  unsigned int size)
{
	struct amdgpu_bo_va *bo_va, *tmp;

	spin_lock(&vm->status_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);

	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);

	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);

	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);

	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);

	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats, size);
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence **last_update;
	dma_addr_t *pages_addr = NULL;
	struct ttm_resource *mem;
	struct amdgpu_sync sync;
	bool flush_tlb = clear;
	uint64_t vram_base;
	uint64_t flags;
	bool uncached;
	int r;

	amdgpu_sync_create(&sync);
	if (clear) {
		mem = NULL;

		/* Implicitly sync to command submissions in the same VM before
		 * unmapping.
		 */
		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
				     AMDGPU_SYNC_EQ_OWNER, vm);
		if (r)
			goto error_free;
		if (bo) {
			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
			if (r)
				goto error_free;
		}
	} else if (!bo) {
		mem = NULL;

		/* PRT map operations don't need to sync to anything. */

	} else {
		struct drm_gem_object *obj = &bo->tbo.base;

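		/* For XGMI-accessible imports map the exporter's VRAM BO
		 * directly over the link instead of its system memory pages.
		 */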
		if (obj->import_attach && bo_va->is_xgmi) {
			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
			struct drm_gem_object *gobj = dma_buf->priv;
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			if (abo->tbo.resource &&
			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
				bo = gem_to_amdgpu_bo(gobj);
		}
		mem = bo->tbo.resource;
		if (mem && (mem->mem_type == TTM_PL_TT ||
			    mem->mem_type == AMDGPU_PL_PREEMPT))
			pages_addr = bo->tbo.ttm->dma_address;

		/* Implicitly sync to moving fences before mapping anything */
		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
				     AMDGPU_SYNC_EXPLICIT, vm);
		if (r)
			goto error_free;
	}

	if (bo) {
		struct amdgpu_device *bo_adev;

		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

		if (amdgpu_bo_encrypted(bo))
			flags |= AMDGPU_PTE_TMZ;

		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
		vram_base = bo_adev->vm_manager.vram_base_offset;
		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
	} else {
		flags = 0x0;
		vram_base = 0;
		uncached = false;
	}

	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		flush_tlb = true;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		uint64_t update_flags = flags;

		/* Normally bo_va->flags only contains the READABLE and
		 * WRITEABLE bits here, but just in case we filter the
		 * flags in the first place.
		 */
		if (!(mapping->flags & AMDGPU_PTE_READABLE))
			update_flags &= ~AMDGPU_PTE_READABLE;
		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
			update_flags &= ~AMDGPU_PTE_WRITEABLE;

		/* Apply ASIC specific mapping flags */
		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

		trace_amdgpu_vm_bo_update(mapping);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
					   !uncached, &sync, mapping->start,
					   mapping->last, update_flags,
					   mapping->offset, vram_base, mem,
					   pages_addr, last_update);
		if (r)
			goto error_free;
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
		if (bo->tbo.resource &&
		    !(bo->preferred_domains &
		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;
	bo_va->base.moved = false;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

error_free:
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		/* Add a callback for each fence in the reservation object */
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, fence);
	}
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 *
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	struct amdgpu_sync sync;
	int r;

	/*
	 * Implicitly sync to command submissions in the same VM before
	 * unmapping.
	 */
	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
			     AMDGPU_SYNC_EQ_OWNER, vm);
	if (r)
		goto error_free;

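	/* An update with flags == 0 below writes invalid entries for the
	 * whole range, effectively unmapping it.
	 */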
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
					   &sync, mapping->start, mapping->last,
					   0, 0, 0, NULL, NULL, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			goto error_free;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

error_free:
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @ticket: optional reservation ticket used to reserve the VM
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va *bo_va;
	struct dma_resv *resv;
	bool clear, unlock;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
		spin_lock(&vm->status_lock);
	}

	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!adev->debug_vm && dma_resv_trylock(resv)) {
			clear = false;
			unlock = true;
		/* The caller is already holding the reservation lock */
		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
			clear = false;
			unlock = false;
		/* Somebody else is using the BO right now */
		} else {
			clear = true;
			unlock = false;
		}

		r = amdgpu_vm_bo_update(adev, bo_va, clear);

		if (unlock)
			dma_resv_unlock(resv);
		if (r)
			return r;

		/* Remember evicted DMABuf imports in compute VMs for later
		 * validation
		 */
		if (vm->is_compute_context &&
		    bo_va->base.bo->tbo.base.import_attach &&
		    (!bo_va->base.bo->tbo.resource ||
		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
			amdgpu_vm_bo_evicted_user(&bo_va->base);

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @flush_type: flush type
 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
 *
 * Flush TLB if needed for a compute VM.
 *
 * Returns:
 * 0 for success.
 */
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask)
{
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	bool all_hub = false;
	int xcc = 0, r = 0;

	WARN_ON_ONCE(!vm->is_compute_context);

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more which is harmless.
	 */
	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
		return 0;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	for_each_inst(xcc, xcc_mask) {
		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
						   all_hub, xcc);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	bo_va->last_pt_update = dma_fence_get_stub();
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (!bo)
		return bo_va;

	dma_resv_assert_held(bo->tbo.base.resv);
	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
		bo_va->is_xgmi = true;
		/* Power up XGMI if it can be potentially used */
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
	}

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
		amdgpu_vm_prt_get(adev);

	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
		amdgpu_vm_bo_moved(&bo_va->base);

	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
					  struct amdgpu_bo *bo,
					  uint64_t saddr,
					  uint64_t offset,
					  uint64_t size)
{
	uint64_t tmp, lpfn;

	if (saddr & AMDGPU_GPU_PAGE_MASK
	    || offset & AMDGPU_GPU_PAGE_MASK
	    || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	if (check_add_overflow(saddr, size, &tmp)
	    || check_add_overflow(offset, size, &tmp)
	    || size == 0 /* which also leads to end < begin */)
		return -EINVAL;

	/* make sure object fit at this offset */
	if (bo && offset + size > amdgpu_bo_size(bo))
		return -EINVAL;

	/* Ensure last pfn not exceed max_pfn */
	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
	if (lpfn >= adev->vm_manager.max_pfn)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;
	int r;

	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
	if (r)
		return r;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
	if (r)
		return r;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

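	/* The mapping may sit on either the valid or the invalid list,
	 * search both.
	 */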
1849	list_for_each_entry(mapping, &bo_va->valids, list) {
1850		if (mapping->start == saddr)
1851			break;
1852	}
1853
1854	if (&mapping->list == &bo_va->valids) {
1855		valid = false;
1856
1857		list_for_each_entry(mapping, &bo_va->invalids, list) {
1858			if (mapping->start == saddr)
1859				break;
1860		}
1861
1862		if (&mapping->list == &bo_va->invalids)
1863			return -ENOENT;
1864	}
1865
1866	list_del(&mapping->list);
1867	amdgpu_vm_it_remove(mapping, &vm->va);
1868	mapping->bo_va = NULL;
1869	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1870
1871	if (valid)
1872		list_add(&mapping->list, &vm->freed);
1873	else
1874		amdgpu_vm_free_mapping(adev, vm, mapping,
1875				       bo_va->last_pt_update);
1876
1877	return 0;
1878}
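
/*
 * Usage sketch (illustrative; the helper name is hypothetical): per the
 * kernel-doc above, the object must be reserved around the call;
 * -ENOENT signals that no mapping starts exactly at @va.
 */
static int example_unmap_locked(struct amdgpu_device *adev,
				struct amdgpu_bo_va *bo_va, uint64_t va)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	int r;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	r = amdgpu_vm_bo_unmap(adev, bo_va, va);
	amdgpu_bo_unreserve(bo);
	return r;
}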
1879
1880/**
1881 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1882 *
1883 * @adev: amdgpu_device pointer
1884 * @vm: VM structure to use
1885 * @saddr: start of the range
1886 * @size: size of the range
1887 *
1888 * Remove all mappings in a range, splitting them as appropriate.
1889 *
1890 * Returns:
1891 * 0 for success, error for failure.
1892 */
1893int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1894				struct amdgpu_vm *vm,
1895				uint64_t saddr, uint64_t size)
1896{
1897	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1898	LIST_HEAD(removed);
1899	uint64_t eaddr;
1900	int r;
1901
1902	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1903	if (r)
1904		return r;
1905
1906	saddr /= AMDGPU_GPU_PAGE_SIZE;
1907	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1908
1909	/* Allocate all the needed memory */
1910	before = kzalloc(sizeof(*before), GFP_KERNEL);
1911	if (!before)
1912		return -ENOMEM;
1913	INIT_LIST_HEAD(&before->list);
1914
1915	after = kzalloc(sizeof(*after), GFP_KERNEL);
1916	if (!after) {
1917		kfree(before);
1918		return -ENOMEM;
1919	}
1920	INIT_LIST_HEAD(&after->list);
1921
1922	/* Now gather all removed mappings */
1923	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1924	while (tmp) {
1925		/* Remember mapping split at the start */
1926		if (tmp->start < saddr) {
1927			before->start = tmp->start;
1928			before->last = saddr - 1;
1929			before->offset = tmp->offset;
1930			before->flags = tmp->flags;
1931			before->bo_va = tmp->bo_va;
1932			list_add(&before->list, &tmp->bo_va->invalids);
1933		}
1934
1935		/* Remember mapping split at the end */
1936		if (tmp->last > eaddr) {
1937			after->start = eaddr + 1;
1938			after->last = tmp->last;
1939			after->offset = tmp->offset;
1940			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1941			after->flags = tmp->flags;
1942			after->bo_va = tmp->bo_va;
1943			list_add(&after->list, &tmp->bo_va->invalids);
1944		}
1945
1946		list_del(&tmp->list);
1947		list_add(&tmp->list, &removed);
1948
1949		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1950	}
1951
1952	/* And free them up */
1953	list_for_each_entry_safe(tmp, next, &removed, list) {
1954		amdgpu_vm_it_remove(tmp, &vm->va);
1955		list_del(&tmp->list);
1956
1957		if (tmp->start < saddr)
1958			tmp->start = saddr;
1959		if (tmp->last > eaddr)
1960			tmp->last = eaddr;
1961
1962		tmp->bo_va = NULL;
1963		list_add(&tmp->list, &vm->freed);
1964		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1965	}
1966
1967	/* Insert partial mapping before the range */
1968	if (!list_empty(&before->list)) {
1969		struct amdgpu_bo *bo = before->bo_va->base.bo;
1970
1971		amdgpu_vm_it_insert(before, &vm->va);
1972		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
1973			amdgpu_vm_prt_get(adev);
1974
1975		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1976		    !before->bo_va->base.moved)
1977			amdgpu_vm_bo_moved(&before->bo_va->base);
1978	} else {
1979		kfree(before);
1980	}
1981
1982	/* Insert partial mapping after the range */
1983	if (!list_empty(&after->list)) {
1984		struct amdgpu_bo *bo = after->bo_va->base.bo;
1985
1986		amdgpu_vm_it_insert(after, &vm->va);
1987		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
1988			amdgpu_vm_prt_get(adev);
1989
1990		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1991		    !after->bo_va->base.moved)
1992			amdgpu_vm_bo_moved(&after->bo_va->base);
1993	} else {
1994		kfree(after);
1995	}
1996
1997	return 0;
1998}
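
/*
 * Worked example (illustrative): clearing pages [8, 11] out of a
 * mapping that covers [0, 15] leaves two remainders - @before becomes
 * [0, 7] and @after becomes [12, 15], with @after's BO offset advanced
 * by 12 << PAGE_SHIFT bytes so it still points at the right backing
 * data; the clipped original moves to the VM's freed list.
 */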
1999
2000/**
2001 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2002 *
2003 * @vm: the requested VM
2004 * @addr: the address
2005 *
2006 * Find a mapping by its address.
2007 *
2008 * Returns:
2009 * The amdgpu_bo_va_mapping matching addr, or NULL if none is found.
2010 *
2011 */
2012struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2013							 uint64_t addr)
2014{
2015	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2016}
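
/*
 * Usage sketch (illustrative; the helper and its page-number parameter
 * are assumptions): since the tree stores inclusive page intervals, a
 * point query with start == last == addr returns the mapping covering
 * that GPU page, if any.
 */
static struct amdgpu_bo *example_bo_at(struct amdgpu_vm *vm, uint64_t page)
{
	struct amdgpu_bo_va_mapping *m = amdgpu_vm_bo_lookup_mapping(vm, page);

	return (m && m->bo_va) ? m->bo_va->base.bo : NULL;
}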
2017
2018/**
2019 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2020 *
2021 * @vm: the requested vm
2022 * @ticket: CS ticket
2023 *
2024 * Trace all mappings of BOs reserved during a command submission.
2025 */
2026void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2027{
2028	struct amdgpu_bo_va_mapping *mapping;
2029
2030	if (!trace_amdgpu_vm_bo_cs_enabled())
2031		return;
2032
2033	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2034	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2035		if (mapping->bo_va && mapping->bo_va->base.bo) {
2036			struct amdgpu_bo *bo;
2037
2038			bo = mapping->bo_va->base.bo;
2039			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2040			    ticket)
2041				continue;
2042		}
2043
2044		trace_amdgpu_vm_bo_cs(mapping);
2045	}
2046}
2047
2048/**
2049 * amdgpu_vm_bo_del - remove a bo from a specific vm
2050 *
2051 * @adev: amdgpu_device pointer
2052 * @bo_va: requested bo_va
2053 *
2054 * Remove @bo_va->bo from the requested vm.
2055 *
2056 * Object has to be reserved!
2057 */
2058void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2059		      struct amdgpu_bo_va *bo_va)
2060{
2061	struct amdgpu_bo_va_mapping *mapping, *next;
2062	struct amdgpu_bo *bo = bo_va->base.bo;
2063	struct amdgpu_vm *vm = bo_va->base.vm;
2064	struct amdgpu_vm_bo_base **base;
2065
2066	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2067
2068	if (bo) {
2069		dma_resv_assert_held(bo->tbo.base.resv);
2070		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2071			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2072
2073		for (base = &bo_va->base.bo->vm_bo; *base;
2074		     base = &(*base)->next) {
2075			if (*base != &bo_va->base)
2076				continue;
2077
2078			*base = bo_va->base.next;
2079			break;
2080		}
2081	}
2082
2083	spin_lock(&vm->status_lock);
2084	list_del(&bo_va->base.vm_status);
2085	spin_unlock(&vm->status_lock);
2086
2087	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2088		list_del(&mapping->list);
2089		amdgpu_vm_it_remove(mapping, &vm->va);
2090		mapping->bo_va = NULL;
2091		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2092		list_add(&mapping->list, &vm->freed);
2093	}
2094	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2095		list_del(&mapping->list);
2096		amdgpu_vm_it_remove(mapping, &vm->va);
2097		amdgpu_vm_free_mapping(adev, vm, mapping,
2098				       bo_va->last_pt_update);
2099	}
2100
2101	dma_fence_put(bo_va->last_pt_update);
2102
2103	if (bo && bo_va->is_xgmi)
2104		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2105
2106	kfree(bo_va);
2107}
2108
2109/**
2110 * amdgpu_vm_evictable - check if we can evict a VM
2111 *
2112 * @bo: A page table of the VM.
2113 *
2114 * Check if it is possible to evict a VM.
2115 */
2116bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2117{
2118	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2119
2120	/* Page tables of a destroyed VM can go away immediately */
2121	if (!bo_base || !bo_base->vm)
2122		return true;
2123
2124	/* Don't evict VM page tables while they are busy */
2125	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2126		return false;
2127
2128	/* Try to block ongoing updates */
2129	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2130		return false;
2131
2132	/* Don't evict VM page tables while they are updated */
2133	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2134		amdgpu_vm_eviction_unlock(bo_base->vm);
2135		return false;
2136	}
2137
2138	bo_base->vm->evicting = true;
2139	amdgpu_vm_eviction_unlock(bo_base->vm);
2140	return true;
2141}
2142
2143/**
2144 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2145 *
2146 * @adev: amdgpu_device pointer
2147 * @bo: amdgpu buffer object
2148 * @evicted: is the BO evicted
2149 *
2150 * Mark @bo as invalid.
2151 */
2152void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2153			     struct amdgpu_bo *bo, bool evicted)
2154{
2155	struct amdgpu_vm_bo_base *bo_base;
2156
2157	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2158		struct amdgpu_vm *vm = bo_base->vm;
2159
2160		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2161			amdgpu_vm_bo_evicted(bo_base);
2162			continue;
2163		}
2164
2165		if (bo_base->moved)
2166			continue;
2167		bo_base->moved = true;
2168
2169		if (bo->tbo.type == ttm_bo_type_kernel)
2170			amdgpu_vm_bo_relocated(bo_base);
2171		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2172			amdgpu_vm_bo_moved(bo_base);
2173		else
2174			amdgpu_vm_bo_invalidated(bo_base);
2175	}
2176}
2177
2178/**
2179 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2180 *
2181 * @vm_size: VM size
2182 *
2183 * Returns:
2185 * VM page table size as a power of two
2185 */
2186static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2187{
2188	/* Total bits covered by PD + PTs */
2189	unsigned bits = ilog2(vm_size) + 18;
2190
2191	/* Make sure the PD is 4K in size up to an 8GB address space.
2192	   Above that, split equally between PD and PTs. */
2193	if (vm_size <= 8)
2194		return (bits - 9);
2195	else
2196		return ((bits + 3) / 2);
2197}
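
/*
 * Worked examples (illustrative): vm_size = 8 GB gives bits = 3 + 18 =
 * 21 and a block size of 21 - 9 = 12, i.e. a 512-entry (4K) page
 * directory; vm_size = 256 GB gives bits = 8 + 18 = 26 and a block size
 * of (26 + 3) / 2 = 14, splitting the bits roughly evenly between the
 * directory and the page tables.
 */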
2198
2199/**
2200 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2201 *
2202 * @adev: amdgpu_device pointer
2203 * @min_vm_size: the minimum vm size in GB if it's set auto
2204 * @fragment_size_default: Default PTE fragment size
2205 * @max_level: max VMPT level
2206 * @max_bits: max address space size in bits
2207 *
2208 */
2209void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2210			   uint32_t fragment_size_default, unsigned max_level,
2211			   unsigned max_bits)
2212{
2213	unsigned int max_size = 1 << (max_bits - 30);
2214	unsigned int vm_size;
2215	uint64_t tmp;
2216
2217	/* adjust vm size first */
2218	if (amdgpu_vm_size != -1) {
2219		vm_size = amdgpu_vm_size;
2220		if (vm_size > max_size) {
2221			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2222				 amdgpu_vm_size, max_size);
2223			vm_size = max_size;
2224		}
2225	} else {
2226		struct sysinfo si;
2227		unsigned int phys_ram_gb;
2228
2229		/* Optimal VM size depends on the amount of physical
2230		 * RAM available. Underlying requirements and
2231		 * assumptions:
2232		 *
2233		 *  - Need to map system memory and VRAM from all GPUs
2234		 *     - VRAM from other GPUs not known here
2235		 *     - Assume VRAM <= system memory
2236		 *  - On GFX8 and older, VM space can be segmented for
2237		 *    different MTYPEs
2238		 *  - Need to allow room for fragmentation, guard pages etc.
2239		 *
2240		 * This adds up to a rough guess of system memory x3.
2241		 * Round up to power of two to maximize the available
2242		 * VM size with the given page table size.
2243		 */
2244		si_meminfo(&si);
2245		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2246			       (1 << 30) - 1) >> 30;
2247		vm_size = roundup_pow_of_two(
2248			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2249	}
2250
2251	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2252
2253	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2254	if (amdgpu_vm_block_size != -1)
2255		tmp >>= amdgpu_vm_block_size - 9;
2256	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2257	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2258	switch (adev->vm_manager.num_level) {
2259	case 3:
2260		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2261		break;
2262	case 2:
2263		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2264		break;
2265	case 1:
2266		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2267		break;
2268	default:
2269		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2270	}
2271	/* block size depends on vm size and hw setup */
2272	if (amdgpu_vm_block_size != -1)
2273		adev->vm_manager.block_size =
2274			min((unsigned)amdgpu_vm_block_size, max_bits
2275			    - AMDGPU_GPU_PAGE_SHIFT
2276			    - 9 * adev->vm_manager.num_level);
2277	else if (adev->vm_manager.num_level > 1)
2278		adev->vm_manager.block_size = 9;
2279	else
2280		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2281
2282	if (amdgpu_vm_fragment_size == -1)
2283		adev->vm_manager.fragment_size = fragment_size_default;
2284	else
2285		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2286
2287	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2288		 vm_size, adev->vm_manager.num_level + 1,
2289		 adev->vm_manager.block_size,
2290		 adev->vm_manager.fragment_size);
2291}
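
/*
 * Worked example (illustrative): with 16 GB of system RAM and no module
 * override, phys_ram_gb * 3 = 48 rounds up to a 64 GB VM, giving
 * max_pfn = 64 << 18 GPU pages; the fls64()/DIV_ROUND_UP step above
 * then derives how many 9-bit translation levels are needed below the
 * root.
 */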
2292
2293/**
2294 * amdgpu_vm_wait_idle - wait for the VM to become idle
2295 *
2296 * @vm: VM object to wait for
2297 * @timeout: timeout to wait for VM to become idle
2298 */
2299long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2300{
2301	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2302					DMA_RESV_USAGE_BOOKKEEP,
2303					true, timeout);
2304	if (timeout <= 0)
2305		return timeout;
2306
2307	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2308}
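
/*
 * Usage sketch (illustrative; the helper name and 100 ms timeout are
 * arbitrary): a positive return means both the page-table reservation
 * and the last unlocked update signaled within the timeout.
 */
static bool example_vm_idle(struct amdgpu_vm *vm)
{
	return amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100)) > 0;
}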
2309
2310static void amdgpu_vm_destroy_task_info(struct kref *kref)
2311{
2312	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2313
2314	kfree(ti);
2315}
2316
2317static inline struct amdgpu_vm *
2318amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2319{
2320	struct amdgpu_vm *vm;
2321	unsigned long flags;
2322
2323	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2324	vm = xa_load(&adev->vm_manager.pasids, pasid);
2325	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2326
2327	return vm;
2328}
2329
2330/**
2331 * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2332 *
2333 * @task_info: task_info struct under discussion.
2334 *
2335 * Frees the vm task_info at the last put.
2336 */
2337void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2338{
2339	kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2340}
2341
2342/**
2343 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2344 *
2345 * @vm: VM to get info from
2346 *
2347 * Returns the reference counted task_info structure, which must be
2348 * released with amdgpu_vm_put_task_info().
2349 */
2350struct amdgpu_task_info *
2351amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2352{
2353	struct amdgpu_task_info *ti = NULL;
2354
2355	if (vm) {
2356		ti = vm->task_info;
2357		kref_get(&vm->task_info->refcount);
2358	}
2359
2360	return ti;
2361}
2362
2363/**
2364 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2365 *
2366 * @adev: drm device pointer
2367 * @pasid: PASID identifier for VM
2368 *
2369 * Returns the reference counted task_info structure, which must be
2370 * released with amdgpu_vm_put_task_info().
2371 */
2372struct amdgpu_task_info *
2373amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2374{
2375	return amdgpu_vm_get_task_info_vm(
2376			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2377}
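
/*
 * Usage sketch (illustrative; the helper name is hypothetical): every
 * successful get must be paired with amdgpu_vm_put_task_info(), or the
 * refcounted structure leaks.
 */
static void example_report_fault_task(struct amdgpu_device *adev, u32 pasid)
{
	struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_pasid(adev, pasid);

	if (!ti)
		return;
	pr_info("fault in process %s pid %d\n", ti->process_name, ti->pid);
	amdgpu_vm_put_task_info(ti);
}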
2378
2379static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2380{
2381	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2382	if (!vm->task_info)
2383		return -ENOMEM;
2384
2385	kref_init(&vm->task_info->refcount);
2386	return 0;
2387}
2388
2389/**
2390 * amdgpu_vm_set_task_info - Sets VMs task info.
2391 *
2392 * @vm: vm for which to set the info
2393 */
2394void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2395{
2396	if (!vm->task_info)
2397		return;
2398
2399	if (vm->task_info->pid == current->pid)
2400		return;
2401
2402	vm->task_info->pid = current->pid;
2403	get_task_comm(vm->task_info->task_name, current);
2404
2405	if (current->group_leader->mm != current->mm)
2406		return;
2407
2408	vm->task_info->tgid = current->group_leader->pid;
2409	get_task_comm(vm->task_info->process_name, current->group_leader);
2410}
2411
2412/**
2413 * amdgpu_vm_init - initialize a vm instance
2414 *
2415 * @adev: amdgpu_device pointer
2416 * @vm: requested vm
2417 * @xcp_id: GPU partition selection id
2418 *
2419 * Init @vm fields.
2420 *
2421 * Returns:
2422 * 0 for success, error for failure.
2423 */
2424int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2425		   int32_t xcp_id)
2426{
2427	struct amdgpu_bo *root_bo;
2428	struct amdgpu_bo_vm *root;
2429	int r, i;
2430
2431	vm->va = RB_ROOT_CACHED;
2432	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2433		vm->reserved_vmid[i] = NULL;
2434	INIT_LIST_HEAD(&vm->evicted);
2435	INIT_LIST_HEAD(&vm->evicted_user);
2436	INIT_LIST_HEAD(&vm->relocated);
2437	INIT_LIST_HEAD(&vm->moved);
2438	INIT_LIST_HEAD(&vm->idle);
2439	INIT_LIST_HEAD(&vm->invalidated);
2440	spin_lock_init(&vm->status_lock);
2441	INIT_LIST_HEAD(&vm->freed);
2442	INIT_LIST_HEAD(&vm->done);
2443	INIT_LIST_HEAD(&vm->pt_freed);
2444	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2445	INIT_KFIFO(vm->faults);
2446
2447	r = amdgpu_vm_init_entities(adev, vm);
2448	if (r)
2449		return r;
2450
2451	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2452
2453	vm->is_compute_context = false;
2454
2455	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2456				    AMDGPU_VM_USE_CPU_FOR_GFX);
2457
2458	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2459			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2460	WARN_ONCE((vm->use_cpu_for_update &&
2461		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2462		  "CPU update of VM recommended only for large BAR system\n");
2463
2464	if (vm->use_cpu_for_update)
2465		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2466	else
2467		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2468
2469	vm->last_update = dma_fence_get_stub();
2470	vm->last_unlocked = dma_fence_get_stub();
2471	vm->last_tlb_flush = dma_fence_get_stub();
2472	vm->generation = amdgpu_vm_generation(adev, NULL);
2473
2474	mutex_init(&vm->eviction_lock);
2475	vm->evicting = false;
2476	vm->tlb_fence_context = dma_fence_context_alloc(1);
2477
2478	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2479				false, &root, xcp_id);
2480	if (r)
2481		goto error_free_delayed;
2482
2483	root_bo = amdgpu_bo_ref(&root->bo);
2484	r = amdgpu_bo_reserve(root_bo, true);
2485	if (r) {
2486		amdgpu_bo_unref(&root_bo);
2487		goto error_free_delayed;
2488	}
2489
2490	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2491	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2492	if (r)
2493		goto error_free_root;
2494
2495	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2496	if (r)
2497		goto error_free_root;
2498
2499	r = amdgpu_vm_create_task_info(vm);
2500	if (r)
2501		DRM_DEBUG("Failed to create task info for VM\n");
2502
2503	amdgpu_bo_unreserve(vm->root.bo);
2504	amdgpu_bo_unref(&root_bo);
2505
2506	return 0;
2507
2508error_free_root:
2509	amdgpu_vm_pt_free_root(adev, vm);
2510	amdgpu_bo_unreserve(vm->root.bo);
2511	amdgpu_bo_unref(&root_bo);
2512
2513error_free_delayed:
2514	dma_fence_put(vm->last_tlb_flush);
2515	dma_fence_put(vm->last_unlocked);
2516	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2517	amdgpu_vm_fini_entities(vm);
2518
2519	return r;
2520}
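
/*
 * Usage sketch (illustrative; error handling elided): each successful
 * amdgpu_vm_init() must eventually be paired with amdgpu_vm_fini(); the
 * xcp_id is caller policy for partitioned GPUs.
 */
static int example_vm_lifecycle(struct amdgpu_device *adev,
				struct amdgpu_vm *vm, int32_t xcp_id)
{
	int r = amdgpu_vm_init(adev, vm, xcp_id);

	if (r)
		return r;
	/* ... map BOs, submit work ... */
	amdgpu_vm_fini(adev, vm);
	return 0;
}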
2521
2522/**
2523 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2524 *
2525 * @adev: amdgpu_device pointer
2526 * @vm: requested vm
2527 *
2528 * This only works on GFX VMs that don't have any BOs added and no
2529 * page tables allocated yet.
2530 *
2531 * Changes the following VM parameters:
2532 * - use_cpu_for_update
2533 * - pte_supports_ats
2534 *
2535 * Reinitializes the page directory to reflect the changed ATS
2536 * setting.
2537 *
2538 * Returns:
2539 * 0 for success, -errno for errors.
2540 */
2541int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2542{
2543	int r;
2544
2545	r = amdgpu_bo_reserve(vm->root.bo, true);
2546	if (r)
2547		return r;
2548
2549	/* Update VM state */
2550	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2551				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2552	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2553			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2554	WARN_ONCE((vm->use_cpu_for_update &&
2555		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2556		  "CPU update of VM recommended only for large BAR system\n");
2557
2558	if (vm->use_cpu_for_update) {
2559		/* Sync with last SDMA update/clear before switching to CPU */
2560		r = amdgpu_bo_sync_wait(vm->root.bo,
2561					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2562		if (r)
2563			goto unreserve_bo;
2564
2565		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2566		r = amdgpu_vm_pt_map_tables(adev, vm);
2567		if (r)
2568			goto unreserve_bo;
2569
2570	} else {
2571		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2572	}
2573
2574	dma_fence_put(vm->last_update);
2575	vm->last_update = dma_fence_get_stub();
2576	vm->is_compute_context = true;
2577
2578unreserve_bo:
2579	amdgpu_bo_unreserve(vm->root.bo);
2580	return r;
2581}
2582
2583/**
2584 * amdgpu_vm_release_compute - release a compute vm
2585 * @adev: amdgpu_device pointer
2586 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2587 *
2588 * This is the counterpart of amdgpu_vm_make_compute(). It decouples the
2589 * compute pasid from the vm. Compute should stop using the vm after this call.
2590 */
2591void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2592{
2593	amdgpu_vm_set_pasid(adev, vm, 0);
2594	vm->is_compute_context = false;
2595}
2596
2597/**
2598 * amdgpu_vm_fini - tear down a vm instance
2599 *
2600 * @adev: amdgpu_device pointer
2601 * @vm: requested vm
2602 *
2603 * Tear down @vm.
2604 * Unbind the VM and remove all bos from the vm bo list
2605 */
2606void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2607{
2608	struct amdgpu_bo_va_mapping *mapping, *tmp;
2609	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2610	struct amdgpu_bo *root;
2611	unsigned long flags;
2612	int i;
2613
2614	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2615
2616	flush_work(&vm->pt_free_work);
2617
2618	root = amdgpu_bo_ref(vm->root.bo);
2619	amdgpu_bo_reserve(root, true);
2620	amdgpu_vm_put_task_info(vm->task_info);
2621	amdgpu_vm_set_pasid(adev, vm, 0);
2622	dma_fence_wait(vm->last_unlocked, false);
2623	dma_fence_put(vm->last_unlocked);
2624	dma_fence_wait(vm->last_tlb_flush, false);
2625	/* Make sure that all fence callbacks have completed */
2626	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2627	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2628	dma_fence_put(vm->last_tlb_flush);
2629
2630	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2631		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2632			amdgpu_vm_prt_fini(adev, vm);
2633			prt_fini_needed = false;
2634		}
2635
2636		list_del(&mapping->list);
2637		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2638	}
2639
2640	amdgpu_vm_pt_free_root(adev, vm);
2641	amdgpu_bo_unreserve(root);
2642	amdgpu_bo_unref(&root);
2643	WARN_ON(vm->root.bo);
2644
2645	amdgpu_vm_fini_entities(vm);
2646
2647	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2648		dev_err(adev->dev, "still active bo inside vm\n");
2649	}
2650	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2651					     &vm->va.rb_root, rb) {
2652		/* Don't remove the mapping here, we don't want to trigger a
2653		 * rebalance and the tree is about to be destroyed anyway.
2654		 */
2655		list_del(&mapping->list);
2656		kfree(mapping);
2657	}
2658
2659	dma_fence_put(vm->last_update);
2660
2661	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2662		if (vm->reserved_vmid[i]) {
2663			amdgpu_vmid_free_reserved(adev, i);
2664			vm->reserved_vmid[i] = false;
2665		}
2666	}
2667
2668	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2669}
2670
2671/**
2672 * amdgpu_vm_manager_init - init the VM manager
2673 *
2674 * @adev: amdgpu_device pointer
2675 *
2676 * Initialize the VM manager structures
2677 */
2678void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2679{
2680	unsigned i;
2681
2682	/* Concurrent flushes are only possible starting with Vega10 and
2683	 * are broken on Navi10 and Navi14.
2684	 */
2685	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2686					      adev->asic_type == CHIP_NAVI10 ||
2687					      adev->asic_type == CHIP_NAVI14);
2688	amdgpu_vmid_mgr_init(adev);
2689
2690	adev->vm_manager.fence_context =
2691		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2692	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2693		adev->vm_manager.seqno[i] = 0;
2694
2695	spin_lock_init(&adev->vm_manager.prt_lock);
2696	atomic_set(&adev->vm_manager.num_prt_users, 0);
2697
2698	/* If not overridden by the user, by default, only in large BAR systems
2699	 * Compute VM tables will be updated by CPU
2700	 */
2701#ifdef CONFIG_X86_64
2702	if (amdgpu_vm_update_mode == -1) {
2703		/* For asic with VF MMIO access protection
2704		 * avoid using CPU for VM table updates
2705		 */
2706		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2707		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2708			adev->vm_manager.vm_update_mode =
2709				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2710		else
2711			adev->vm_manager.vm_update_mode = 0;
2712	} else
2713		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2714#else
2715	adev->vm_manager.vm_update_mode = 0;
2716#endif
2717
2718	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2719}
2720
2721/**
2722 * amdgpu_vm_manager_fini - cleanup VM manager
2723 *
2724 * @adev: amdgpu_device pointer
2725 *
2726 * Cleanup the VM manager and free resources.
2727 */
2728void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2729{
2730	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2731	xa_destroy(&adev->vm_manager.pasids);
2732
2733	amdgpu_vmid_mgr_fini(adev);
2734}
2735
2736/**
2737 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2738 *
2739 * @dev: drm device pointer
2740 * @data: drm_amdgpu_vm
2741 * @filp: drm file pointer
2742 *
2743 * Returns:
2744 * 0 for success, -errno for errors.
2745 */
2746int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2747{
2748	union drm_amdgpu_vm *args = data;
2749	struct amdgpu_device *adev = drm_to_adev(dev);
2750	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2751
2752	/* No valid flags defined yet */
2753	if (args->in.flags)
2754		return -EINVAL;
2755
2756	switch (args->in.op) {
2757	case AMDGPU_VM_OP_RESERVE_VMID:
2758		/* We only have requirement to reserve vmid from gfxhub */
2759		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2760			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2761			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2762		}
2763
2764		break;
2765	case AMDGPU_VM_OP_UNRESERVE_VMID:
2766		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2767			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2768			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2769		}
2770		break;
2771	default:
2772		return -EINVAL;
2773	}
2774
2775	return 0;
2776}
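
/*
 * Userspace sketch (illustrative, assuming libdrm's drmCommandWriteRead()
 * and the drm_amdgpu_vm uapi from amdgpu_drm.h): reserving a gfxhub VMID
 * for the calling context.
 */
static int example_reserve_vmid(int fd)
{
	union drm_amdgpu_vm args = {};

	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
	args.in.flags = 0;	/* no valid flags are defined yet */

	return drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
}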
2777
2778/**
2779 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2780 * @adev: amdgpu device pointer
2781 * @pasid: PASID of the VM
2782 * @ts: Timestamp of the fault
2783 * @vmid: VMID, only used for GFX 9.4.3.
2784 * @node_id: Node_id received in IH cookie. Only applicable for
2785 *           GFX 9.4.3.
2786 * @addr: Address of the fault
2787 * @write_fault: true if write fault, false if read fault
2788 *
2789 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2790 * shouldn't be reported any more.
2791 */
2792bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2793			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2794			    bool write_fault)
2795{
2796	bool is_compute_context = false;
2797	struct amdgpu_bo *root;
2798	unsigned long irqflags;
2799	uint64_t value, flags;
2800	struct amdgpu_vm *vm;
2801	int r;
2802
2803	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2804	vm = xa_load(&adev->vm_manager.pasids, pasid);
2805	if (vm) {
2806		root = amdgpu_bo_ref(vm->root.bo);
2807		is_compute_context = vm->is_compute_context;
2808	} else {
2809		root = NULL;
2810	}
2811	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2812
2813	if (!root)
2814		return false;
2815
2816	addr /= AMDGPU_GPU_PAGE_SIZE;
2817
2818	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2819	    node_id, addr, ts, write_fault)) {
2820		amdgpu_bo_unref(&root);
2821		return true;
2822	}
2823
2824	r = amdgpu_bo_reserve(root, true);
2825	if (r)
2826		goto error_unref;
2827
2828	/* Double check that the VM still exists */
2829	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2830	vm = xa_load(&adev->vm_manager.pasids, pasid);
2831	if (vm && vm->root.bo != root)
2832		vm = NULL;
2833	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2834	if (!vm)
2835		goto error_unlock;
2836
2837	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2838		AMDGPU_PTE_SYSTEM;
2839
2840	if (is_compute_context) {
2841		/* Intentionally setting invalid PTE flag
2842		 * combination to force a no-retry-fault
2843		 */
2844		flags = AMDGPU_VM_NORETRY_FLAGS;
2845		value = 0;
2846	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2847		/* Redirect the access to the dummy page */
2848		value = adev->dummy_page_addr;
2849		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2850			AMDGPU_PTE_WRITEABLE;
2851
2852	} else {
2853		/* Let the hw retry silently on the PTE */
2854		value = 0;
2855	}
2856
2857	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2858	if (r) {
2859		pr_debug("failed %d to reserve fence slot\n", r);
2860		goto error_unlock;
2861	}
2862
2863	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2864				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2865	if (r)
2866		goto error_unlock;
2867
2868	r = amdgpu_vm_update_pdes(adev, vm, true);
2869
2870error_unlock:
2871	amdgpu_bo_unreserve(root);
2872	if (r < 0)
2873		DRM_ERROR("Can't handle page fault (%d)\n", r);
2874
2875error_unref:
2876	amdgpu_bo_unref(&root);
2877
2878	return false;
2879}
2880
2881#if defined(CONFIG_DEBUG_FS)
2882/**
2883 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2884 *
2885 * @vm: Requested VM for printing BO info
2886 * @m: debugfs file
2887 *
2888 * Print BO information in debugfs file for the VM
2889 */
2890void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2891{
2892	struct amdgpu_bo_va *bo_va, *tmp;
2893	u64 total_idle = 0;
2894	u64 total_evicted = 0;
2895	u64 total_relocated = 0;
2896	u64 total_moved = 0;
2897	u64 total_invalidated = 0;
2898	u64 total_done = 0;
2899	unsigned int total_idle_objs = 0;
2900	unsigned int total_evicted_objs = 0;
2901	unsigned int total_relocated_objs = 0;
2902	unsigned int total_moved_objs = 0;
2903	unsigned int total_invalidated_objs = 0;
2904	unsigned int total_done_objs = 0;
2905	unsigned int id = 0;
2906
2907	spin_lock(&vm->status_lock);
2908	seq_puts(m, "\tIdle BOs:\n");
2909	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2910		if (!bo_va->base.bo)
2911			continue;
2912		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2913	}
2914	total_idle_objs = id;
2915	id = 0;
2916
2917	seq_puts(m, "\tEvicted BOs:\n");
2918	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2919		if (!bo_va->base.bo)
2920			continue;
2921		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2922	}
2923	total_evicted_objs = id;
2924	id = 0;
2925
2926	seq_puts(m, "\tRelocated BOs:\n");
2927	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2928		if (!bo_va->base.bo)
2929			continue;
2930		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2931	}
2932	total_relocated_objs = id;
2933	id = 0;
2934
2935	seq_puts(m, "\tMoved BOs:\n");
2936	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2937		if (!bo_va->base.bo)
2938			continue;
2939		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2940	}
2941	total_moved_objs = id;
2942	id = 0;
2943
2944	seq_puts(m, "\tInvalidated BOs:\n");
2945	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2946		if (!bo_va->base.bo)
2947			continue;
2948		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
2949	}
2950	total_invalidated_objs = id;
2951	id = 0;
2952
2953	seq_puts(m, "\tDone BOs:\n");
2954	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2955		if (!bo_va->base.bo)
2956			continue;
2957		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2958	}
2959	spin_unlock(&vm->status_lock);
2960	total_done_objs = id;
2961
2962	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2963		   total_idle_objs);
2964	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2965		   total_evicted_objs);
2966	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2967		   total_relocated_objs);
2968	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2969		   total_moved_objs);
2970	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2971		   total_invalidated_objs);
2972	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2973		   total_done_objs);
2974}
2975#endif
2976
2977/**
2978 * amdgpu_vm_update_fault_cache - update cached fault info.
2979 * @adev: amdgpu device pointer
2980 * @pasid: PASID of the VM
2981 * @addr: Address of the fault
2982 * @status: GPUVM fault status register
2983 * @vmhub: which vmhub got the fault
2984 *
2985 * Cache the fault info for later use by userspace in debugging.
2986 */
2987void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2988				  unsigned int pasid,
2989				  uint64_t addr,
2990				  uint32_t status,
2991				  unsigned int vmhub)
2992{
2993	struct amdgpu_vm *vm;
2994	unsigned long flags;
2995
2996	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2997
2998	vm = xa_load(&adev->vm_manager.pasids, pasid);
2999	/* Don't update the fault cache if status is 0.  In the multiple
3000	 * fault case, subsequent faults will return a 0 status which is
3001	 * useless for userspace and replaces the useful fault status, so
3002	 * only update if status is non-0.
3003	 */
3004	if (vm && status) {
3005		vm->fault_info.addr = addr;
3006		vm->fault_info.status = status;
3007		/*
3008		 * Update the fault information globally for later usage
3009		 * when vm could be stale or freed.
3010		 */
3011		adev->vm_manager.fault_info.addr = addr;
3012		adev->vm_manager.fault_info.vmhub = vmhub;
3013		adev->vm_manager.fault_info.status = status;
3014
3015		if (AMDGPU_IS_GFXHUB(vmhub)) {
3016			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3017			vm->fault_info.vmhub |=
3018				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3019		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3020			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3021			vm->fault_info.vmhub |=
3022				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3023		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3024			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3025			vm->fault_info.vmhub |=
3026				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3027		} else {
3028			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3029		}
3030	}
3031	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3032}
3033
3034/**
3035 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3036 *
3037 * @vm: VM to test against.
3038 * @bo: BO to be tested.
3039 *
3040 * Returns true if the BO shares the dma_resv object with the root PD and is
3041 * always guaranteed to be valid inside the VM.
3042 */
3043bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3044{
3045	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3046}
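
/*
 * Usage sketch (illustrative; the helper name is hypothetical): per-VM
 * BOs that share the root PD's reservation object satisfy this test,
 * which is why they can skip the per-submission validation bookkeeping.
 */
static bool example_is_per_vm_bo(struct amdgpu_vm *vm,
				 struct amdgpu_bo_va *bo_va)
{
	return amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo);
}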
v4.6
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <drm/drmP.h>
 
 
 
 
 
  29#include <drm/amdgpu_drm.h>
 
 
 
  30#include "amdgpu.h"
  31#include "amdgpu_trace.h"
  32
  33/*
  34 * GPUVM
  35 * GPUVM is similar to the legacy gart on older asics, however
  36 * rather than there being a single global gart table
  37 * for the entire GPU, there are multiple VM page tables active
  38 * at any given time.  The VM page tables can contain a mix
  39 * vram pages and system memory pages and system memory pages
 
 
 
 
 
 
 
 
  40 * can be mapped as snooped (cached system pages) or unsnooped
  41 * (uncached system pages).
  42 * Each VM has an ID associated with it and there is a page table
  43 * associated with each VMID.  When execting a command buffer,
  44 * the kernel tells the the ring what VMID to use for that command
 
  45 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  46 * The userspace drivers maintain their own address space and the kernel
  47 * sets up their pages tables accordingly when they submit their
  48 * command buffers and a VMID is assigned.
  49 * Cayman/Trinity support up to 8 active VMs at any given time;
  50 * SI supports 16.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  51 */
  52
  53/* Special value that no flush is necessary */
  54#define AMDGPU_VM_NO_FLUSH (~0ll)
 
 
 
 
 
 
  55
  56/**
  57 * amdgpu_vm_num_pde - return the number of page directory entries
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  58 *
  59 * @adev: amdgpu_device pointer
 
 
 
 
 
  60 *
  61 * Calculate the number of page directory entries.
  62 */
  63static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
 
  64{
  65	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  66}
  67
  68/**
  69 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 
 
  70 *
  71 * @adev: amdgpu_device pointer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72 *
  73 * Calculate the size of the page directory in bytes.
 
  74 */
  75static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
  76{
  77	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
 
 
  78}
  79
  80/**
  81 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
  82 *
  83 * @vm: vm providing the BOs
  84 * @validated: head of validation list
  85 * @entry: entry to add
  86 *
  87 * Add the page directory to the list of BOs to
  88 * validate for command submission.
  89 */
  90void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
  91			 struct list_head *validated,
  92			 struct amdgpu_bo_list_entry *entry)
  93{
  94	entry->robj = vm->page_directory;
  95	entry->priority = 0;
  96	entry->tv.bo = &vm->page_directory->tbo;
  97	entry->tv.shared = true;
  98	entry->user_pages = NULL;
  99	list_add(&entry->tv.head, validated);
 100}
 101
 102/**
 103 * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
 104 *
 105 * @vm: vm providing the BOs
 106 * @duplicates: head of duplicates list
 107 *
 108 * Add the page directory to the BO duplicates list
 109 * for command submission.
 110 */
 111void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
 112{
 113	unsigned i;
 
 
 
 
 
 
 
 114
 115	/* add the vm page table to the list */
 116	for (i = 0; i <= vm->max_pde_used; ++i) {
 117		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
 
 
 
 
 
 
 
 
 
 
 
 118
 119		if (!entry->robj)
 120			continue;
 
 
 
 
 
 
 
 
 121
 122		list_add(&entry->tv.head, duplicates);
 
 
 
 
 
 
 
 
 
 
 
 123	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 125}
 126
 127/**
 128 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 129 *
 130 * @adev: amdgpu device instance
 131 * @vm: vm providing the BOs
 132 *
 133 * Move the PT BOs to the tail of the LRU.
 
 134 */
 135void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 136				  struct amdgpu_vm *vm)
 
 
 
 
 
 
 
 
 
 137{
 138	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 139	unsigned i;
 
 
 
 
 
 140
 141	spin_lock(&glob->lru_lock);
 142	for (i = 0; i <= vm->max_pde_used; ++i) {
 143		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
 144
 145		if (!entry->robj)
 146			continue;
 
 
 147
 148		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
 149	}
 150	spin_unlock(&glob->lru_lock);
 
 
 151}
 152
 153/**
 154 * amdgpu_vm_grab_id - allocate the next free VMID
 
 
 155 *
 156 * @vm: vm to allocate id for
 157 * @ring: ring we want to submit job to
 158 * @sync: sync object where we add dependencies
 159 * @fence: fence protecting ID from reuse
 160 *
 161 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 162 */
 163int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 164		      struct amdgpu_sync *sync, struct fence *fence,
 165		      unsigned *vm_id, uint64_t *vm_pd_addr)
 166{
 167	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 168	struct amdgpu_device *adev = ring->adev;
 169	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
 170	struct fence *updates = sync->last_vm_update;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 171	int r;
 172
 173	mutex_lock(&adev->vm_manager.lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 174
 175	/* check if the id is still valid */
 176	if (id->mgr_id) {
 177		struct fence *flushed = id->flushed_updates;
 178		bool is_later;
 179		long owner;
 180
 181		if (!flushed)
 182			is_later = true;
 183		else if (!updates)
 184			is_later = false;
 185		else
 186			is_later = fence_is_later(updates, flushed);
 187
 188		owner = atomic_long_read(&id->mgr_id->owner);
 189		if (!is_later && owner == (long)id &&
 190		    pd_addr == id->pd_gpu_addr) {
 191
 192			r = amdgpu_sync_fence(ring->adev, sync,
 193					      id->mgr_id->active);
 194			if (r) {
 195				mutex_unlock(&adev->vm_manager.lock);
 196				return r;
 197			}
 198
 199			fence_put(id->mgr_id->active);
 200			id->mgr_id->active = fence_get(fence);
 201
 202			list_move_tail(&id->mgr_id->list,
 203				       &adev->vm_manager.ids_lru);
 
 204
 205			*vm_id = id->mgr_id - adev->vm_manager.ids;
 206			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
 207			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
 208						*vm_pd_addr);
 209
 210			mutex_unlock(&adev->vm_manager.lock);
 211			return 0;
 212		}
 213	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 214
 215	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
 216				      struct amdgpu_vm_manager_id,
 217				      list);
 218
 219	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
 220	if (!r) {
 221		fence_put(id->mgr_id->active);
 222		id->mgr_id->active = fence_get(fence);
 223
 224		fence_put(id->flushed_updates);
 225		id->flushed_updates = fence_get(updates);
 226
 227		id->pd_gpu_addr = pd_addr;
 
 
 
 
 
 
 
 
 
 
 228
 229		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
 230		atomic_long_set(&id->mgr_id->owner, (long)id);
 231
 232		*vm_id = id->mgr_id - adev->vm_manager.ids;
 233		*vm_pd_addr = pd_addr;
 234		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 235	}
 
 236
 237	mutex_unlock(&adev->vm_manager.lock);
 238	return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 239}
 240
 241/**
 242 * amdgpu_vm_flush - hardware flush the vm
 243 *
 244 * @ring: ring to use for flush
 245 * @vm_id: vmid number to use
 246 * @pd_addr: address of the page directory
 247 *
 248 * Emit a VM flush when it is necessary.
 
 
 
 249 */
 250void amdgpu_vm_flush(struct amdgpu_ring *ring,
 251		     unsigned vm_id, uint64_t pd_addr,
 252		     uint32_t gds_base, uint32_t gds_size,
 253		     uint32_t gws_base, uint32_t gws_size,
 254		     uint32_t oa_base, uint32_t oa_size)
 255{
 256	struct amdgpu_device *adev = ring->adev;
 257	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
 258	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 259		mgr_id->gds_base != gds_base ||
 260		mgr_id->gds_size != gds_size ||
 261		mgr_id->gws_base != gws_base ||
 262		mgr_id->gws_size != gws_size ||
 263		mgr_id->oa_base != oa_base ||
 264		mgr_id->oa_size != oa_size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 265
 266	if (ring->funcs->emit_pipeline_sync && (
 267	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
 268		amdgpu_ring_emit_pipeline_sync(ring);
 269
 270	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
 271		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
 272		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 273	}
 274
 275	if (gds_switch_needed) {
 276		mgr_id->gds_base = gds_base;
 277		mgr_id->gds_size = gds_size;
 278		mgr_id->gws_base = gws_base;
 279		mgr_id->gws_size = gws_size;
 280		mgr_id->oa_base = oa_base;
 281		mgr_id->oa_size = oa_size;
 282		amdgpu_ring_emit_gds_switch(ring, vm_id,
 283					    gds_base, gds_size,
 284					    gws_base, gws_size,
 285					    oa_base, oa_size);
 286	}
 287}
 288
 289/**
 290 * amdgpu_vm_reset_id - reset VMID to zero
 291 *
 292 * @adev: amdgpu device structure
 293 * @vm_id: vmid number to use
 294 *
 295 * Reset saved GDW, GWS and OA to force switch on next flush.
 296 */
 297void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 298{
 299	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 300
 301	mgr_id->gds_base = 0;
 302	mgr_id->gds_size = 0;
 303	mgr_id->gws_base = 0;
 304	mgr_id->gws_size = 0;
 305	mgr_id->oa_base = 0;
 306	mgr_id->oa_size = 0;
 307}
 308
 309/**
 310 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 311 *
 312 * @vm: requested vm
 313 * @bo: requested buffer object
 314 *
 315 * Find @bo inside the requested vm.
 316 * Search inside the @bos vm list for the requested vm
 317 * Returns the found bo_va or NULL if none is found
 318 *
 319 * Object has to be reserved!
 
 
 
 320 */
 321struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 322				       struct amdgpu_bo *bo)
 323{
 324	struct amdgpu_bo_va *bo_va;
 
 
 
 
 325
 326	list_for_each_entry(bo_va, &bo->va, bo_list) {
 327		if (bo_va->vm == vm) {
 328			return bo_va;
 329		}
 330	}
 331	return NULL;
 332}
 333
 334/**
 335 * amdgpu_vm_update_pages - helper to call the right asic function
 
 
 
 
 
 
 336 *
 337 * @adev: amdgpu_device pointer
 338 * @gtt: GART instance to use for mapping
 339 * @gtt_flags: GTT hw access flags
 340 * @ib: indirect buffer to fill with commands
 341 * @pe: addr of the page entry
 342 * @addr: dst addr to write into pe
 343 * @count: number of page entries to update
 344 * @incr: increase next addr by incr bytes
 345 * @flags: hw access flags
 346 *
 347 * Traces the parameters and calls the right asic functions
 348 * to setup the page table using the DMA.
 349 */
 350static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 351				   struct amdgpu_gart *gtt,
 352				   uint32_t gtt_flags,
 353				   struct amdgpu_ib *ib,
 354				   uint64_t pe, uint64_t addr,
 355				   unsigned count, uint32_t incr,
 356				   uint32_t flags)
 357{
 358	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 359
 360	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
 361		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
 362		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
 363
 364	} else if (gtt) {
 365		dma_addr_t *pages_addr = gtt->pages_addr;
 366		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
 367				    count, incr, flags);
 368
 369	} else if (count < 3) {
 370		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
 371				    count, incr, flags);
 372
 373	} else {
 374		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
 375				      count, incr, flags);
 376	}
 377}
 378
 379/**
 380 * amdgpu_vm_clear_bo - initially clear the page dir/table
 381 *
 382 * @adev: amdgpu_device pointer
 383 * @bo: bo to clear
 
 
 
 384 *
 385 * need to reserve bo first before calling it.
 
 386 */
 387static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 388			      struct amdgpu_vm *vm,
 389			      struct amdgpu_bo *bo)
 390{
 391	struct amdgpu_ring *ring;
 392	struct fence *fence = NULL;
 393	struct amdgpu_job *job;
 394	unsigned entries;
 395	uint64_t addr;
 396	int r;
 
 
 
 
 
 
 397
 398	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 399
 400	r = reservation_object_reserve_shared(bo->tbo.resv);
 401	if (r)
 402		return r;
 
 403
 404	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 405	if (r)
 406		goto error;
 407
 408	addr = amdgpu_bo_gpu_offset(bo);
 409	entries = amdgpu_bo_size(bo) / 8;
 
 410
 411	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 
 
 
 
 
 412	if (r)
 413		goto error;
 414
 415	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
 416			       0, 0);
 417	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 418
 419	WARN_ON(job->ibs[0].length_dw > 64);
 420	r = amdgpu_job_submit(job, ring, &vm->entity,
 421			      AMDGPU_FENCE_OWNER_VM, &fence);
 422	if (r)
 423		goto error_free;
 424
 425	amdgpu_bo_fence(bo, fence, true);
 426	fence_put(fence);
 427	return 0;
 428
 429error_free:
 430	amdgpu_job_free(job);
 431
 432error:
 
 433	return r;
 434}
 435
 436/**
 437 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 438 *
 439 * @pages_addr: optional DMA address to use for lookup
 440 * @addr: the unmapped addr
 
 441 *
 442 * Look up the physical address of the page that the pte resolves
 443 * to and return the pointer for the page table entry.
 444 */
 445uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 
 
 
 446{
 447	uint64_t result;
 448
 449	if (pages_addr) {
 450		/* page table offset */
 451		result = pages_addr[addr >> PAGE_SHIFT];
 452
 453		/* in case cpu page size != gpu page size*/
 454		result |= addr & (~PAGE_MASK);
 455
 
 
 
 
 456	} else {
 457		/* No mapping required */
 458		result = addr;
 459	}
 460
 461	result &= 0xFFFFFFFFFFFFF000ULL;
 
 
 462
 463	return result;
 
 
 
 464}
 465
 466/**
 467 * amdgpu_vm_update_pdes - make sure that page directory is valid
 468 *
 469 * @adev: amdgpu_device pointer
 470 * @vm: requested vm
 471 * @start: start of GPU address range
 472 * @end: end of GPU address range
 
 
 
 
 
 
 
 
 
 
 
 
 
 473 *
 474 * Allocates new page tables if necessary
 475 * and updates the page directory.
 476 * Returns 0 for success, error for failure.
 477 */
 478int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 479				    struct amdgpu_vm *vm)
 480{
 481	struct amdgpu_ring *ring;
 482	struct amdgpu_bo *pd = vm->page_directory;
 483	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
 484	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 485	uint64_t last_pde = ~0, last_pt = ~0;
 486	unsigned count = 0, pt_idx, ndw;
 487	struct amdgpu_job *job;
 488	struct amdgpu_ib *ib;
 489	struct fence *fence = NULL;
 
 
 
 
 
 
 
 
 
 490
 491	int r;
 
 
 
 
 492
 493	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 
 
 494
 495	/* padding, etc. */
 496	ndw = 64;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 497
 498	/* assume the worst case */
 499	ndw += vm->max_pde_used * 6;
 
 
 500
 501	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 502	if (r)
 503		return r;
 504
 505	ib = &job->ibs[0];
 506
 507	/* walk over the address space and update the page directory */
 508	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
 509		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
 510		uint64_t pde, pt;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 511
 512		if (bo == NULL)
 513			continue;
 514
 515		pt = amdgpu_bo_gpu_offset(bo);
 516		if (vm->page_tables[pt_idx].addr == pt)
 517			continue;
 518		vm->page_tables[pt_idx].addr = pt;
 519
 520		pde = pd_addr + pt_idx * 8;
 521		if (((last_pde + 8 * count) != pde) ||
 522		    ((last_pt + incr * count) != pt)) {
 523
 524			if (count) {
 525				amdgpu_vm_update_pages(adev, NULL, 0, ib,
 526						       last_pde, last_pt,
 527						       count, incr,
 528						       AMDGPU_PTE_VALID);
 529			}
 530
 531			count = 1;
 532			last_pde = pde;
 533			last_pt = pt;
 534		} else {
 535			++count;
 536		}
 537	}
 538
 539	if (count)
 540		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
 541				       count, incr, AMDGPU_PTE_VALID);
 542
 543	if (ib->length_dw != 0) {
 544		amdgpu_ring_pad_ib(ring, ib);
 545		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 546				 AMDGPU_FENCE_OWNER_VM);
 547		WARN_ON(ib->length_dw > ndw);
 548		r = amdgpu_job_submit(job, ring, &vm->entity,
 549				      AMDGPU_FENCE_OWNER_VM, &fence);
 550		if (r)
 551			goto error_free;
 552
 553		amdgpu_bo_fence(pd, fence, true);
 554		fence_put(vm->page_directory_fence);
 555		vm->page_directory_fence = fence_get(fence);
 556		fence_put(fence);
 557
 558	} else {
 559		amdgpu_job_free(job);
 
 
 
 
 
 560	}
 561
 562	return 0;
 563
 564error_free:
 565	amdgpu_job_free(job);
 
 
 566	return r;
 567}
 568
 569/**
 570 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 571 *
 572 * @adev: amdgpu_device pointer
 573 * @gtt: GART instance to use for mapping
 574 * @gtt_flags: GTT hw mapping flags
 575 * @ib: IB for the update
 576 * @pe_start: first PTE to handle
 577 * @pe_end: last PTE to handle
 578 * @addr: addr those PTEs should point to
 579 * @flags: hw mapping flags
 580 */
 581static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 582				struct amdgpu_gart *gtt,
 583				uint32_t gtt_flags,
 584				struct amdgpu_ib *ib,
 585				uint64_t pe_start, uint64_t pe_end,
 586				uint64_t addr, uint32_t flags)
 587{
 588	/**
 589	 * The MC L1 TLB supports variable sized pages, based on a fragment
 590	 * field in the PTE. When this field is set to a non-zero value, page
 591	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
 592	 * flags are considered valid for all PTEs within the fragment range
 593	 * and corresponding mappings are assumed to be physically contiguous.
 594	 *
 595	 * The L1 TLB can store a single PTE for the whole fragment,
 596	 * significantly increasing the space available for translation
 597	 * caching. This leads to large improvements in throughput when the
 598	 * TLB is under pressure.
 599	 *
 600	 * The L2 TLB distributes small and large fragments into two
 601	 * asymmetric partitions. The large fragment cache is significantly
 602	 * larger. Thus, we try to use large fragments wherever possible.
 603	 * Userspace can support this by aligning virtual base address and
 604	 * allocation size to the fragment size.
 605	 */
 606
 607	/* SI and newer are optimized for 64KB */
 608	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
 609	uint64_t frag_align = 0x80;
 610
 611	uint64_t frag_start = ALIGN(pe_start, frag_align);
 612	uint64_t frag_end = pe_end & ~(frag_align - 1);
 613
 614	unsigned count;
 615
 616	/* Abort early if there isn't anything to do */
 617	if (pe_start == pe_end)
 618		return;
 619
 620	/* system pages are non continuously */
 621	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 622
 623		count = (pe_end - pe_start) / 8;
 624		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
 625				       addr, count, AMDGPU_GPU_PAGE_SIZE,
 626				       flags);
 627		return;
 628	}
 629
 630	/* handle the 4K area at the beginning */
 631	if (pe_start != frag_start) {
 632		count = (frag_start - pe_start) / 8;
 633		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
 634				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 635		addr += AMDGPU_GPU_PAGE_SIZE * count;
 636	}
 637
 638	/* handle the area in the middle */
 639	count = (frag_end - frag_start) / 8;
 640	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
 641			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 642
 643	/* handle the 4K area at the end */
 644	if (frag_end != pe_end) {
 645		addr += AMDGPU_GPU_PAGE_SIZE * count;
 646		count = (pe_end - frag_end) / 8;
 647		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
 648				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 649	}
 650}
 651
 652/**
 653 * amdgpu_vm_update_ptes - make sure that page tables are valid
 654 *
 655 * @adev: amdgpu_device pointer
 656 * @gtt: GART instance to use for mapping
 657 * @gtt_flags: GTT hw mapping flags
 658 * @vm: requested vm
     * @ib: IB for the update
 659 * @start: start of GPU address range
 660 * @end: end of GPU address range
 661 * @dst: destination address to map to
 662 * @flags: mapping flags
 663 *
 664 * Update the page tables in the range @start - @end.
 665 */
 666static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 667				  struct amdgpu_gart *gtt,
 668				  uint32_t gtt_flags,
 669				  struct amdgpu_vm *vm,
 670				  struct amdgpu_ib *ib,
 671				  uint64_t start, uint64_t end,
 672				  uint64_t dst, uint32_t flags)
 673{
 674	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 675
 676	uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
 677	uint64_t addr;
 678
 679	/* walk over the address space and update the page tables */
 680	for (addr = start; addr < end; ) {
 681		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
 682		struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
 683		unsigned nptes;
 684		uint64_t pe_start;
 685
 686		if ((addr & ~mask) == (end & ~mask))
 687			nptes = end - addr;
 688		else
 689			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 690
 691		pe_start = amdgpu_bo_gpu_offset(pt);
 692		pe_start += (addr & mask) * 8;
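		/*
		 * Indexing example for the two statements above, assuming a
		 * 9 bit block size for illustration (AMDGPU_VM_PTE_COUNT ==
		 * 512, mask == 0x1ff): addr == 0x12345 GPU pages gives
		 * pt_idx == 0x91, an index of 0x12345 & 0x1ff == 0x145
		 * inside that table, and with 8 bytes per PTE a byte offset
		 * of 0x145 * 8 == 0xa28 on top of the table's GPU address.
		 */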
 693
 694		if (last_pe_end != pe_start) {
 695
 696			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
 697					    last_pe_start, last_pe_end,
 698					    last_dst, flags);
 699
 700			last_pe_start = pe_start;
 701			last_pe_end = pe_start + 8 * nptes;
 702			last_dst = dst;
 703		} else {
 704			last_pe_end += 8 * nptes;
 705		}
 706
 707		addr += nptes;
 708		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 709	}
 710
 711	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
 712			    last_pe_start, last_pe_end,
 713			    last_dst, flags);
 714}
 715
 716/**
 717 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 718 *
 719 * @adev: amdgpu_device pointer
 720 * @gtt: GART instance to use for mapping
 721 * @gtt_flags: flags as they are used for GTT
 722 * @vm: requested vm
 723 * @start: start of mapped range
 724 * @last: last mapped entry
 725 * @flags: flags for the entries
 726 * @addr: addr to set the area to
 727 * @fence: optional resulting fence
 728 *
 729 * Fill in the page table entries between @start and @last.
 730 * Returns 0 for success, -EINVAL for failure.
 731 */
 732static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 733				       struct amdgpu_gart *gtt,
 734				       uint32_t gtt_flags,
 735				       struct amdgpu_vm *vm,
 736				       uint64_t start, uint64_t last,
 737				       uint32_t flags, uint64_t addr,
 738				       struct fence **fence)
 739{
 740	struct amdgpu_ring *ring;
 741	void *owner = AMDGPU_FENCE_OWNER_VM;
 742	unsigned nptes, ncmds, ndw;
 743	struct amdgpu_job *job;
 744	struct amdgpu_ib *ib;
 745	struct fence *f = NULL;
 746	int r;
 747
 748	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 749
 750	/* sync to everything on unmapping */
 751	if (!(flags & AMDGPU_PTE_VALID))
 752		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
 753
 754	nptes = last - start + 1;
 755
 756	/*
 757	 * reserve space for one command every (1 << BLOCK_SIZE)
 758	 * entries or 2k entries (whatever is smaller)
 759	 */
 760	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
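	/*
	 * Example with assumed numbers: updating 1GB worth of PTEs
	 * (nptes == 0x40000) with a 9 bit block size needs
	 * ncmds == (0x40000 >> 9) + 1 == 513 commands.
	 */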
 761
 762	/* padding, etc. */
 763	ndw = 64;
 764
 765	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
 766		/* only copy commands needed */
 767		ndw += ncmds * 7;
 768
 769	} else if (gtt) {
 770		/* header for write data commands */
 771		ndw += ncmds * 4;
 772
 773		/* body of write data command */
 774		ndw += nptes * 2;
 775
 776	} else {
 777		/* set page commands needed */
 778		ndw += ncmds * 10;
 779
 780		/* two extra commands for begin/end of fragment */
 781		ndw += 2 * 10;
 782	}
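	/*
	 * Illustrative budget for the set-page path: clearing 512 PTEs
	 * with a 9 bit block size gives ncmds == (512 >> 9) + 1 == 2 and
	 * ndw == 64 + 2 * 10 + 2 * 10 == 104 dwords, so a 416 byte IB is
	 * requested from amdgpu_job_alloc_with_ib() below.
	 */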
 783
 784	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 785	if (r)
 786		return r;
 787
 788	ib = &job->ibs[0];
 789
 790	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 791			     owner);
 792	if (r)
 793		goto error_free;
 794
 795	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
 796	if (r)
 797		goto error_free;
 798
 799	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
 800			      addr, flags);
 801
 802	amdgpu_ring_pad_ib(ring, ib);
 803	WARN_ON(ib->length_dw > ndw);
 804	r = amdgpu_job_submit(job, ring, &vm->entity,
 805			      AMDGPU_FENCE_OWNER_VM, &f);
 806	if (r)
 807		goto error_free;
 808
 809	amdgpu_bo_fence(vm->page_directory, f, true);
 810	if (fence) {
 811		fence_put(*fence);
 812		*fence = fence_get(f);
 813	}
 814	fence_put(f);
 815	return 0;
 816
 817error_free:
 818	amdgpu_job_free(job);
 819	return r;
 820}
 821
 822/**
 823 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 824 *
 825 * @adev: amdgpu_device pointer
 826 * @gtt: GART instance to use for mapping
 827 * @gtt_flags: flags as they are used for GTT
 828 * @vm: requested vm
 829 * @mapping: mapped range and flags to use for the update
 830 * @addr: addr to set the area to
 831 * @fence: optional resulting fence
 832 *
 833 * Split the mapping into smaller chunks so that each update fits
 834 * into a SDMA IB.
 835 * Returns 0 for success, -EINVAL for failure.
 836 */
 837static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 838				      struct amdgpu_gart *gtt,
 839				      uint32_t gtt_flags,
 840				      struct amdgpu_vm *vm,
 841				      struct amdgpu_bo_va_mapping *mapping,
 842				      uint64_t addr, struct fence **fence)
 843{
 844	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
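	/* with 4KB GPU pages that is 64MB / 4KB == 16384 PTEs per chunk;
	 * e.g. a (hypothetical) 100MB GTT mapping is split into a 64MB
	 * and a 36MB update
	 */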
 845
 846	uint64_t start = mapping->it.start;
 847	uint32_t flags = gtt_flags;
 848	int r;
 849
 850	/* normally bo_va->flags only contains READABLE and WRITEABLE bits,
 851	 * but just in case we filter the flags here first
 852	 */
 853	if (!(mapping->flags & AMDGPU_PTE_READABLE))
 854		flags &= ~AMDGPU_PTE_READABLE;
 855	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
 856		flags &= ~AMDGPU_PTE_WRITEABLE;
 857
 858	trace_amdgpu_vm_bo_update(mapping);
 859
 860	addr += mapping->offset;
 861
 862	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
 863		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
 864						   start, mapping->it.last,
 865						   flags, addr, fence);
 866
 867	while (start != mapping->it.last + 1) {
 868		uint64_t last;
 869
 870		last = min((uint64_t)mapping->it.last, start + max_size - 1);
 871		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
 872						start, last, flags, addr,
 873						fence);
 874		if (r)
 875			return r;
 876
 877		start = last + 1;
 878		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
 879	}
 880
 881	return 0;
 882}
 883
 884/**
 885 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 886 *
 887 * @adev: amdgpu_device pointer
 888 * @bo_va: requested BO and VM object
 889 * @mem: ttm mem, or NULL to clear the mappings
 890 *
 891 * Fill in the page table entries for @bo_va.
 892 * Returns 0 for success, -EINVAL for failure.
 893 *
 894 * Objects have to be reserved and the mutex must be locked!
 895 */
 896int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 897			struct amdgpu_bo_va *bo_va,
 898			struct ttm_mem_reg *mem)
 899{
 900	struct amdgpu_vm *vm = bo_va->vm;
 901	struct amdgpu_bo_va_mapping *mapping;
 902	struct amdgpu_gart *gtt = NULL;
 903	uint32_t flags;
 904	uint64_t addr;
 905	int r;
 906
 907	if (mem) {
 908		addr = (u64)mem->start << PAGE_SHIFT;
 909		switch (mem->mem_type) {
 910		case TTM_PL_TT:
 911			gtt = &bo_va->bo->adev->gart;
 912			break;
 913
 914		case TTM_PL_VRAM:
 915			addr += adev->vm_manager.vram_base_offset;
 916			break;
 917
 918		default:
 919			break;
 920		}
 921	} else {
 922		addr = 0;
 923	}
 924
 925	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 926
 927	spin_lock(&vm->status_lock);
 928	if (!list_empty(&bo_va->vm_status))
 929		list_splice_init(&bo_va->valids, &bo_va->invalids);
 930	spin_unlock(&vm->status_lock);
 931
 932	list_for_each_entry(mapping, &bo_va->invalids, list) {
 933		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
 934					       &bo_va->last_pt_update);
 935		if (r)
 936			return r;
 937	}
 938
 939	if (trace_amdgpu_vm_bo_mapping_enabled()) {
 940		list_for_each_entry(mapping, &bo_va->valids, list)
 941			trace_amdgpu_vm_bo_mapping(mapping);
 942
 943		list_for_each_entry(mapping, &bo_va->invalids, list)
 944			trace_amdgpu_vm_bo_mapping(mapping);
 945	}
 946
 947	spin_lock(&vm->status_lock);
 948	list_splice_init(&bo_va->invalids, &bo_va->valids);
 949	list_del_init(&bo_va->vm_status);
 950	if (!mem)
 951		list_add(&bo_va->vm_status, &vm->cleared);
 952	spin_unlock(&vm->status_lock);
 953
 954	return 0;
 955}
 956
 957/**
 958 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 959 *
 960 * @adev: amdgpu_device pointer
 961 * @vm: requested vm
 962 *
 963 * Make sure all freed BOs are cleared in the PT.
 964 * Returns 0 for success.
 965 *
 966 * PTs have to be reserved and the mutex must be locked!
 967 */
 968int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 969			  struct amdgpu_vm *vm)
 970{
 971	struct amdgpu_bo_va_mapping *mapping;
 972	int r;
 973
 974	while (!list_empty(&vm->freed)) {
 975		mapping = list_first_entry(&vm->freed,
 976			struct amdgpu_bo_va_mapping, list);
 977		list_del(&mapping->list);
 978
 979		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
 980					       0, NULL);
 981		kfree(mapping);
 982		if (r)
 983			return r;
 984
 985	}
 986	return 0;
 987
 988}
 989
 990/**
 991 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 992 *
 993 * @adev: amdgpu_device pointer
 994 * @vm: requested vm
     * @sync: sync object to add fences to
 995 *
 996 * Make sure all invalidated BOs are cleared in the PT.
 997 * Returns 0 for success.
 998 *
 999 * PTs have to be reserved and the mutex must be locked!
1000 */
1001int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1002			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1003{
1004	struct amdgpu_bo_va *bo_va = NULL;
1005	int r = 0;
1006
1007	spin_lock(&vm->status_lock);
1008	while (!list_empty(&vm->invalidated)) {
1009		bo_va = list_first_entry(&vm->invalidated,
1010			struct amdgpu_bo_va, vm_status);
1011		spin_unlock(&vm->status_lock);
1012
1013		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
1014		if (r)
1015			return r;
1016
1017		spin_lock(&vm->status_lock);
1018	}
1019	spin_unlock(&vm->status_lock);
1020
1021	if (bo_va)
1022		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1023
1024	return r;
1025}
1026
1027/**
1028 * amdgpu_vm_bo_add - add a bo to a specific vm
1029 *
1030 * @adev: amdgpu_device pointer
1031 * @vm: requested vm
1032 * @bo: amdgpu buffer object
1033 *
1034 * Add @bo into the requested vm.
1035 * Add @bo to the list of bos associated with the vm.
1036 * Returns newly added bo_va or NULL for failure.
1037 *
1038 * Object has to be reserved!
1039 */
1040struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1041				      struct amdgpu_vm *vm,
1042				      struct amdgpu_bo *bo)
1043{
1044	struct amdgpu_bo_va *bo_va;
1045
1046	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1047	if (bo_va == NULL) {
1048		return NULL;
1049	}
1050	bo_va->vm = vm;
1051	bo_va->bo = bo;
1052	bo_va->ref_count = 1;
1053	INIT_LIST_HEAD(&bo_va->bo_list);
1054	INIT_LIST_HEAD(&bo_va->valids);
1055	INIT_LIST_HEAD(&bo_va->invalids);
1056	INIT_LIST_HEAD(&bo_va->vm_status);
1057
1058	list_add_tail(&bo_va->bo_list, &bo->va);
1059
1060	return bo_va;
1061}
1062
1063/**
1064 * amdgpu_vm_bo_map - map bo inside a vm
1065 *
1066 * @adev: amdgpu_device pointer
1067 * @bo_va: bo_va to store the address
1068 * @saddr: where to map the BO
1069 * @offset: requested offset in the BO
     * @size: size in bytes of the mapping
1070 * @flags: attributes of pages (read/write/valid/etc.)
1071 *
1072 * Add a mapping of the BO at the specified addr into the VM.
1073 * Returns 0 for success, error for failure.
1074 *
1075 * Object has to be reserved and unreserved outside!
1076 */
1077int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1078		     struct amdgpu_bo_va *bo_va,
1079		     uint64_t saddr, uint64_t offset,
1080		     uint64_t size, uint32_t flags)
1081{
1082	struct amdgpu_bo_va_mapping *mapping;
1083	struct amdgpu_vm *vm = bo_va->vm;
1084	struct interval_tree_node *it;
1085	unsigned last_pfn, pt_idx;
1086	uint64_t eaddr;
1087	int r;
1088
1089	/* validate the parameters */
1090	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1091	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1092		return -EINVAL;
1093
1094	/* make sure object fit at this offset */
1095	eaddr = saddr + size - 1;
1096	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1097		return -EINVAL;
1098
1099	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1100	if (last_pfn >= adev->vm_manager.max_pfn) {
1101		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1102			last_pfn, adev->vm_manager.max_pfn);
1103		return -EINVAL;
1104	}
1105
1106	saddr /= AMDGPU_GPU_PAGE_SIZE;
1107	eaddr /= AMDGPU_GPU_PAGE_SIZE;
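	/*
	 * Address example (illustrative values): mapping 1MB at VA
	 * 0x800000 gives eaddr == 0x8fffff, and after the divisions
	 * saddr == 0x800 and eaddr == 0x8ff; both are page frame
	 * numbers from here on.
	 */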
1108
1109	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1110	if (it) {
1111		struct amdgpu_bo_va_mapping *tmp;
1112		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1113		/* bo and tmp overlap, invalid addr */
1114		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1115			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1116			tmp->it.start, tmp->it.last + 1);
1117		r = -EINVAL;
1118		goto error;
1119	}
1120
1121	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1122	if (!mapping) {
1123		r = -ENOMEM;
1124		goto error;
1125	}
1126
1127	INIT_LIST_HEAD(&mapping->list);
1128	mapping->it.start = saddr;
1129	mapping->it.last = eaddr;
1130	mapping->offset = offset;
1131	mapping->flags = flags;
1132
1133	list_add(&mapping->list, &bo_va->invalids);
1134	interval_tree_insert(&mapping->it, &vm->va);
1135
1136	/* Make sure the page tables are allocated */
1137	saddr >>= amdgpu_vm_block_size;
1138	eaddr >>= amdgpu_vm_block_size;
1139
1140	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
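	/*
	 * Continuing the address example above with a 9 bit block size:
	 * pfns 0x800 and 0x8ff both shift down to PDE index 4, so the
	 * loop below allocates at most one page table for that mapping.
	 */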
1141
1142	if (eaddr > vm->max_pde_used)
1143		vm->max_pde_used = eaddr;
1144
1145	/* walk over the address space and allocate the page tables */
1146	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1147		struct reservation_object *resv = vm->page_directory->tbo.resv;
1148		struct amdgpu_bo_list_entry *entry;
1149		struct amdgpu_bo *pt;
1150
1151		entry = &vm->page_tables[pt_idx].entry;
1152		if (entry->robj)
1153			continue;
1154
1155		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1156				     AMDGPU_GPU_PAGE_SIZE, true,
1157				     AMDGPU_GEM_DOMAIN_VRAM,
1158				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1159				     NULL, resv, &pt);
1160		if (r)
1161			goto error_free;
1162
1163		/* Keep a reference to the page table to avoid freeing
1164		 * them up in the wrong order.
1165		 */
1166		pt->parent = amdgpu_bo_ref(vm->page_directory);
1167
1168		r = amdgpu_vm_clear_bo(adev, vm, pt);
1169		if (r) {
1170			amdgpu_bo_unref(&pt);
1171			goto error_free;
1172		}
1173
1174		entry->robj = pt;
1175		entry->priority = 0;
1176		entry->tv.bo = &entry->robj->tbo;
1177		entry->tv.shared = true;
1178		entry->user_pages = NULL;
1179		vm->page_tables[pt_idx].addr = 0;
1180	}
1181
1182	return 0;
1183
1184error_free:
1185	list_del(&mapping->list);
1186	interval_tree_remove(&mapping->it, &vm->va);
1187	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1188	kfree(mapping);
1189
1190error:
1191	return r;
1192}
1193
1194/**
1195 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1196 *
1197 * @adev: amdgpu_device pointer
1198 * @bo_va: bo_va to remove the address from
1199 * @saddr: where the BO is mapped
1200 *
1201 * Remove a mapping of the BO at the specified addr from the VM.
1202 * Returns 0 for success, error for failure.
1203 *
1204 * Object has to be reserved and unreserved outside!
1205 */
1206int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1207		       struct amdgpu_bo_va *bo_va,
1208		       uint64_t saddr)
1209{
1210	struct amdgpu_bo_va_mapping *mapping;
1211	struct amdgpu_vm *vm = bo_va->vm;
1212	bool valid = true;
1213
1214	saddr /= AMDGPU_GPU_PAGE_SIZE;
1215
1216	list_for_each_entry(mapping, &bo_va->valids, list) {
1217		if (mapping->it.start == saddr)
1218			break;
1219	}
1220
1221	if (&mapping->list == &bo_va->valids) {
1222		valid = false;
1223
1224		list_for_each_entry(mapping, &bo_va->invalids, list) {
1225			if (mapping->it.start == saddr)
1226				break;
1227		}
1228
1229		if (&mapping->list == &bo_va->invalids)
1230			return -ENOENT;
1231	}
1232
1233	list_del(&mapping->list);
1234	interval_tree_remove(&mapping->it, &vm->va);
1235	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1236
1237	if (valid)
1238		list_add(&mapping->list, &vm->freed);
1239	else
1240		kfree(mapping);
1241
1242	return 0;
1243}
1244
1245/**
1246 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1247 *
1248 * @adev: amdgpu_device pointer
1249 * @bo_va: requested bo_va
1250 *
1251 * Remove @bo_va->bo from the requested vm.
1252 *
1253 * Object has to be reserved!
1254 */
1255void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1256		      struct amdgpu_bo_va *bo_va)
1257{
1258	struct amdgpu_bo_va_mapping *mapping, *next;
1259	struct amdgpu_vm *vm = bo_va->vm;
1260
1261	list_del(&bo_va->bo_list);
1262
1263	spin_lock(&vm->status_lock);
1264	list_del(&bo_va->vm_status);
1265	spin_unlock(&vm->status_lock);
1266
1267	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1268		list_del(&mapping->list);
1269		interval_tree_remove(&mapping->it, &vm->va);
1270		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1271		list_add(&mapping->list, &vm->freed);
1272	}
1273	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1274		list_del(&mapping->list);
1275		interval_tree_remove(&mapping->it, &vm->va);
1276		kfree(mapping);
1277	}
1278
1279	fence_put(bo_va->last_pt_update);
1280	kfree(bo_va);
1281}
1282
1283/**
1284 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1285 *
1286 * @adev: amdgpu_device pointer
1288 * @bo: amdgpu buffer object
1289 *
1290 * Mark @bo as invalid.
1291 */
1292void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1293			     struct amdgpu_bo *bo)
1294{
1295	struct amdgpu_bo_va *bo_va;
1296
1297	list_for_each_entry(bo_va, &bo->va, bo_list) {
1298		spin_lock(&bo_va->vm->status_lock);
1299		if (list_empty(&bo_va->vm_status))
1300			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1301		spin_unlock(&bo_va->vm->status_lock);
1302	}
1303}
1304
1305/**
1306 * amdgpu_vm_init - initialize a vm instance
1307 *
1308 * @adev: amdgpu_device pointer
1309 * @vm: requested vm
1310 *
1311 * Init @vm fields.
     * Returns 0 for success, error for failure.
1312 */
1313int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1314{
1315	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1316		AMDGPU_VM_PTE_COUNT * 8);
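	/*
	 * Note: with a 9 bit block size (assumed for illustration) one
	 * page table is 512 PTEs * 8 bytes == 4KB, so the alignment used
	 * here is the smaller of that and AMDGPU_VM_PTB_ALIGN_SIZE.
	 */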
1317	unsigned pd_size, pd_entries;
1318	unsigned ring_instance;
1319	struct amdgpu_ring *ring;
1320	struct amd_sched_rq *rq;
1321	int i, r;
1322
1323	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1324		vm->ids[i].mgr_id = NULL;
1325		vm->ids[i].flushed_updates = NULL;
1326	}
1327	vm->va = RB_ROOT;
1328	spin_lock_init(&vm->status_lock);
1329	INIT_LIST_HEAD(&vm->invalidated);
1330	INIT_LIST_HEAD(&vm->cleared);
1331	INIT_LIST_HEAD(&vm->freed);
1332
1333	pd_size = amdgpu_vm_directory_size(adev);
1334	pd_entries = amdgpu_vm_num_pdes(adev);
1335
1336	/* allocate page table array */
1337	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1338	if (vm->page_tables == NULL) {
1339		DRM_ERROR("Cannot allocate memory for page table array\n");
1340		return -ENOMEM;
1341	}
1342
1343	/* create scheduler entity for page table updates */
1344
1345	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1346	ring_instance %= adev->vm_manager.vm_pte_num_rings;
1347	ring = adev->vm_manager.vm_pte_rings[ring_instance];
1348	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1349	r = amd_sched_entity_init(&ring->sched, &vm->entity,
1350				  rq, amdgpu_sched_jobs);
1351	if (r)
1352		return r;
1353
1354	vm->page_directory_fence = NULL;
1355
1356	r = amdgpu_bo_create(adev, pd_size, align, true,
1357			     AMDGPU_GEM_DOMAIN_VRAM,
1358			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1359			     NULL, NULL, &vm->page_directory);
1360	if (r)
1361		goto error_free_sched_entity;
1362
1363	r = amdgpu_bo_reserve(vm->page_directory, false);
1364	if (r)
1365		goto error_free_page_directory;
1366
1367	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1368	amdgpu_bo_unreserve(vm->page_directory);
1369	if (r)
1370		goto error_free_page_directory;
1371
1372	return 0;
1373
1374error_free_page_directory:
1375	amdgpu_bo_unref(&vm->page_directory);
1376	vm->page_directory = NULL;
1377
1378error_free_sched_entity:
1379	amd_sched_entity_fini(&ring->sched, &vm->entity);
1380
1381	return r;
1382}
1383
1384/**
1385 * amdgpu_vm_fini - tear down a vm instance
1386 *
1387 * @adev: amdgpu_device pointer
1388 * @vm: requested vm
1389 *
1390 * Tear down @vm.
1391 * Unbind the VM and remove all bos from the vm bo list
1392 */
1393void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1394{
1395	struct amdgpu_bo_va_mapping *mapping, *tmp;
1396	int i;
1397
1398	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1399
1400	if (!RB_EMPTY_ROOT(&vm->va)) {
1401		dev_err(adev->dev, "still active bo inside vm\n");
1402	}
1403	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1404		list_del(&mapping->list);
1405		interval_tree_remove(&mapping->it, &vm->va);
1406		kfree(mapping);
1407	}
1408	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1409		list_del(&mapping->list);
1410		kfree(mapping);
1411	}
1412
1413	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1414		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1415	drm_free_large(vm->page_tables);
1416
1417	amdgpu_bo_unref(&vm->page_directory);
1418	fence_put(vm->page_directory_fence);
1419
1420	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1421		struct amdgpu_vm_id *id = &vm->ids[i];
1422
1423		if (id->mgr_id)
1424			atomic_long_cmpxchg(&id->mgr_id->owner,
1425					    (long)id, 0);
1426		fence_put(id->flushed_updates);
1427	}
1428}
1429
1430/**
1431 * amdgpu_vm_manager_init - init the VM manager
1432 *
1433 * @adev: amdgpu_device pointer
1434 *
1435 * Initialize the VM manager structures
1436 */
1437void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1438{
1439	unsigned i;
1440
1441	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1442
1443	/* skip over VMID 0, since it is the system VM */
1444	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1445		amdgpu_vm_reset_id(adev, i);
1446		list_add_tail(&adev->vm_manager.ids[i].list,
1447			      &adev->vm_manager.ids_lru);
1448	}
1449
1450	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1451}
1452
1453/**
1454 * amdgpu_vm_manager_fini - cleanup VM manager
1455 *
1456 * @adev: amdgpu_device pointer
1457 *
1458 * Cleanup the VM manager and free resources.
1459 */
1460void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1461{
1462	unsigned i;
1463
1464	for (i = 0; i < AMDGPU_NUM_VM; ++i)
1465		fence_put(adev->vm_manager.ids[i].active);
1466}