   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28
  29#include <linux/dma-fence-array.h>
  30#include <linux/interval_tree_generic.h>
  31#include <linux/idr.h>
  32#include <linux/dma-buf.h>
  33
  34#include <drm/amdgpu_drm.h>
  35#include <drm/drm_drv.h>
  36#include "amdgpu.h"
  37#include "amdgpu_trace.h"
  38#include "amdgpu_amdkfd.h"
  39#include "amdgpu_gmc.h"
  40#include "amdgpu_xgmi.h"
  41#include "amdgpu_dma_buf.h"
  42#include "amdgpu_res_cursor.h"
  43#include "kfd_svm.h"
  44
  45/**
  46 * DOC: GPUVM
  47 *
  48 * GPUVM is the MMU functionality provided on the GPU.
  49 * GPUVM is similar to the legacy GART on older asics, however
  50 * rather than there being a single global GART table
  51 * for the entire GPU, there can be multiple GPUVM page tables active
   52 * at any given time.  The GPUVM page tables can contain a mix of
  53 * VRAM pages and system pages (both memory and MMIO) and system pages
  54 * can be mapped as snooped (cached system pages) or unsnooped
  55 * (uncached system pages).
  56 *
  57 * Each active GPUVM has an ID associated with it and there is a page table
  58 * linked with each VMID.  When executing a command buffer,
  59 * the kernel tells the engine what VMID to use for that command
  60 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  61 * The userspace drivers maintain their own address space and the kernel
   62 * sets up their page tables accordingly when they submit their
  63 * command buffers and a VMID is assigned.
  64 * The hardware supports up to 16 active GPUVMs at any given time.
  65 *
  66 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
  67 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
  68 * as other features such as encryption and caching attributes.
  69 *
  70 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
  71 * addition to an aperture managed by a page table, VMID 0 also has
  72 * several other apertures.  There is an aperture for direct access to VRAM
  73 * and there is a legacy AGP aperture which just forwards accesses directly
  74 * to the matching system physical addresses (or IOVAs when an IOMMU is
  75 * present).  These apertures provide direct access to these memories without
  76 * incurring the overhead of a page table.  VMID 0 is used by the kernel
  77 * driver for tasks like memory management.
  78 *
  79 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
   80 * For user applications, each application can have its own unique GPUVM
  81 * address space.  The application manages the address space and the kernel
   82 * driver manages the GPUVM page tables for each process.  If a GPU client
  83 * accesses an invalid page, it will generate a GPU page fault, similar to
  84 * accessing an invalid page on a CPU.
  85 */
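/*
 * Illustrative usage sketch (not part of the driver): a rough outline of how
 * a per-process mapping is created with the helpers in this file.
 * Reservation locking, error unwinding and command submission are omitted,
 * and "va_addr" is a hypothetical, page-aligned virtual address:
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);		// BO must be reserved
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 */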
  86
  87#define START(node) ((node)->start)
  88#define LAST(node) ((node)->last)
  89
  90INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
  91		     START, LAST, static, amdgpu_vm_it)
  92
  93#undef START
  94#undef LAST
  95
  96/**
  97 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
  98 */
  99struct amdgpu_prt_cb {
 100
 101	/**
 102	 * @adev: amdgpu device
 103	 */
 104	struct amdgpu_device *adev;
 105
 106	/**
 107	 * @cb: callback
 108	 */
 109	struct dma_fence_cb cb;
 110};
 111
 112/**
 113 * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
 114 */
 115struct amdgpu_vm_tlb_seq_cb {
 116	/**
 117	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
 118	 */
 119	struct amdgpu_vm *vm;
 120
 121	/**
 122	 * @cb: callback
 123	 */
 124	struct dma_fence_cb cb;
 125};
 126
 127/**
 128 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 129 *
 130 * @adev: amdgpu_device pointer
 131 * @vm: amdgpu_vm pointer
 132 * @pasid: the pasid the VM is using on this GPU
 133 *
  134 * Set the pasid this VM is using on this GPU; it can also be used to remove the
 135 * pasid by passing in zero.
 136 *
 137 */
 138int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 139			u32 pasid)
 140{
 141	int r;
 142
 143	if (vm->pasid == pasid)
 144		return 0;
 145
 146	if (vm->pasid) {
 147		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
 148		if (r < 0)
 149			return r;
 150
 151		vm->pasid = 0;
 152	}
 153
 154	if (pasid) {
 155		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
 156					GFP_KERNEL));
 157		if (r < 0)
 158			return r;
 159
 160		vm->pasid = pasid;
 161	}
 162
 163
 164	return 0;
 165}
 166
 167/**
 168 * amdgpu_vm_bo_evicted - vm_bo is evicted
 169 *
 170 * @vm_bo: vm_bo which is evicted
 171 *
 172 * State for PDs/PTs and per VM BOs which are not at the location they should
 173 * be.
 174 */
 175static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
 176{
 177	struct amdgpu_vm *vm = vm_bo->vm;
 178	struct amdgpu_bo *bo = vm_bo->bo;
 179
 180	vm_bo->moved = true;
 181	spin_lock(&vm_bo->vm->status_lock);
 182	if (bo->tbo.type == ttm_bo_type_kernel)
 183		list_move(&vm_bo->vm_status, &vm->evicted);
 184	else
 185		list_move_tail(&vm_bo->vm_status, &vm->evicted);
 186	spin_unlock(&vm_bo->vm->status_lock);
 187}
 188/**
 189 * amdgpu_vm_bo_moved - vm_bo is moved
 190 *
 191 * @vm_bo: vm_bo which is moved
 192 *
 193 * State for per VM BOs which are moved, but that change is not yet reflected
 194 * in the page tables.
 195 */
 196static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
 197{
 198	spin_lock(&vm_bo->vm->status_lock);
 199	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 200	spin_unlock(&vm_bo->vm->status_lock);
 201}
 202
 203/**
 204 * amdgpu_vm_bo_idle - vm_bo is idle
 205 *
 206 * @vm_bo: vm_bo which is now idle
 207 *
 208 * State for PDs/PTs and per VM BOs which have gone through the state machine
 209 * and are now idle.
 210 */
 211static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
 212{
 213	spin_lock(&vm_bo->vm->status_lock);
 214	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
 215	spin_unlock(&vm_bo->vm->status_lock);
 216	vm_bo->moved = false;
 217}
 218
 219/**
 220 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 221 *
 222 * @vm_bo: vm_bo which is now invalidated
 223 *
  224 * State for normal BOs which are invalidated and that change is not yet reflected
 225 * in the PTs.
 226 */
 227static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
 228{
 229	spin_lock(&vm_bo->vm->status_lock);
 230	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
 231	spin_unlock(&vm_bo->vm->status_lock);
 232}
 233
 234/**
  235 * amdgpu_vm_bo_relocated - vm_bo is relocated
 236 *
 237 * @vm_bo: vm_bo which is relocated
 238 *
  239 * State for PDs/PTs which need to update their parent PD.
 240 * For the root PD, just move to idle state.
 241 */
 242static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
 243{
 244	if (vm_bo->bo->parent) {
 245		spin_lock(&vm_bo->vm->status_lock);
 246		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
 247		spin_unlock(&vm_bo->vm->status_lock);
 248	} else {
 249		amdgpu_vm_bo_idle(vm_bo);
 250	}
 251}
 252
 253/**
 254 * amdgpu_vm_bo_done - vm_bo is done
 255 *
 256 * @vm_bo: vm_bo which is now done
 257 *
  258 * State for normal BOs which are invalidated and whose change has already been updated
 259 * in the PTs.
 260 */
 261static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
 262{
 263	spin_lock(&vm_bo->vm->status_lock);
 264	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
 265	spin_unlock(&vm_bo->vm->status_lock);
 266}
 267
 268/**
 269 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 270 *
 271 * @base: base structure for tracking BO usage in a VM
 272 * @vm: vm to which bo is to be added
 273 * @bo: amdgpu buffer object
 274 *
 275 * Initialize a bo_va_base structure and add it to the appropriate lists
 276 *
 277 */
 278void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 279			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
 280{
 281	base->vm = vm;
 282	base->bo = bo;
 283	base->next = NULL;
 284	INIT_LIST_HEAD(&base->vm_status);
 285
 286	if (!bo)
 287		return;
 288	base->next = bo->vm_bo;
 289	bo->vm_bo = base;
 290
 291	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 292		return;
 293
 294	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
 295
 296	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
 297	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
 298		amdgpu_vm_bo_relocated(base);
 299	else
 300		amdgpu_vm_bo_idle(base);
 301
 302	if (bo->preferred_domains &
 303	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 304		return;
 305
 306	/*
  307	 * We checked all the prerequisites, but it looks like this per-VM BO
  308	 * is currently evicted. Add the BO to the evicted list to make sure it
  309	 * is validated on the next VM use to avoid a fault.
  310	 */
 311	amdgpu_vm_bo_evicted(base);
 312}
 313
 314/**
 315 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 316 *
 317 * @vm: vm providing the BOs
 318 * @validated: head of validation list
 319 * @entry: entry to add
 320 *
 321 * Add the page directory to the list of BOs to
 322 * validate for command submission.
 323 */
 324void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 325			 struct list_head *validated,
 326			 struct amdgpu_bo_list_entry *entry)
 327{
 328	entry->priority = 0;
 329	entry->tv.bo = &vm->root.bo->tbo;
 330	/* Two for VM updates, one for TTM and one for the CS job */
 331	entry->tv.num_shared = 4;
 332	entry->user_pages = NULL;
 333	list_add(&entry->tv.head, validated);
 334}
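/*
 * Illustrative usage sketch (not part of the driver): callers typically put
 * the page directory on their ticketed reservation list before touching the
 * VM, roughly like the GEM VA and CS paths do:
 *
 *	struct amdgpu_bo_list_entry vm_pd;
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	int r;
 *
 *	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 *	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 */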
 335
 336/**
 337 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 338 *
 339 * @adev: amdgpu device pointer
 340 * @vm: vm providing the BOs
 341 *
 342 * Move all BOs to the end of LRU and remember their positions to put them
 343 * together.
 344 */
 345void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 346				struct amdgpu_vm *vm)
 347{
 348	spin_lock(&adev->mman.bdev.lru_lock);
 349	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 350	spin_unlock(&adev->mman.bdev.lru_lock);
 351}
 352
 353/**
 354 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 355 *
 356 * @adev: amdgpu device pointer
 357 * @vm: vm providing the BOs
 358 * @validate: callback to do the validation
 359 * @param: parameter for the validation callback
 360 *
  361 * Validate the page table BOs on command submission if necessary.
 362 *
 363 * Returns:
 364 * Validation result.
 365 */
 366int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 367			      int (*validate)(void *p, struct amdgpu_bo *bo),
 368			      void *param)
 369{
 370	struct amdgpu_vm_bo_base *bo_base;
 371	struct amdgpu_bo *shadow;
 372	struct amdgpu_bo *bo;
 373	int r;
 374
 375	spin_lock(&vm->status_lock);
 376	while (!list_empty(&vm->evicted)) {
 377		bo_base = list_first_entry(&vm->evicted,
 378					   struct amdgpu_vm_bo_base,
 379					   vm_status);
 380		spin_unlock(&vm->status_lock);
 381
 382		bo = bo_base->bo;
 383		shadow = amdgpu_bo_shadowed(bo);
 384
 385		r = validate(param, bo);
 386		if (r)
 387			return r;
 388		if (shadow) {
 389			r = validate(param, shadow);
 390			if (r)
 391				return r;
 392		}
 393
 394		if (bo->tbo.type != ttm_bo_type_kernel) {
 395			amdgpu_vm_bo_moved(bo_base);
 396		} else {
 397			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
 398			amdgpu_vm_bo_relocated(bo_base);
 399		}
 400		spin_lock(&vm->status_lock);
 401	}
 402	spin_unlock(&vm->status_lock);
 403
 404	amdgpu_vm_eviction_lock(vm);
 405	vm->evicting = false;
 406	amdgpu_vm_eviction_unlock(vm);
 407
 408	return 0;
 409}
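/*
 * Illustrative sketch of a validation callback as passed to
 * amdgpu_vm_validate_pt_bos(); the in-tree callers use their own helpers
 * (e.g. the CS and KFD paths).  This simplified, hypothetical version just
 * re-places the BO in its allowed domains:
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { false, false };
 *
 *		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */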
 410
 411/**
 412 * amdgpu_vm_ready - check VM is ready for updates
 413 *
 414 * @vm: VM to check
 415 *
 416 * Check if all VM PDs/PTs are ready for updates
 417 *
 418 * Returns:
 419 * True if VM is not evicting.
 420 */
 421bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 422{
 423	bool empty;
 424	bool ret;
 425
 426	amdgpu_vm_eviction_lock(vm);
 427	ret = !vm->evicting;
 428	amdgpu_vm_eviction_unlock(vm);
 429
 430	spin_lock(&vm->status_lock);
 431	empty = list_empty(&vm->evicted);
 432	spin_unlock(&vm->status_lock);
 433
 434	return ret && empty;
 435}
 436
 437/**
 438 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 439 *
 440 * @adev: amdgpu_device pointer
 441 */
 442void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
 443{
 444	const struct amdgpu_ip_block *ip_block;
 445	bool has_compute_vm_bug;
 446	struct amdgpu_ring *ring;
 447	int i;
 448
 449	has_compute_vm_bug = false;
 450
 451	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 452	if (ip_block) {
  453		/* Compute has a VM bug for GFX version <= 7.
  454		   Compute has a VM bug for GFX 8 MEC firmware version < 673. */
 455		if (ip_block->version->major <= 7)
 456			has_compute_vm_bug = true;
 457		else if (ip_block->version->major == 8)
 458			if (adev->gfx.mec_fw_version < 673)
 459				has_compute_vm_bug = true;
 460	}
 461
 462	for (i = 0; i < adev->num_rings; i++) {
 463		ring = adev->rings[i];
 464		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
 465			/* only compute rings */
 466			ring->has_compute_vm_bug = has_compute_vm_bug;
 467		else
 468			ring->has_compute_vm_bug = false;
 469	}
 470}
 471
 472/**
 473 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 474 *
 475 * @ring: ring on which the job will be submitted
 476 * @job: job to submit
 477 *
 478 * Returns:
 479 * True if sync is needed.
 480 */
 481bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 482				  struct amdgpu_job *job)
 483{
 484	struct amdgpu_device *adev = ring->adev;
 485	unsigned vmhub = ring->funcs->vmhub;
 486	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 487
 488	if (job->vmid == 0)
 489		return false;
 490
 491	if (job->vm_needs_flush || ring->has_compute_vm_bug)
 492		return true;
 493
 494	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
 495		return true;
 496
 497	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
 498		return true;
 499
 500	return false;
 501}
 502
 503/**
 504 * amdgpu_vm_flush - hardware flush the vm
 505 *
 506 * @ring: ring to use for flush
 507 * @job:  related job
 508 * @need_pipe_sync: is pipe sync needed
 509 *
 510 * Emit a VM flush when it is necessary.
 511 *
 512 * Returns:
 513 * 0 on success, errno otherwise.
 514 */
 515int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 516		    bool need_pipe_sync)
 517{
 518	struct amdgpu_device *adev = ring->adev;
 519	unsigned vmhub = ring->funcs->vmhub;
 520	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 521	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 522	bool spm_update_needed = job->spm_update_needed;
 523	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
 524		job->gds_switch_needed;
 525	bool vm_flush_needed = job->vm_needs_flush;
 526	struct dma_fence *fence = NULL;
 527	bool pasid_mapping_needed = false;
 528	unsigned patch_offset = 0;
 529	int r;
 530
 531	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 532		gds_switch_needed = true;
 533		vm_flush_needed = true;
 534		pasid_mapping_needed = true;
 535		spm_update_needed = true;
 536	}
 537
 538	mutex_lock(&id_mgr->lock);
 539	if (id->pasid != job->pasid || !id->pasid_mapping ||
 540	    !dma_fence_is_signaled(id->pasid_mapping))
 541		pasid_mapping_needed = true;
 542	mutex_unlock(&id_mgr->lock);
 543
 544	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
 545	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
 546			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
 547	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
 548		ring->funcs->emit_wreg;
 549
 550	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 551		return 0;
 552
 553	amdgpu_ring_ib_begin(ring);
 554	if (ring->funcs->init_cond_exec)
 555		patch_offset = amdgpu_ring_init_cond_exec(ring);
 556
 557	if (need_pipe_sync)
 558		amdgpu_ring_emit_pipeline_sync(ring);
 559
 560	if (vm_flush_needed) {
 561		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
 562		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
 563	}
 564
 565	if (pasid_mapping_needed)
 566		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 567
 568	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
 569		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
 570
 571	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
 572	    gds_switch_needed) {
 573		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
 574					    job->gds_size, job->gws_base,
 575					    job->gws_size, job->oa_base,
 576					    job->oa_size);
 577	}
 578
 579	if (vm_flush_needed || pasid_mapping_needed) {
 580		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 581		if (r)
 582			return r;
 583	}
 584
 585	if (vm_flush_needed) {
 586		mutex_lock(&id_mgr->lock);
 587		dma_fence_put(id->last_flush);
 588		id->last_flush = dma_fence_get(fence);
 589		id->current_gpu_reset_count =
 590			atomic_read(&adev->gpu_reset_counter);
 591		mutex_unlock(&id_mgr->lock);
 592	}
 593
 594	if (pasid_mapping_needed) {
 595		mutex_lock(&id_mgr->lock);
 596		id->pasid = job->pasid;
 597		dma_fence_put(id->pasid_mapping);
 598		id->pasid_mapping = dma_fence_get(fence);
 599		mutex_unlock(&id_mgr->lock);
 600	}
 601	dma_fence_put(fence);
 602
 603	if (ring->funcs->patch_cond_exec)
 604		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 605
 606	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
 607	if (ring->funcs->emit_switch_buffer) {
 608		amdgpu_ring_emit_switch_buffer(ring);
 609		amdgpu_ring_emit_switch_buffer(ring);
 610	}
 611	amdgpu_ring_ib_end(ring);
 612	return 0;
 613}
 614
 615/**
 616 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 617 *
 618 * @vm: requested vm
 619 * @bo: requested buffer object
 620 *
 621 * Find @bo inside the requested vm.
  622 * Search inside the @bo's vm list for the requested vm
 623 * Returns the found bo_va or NULL if none is found
 624 *
 625 * Object has to be reserved!
 626 *
 627 * Returns:
 628 * Found bo_va or NULL.
 629 */
 630struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 631				       struct amdgpu_bo *bo)
 632{
 633	struct amdgpu_vm_bo_base *base;
 634
 635	for (base = bo->vm_bo; base; base = base->next) {
 636		if (base->vm != vm)
 637			continue;
 638
 639		return container_of(base, struct amdgpu_bo_va, base);
 640	}
 641	return NULL;
 642}
 643
 644/**
 645 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 646 *
 647 * @pages_addr: optional DMA address to use for lookup
 648 * @addr: the unmapped addr
 649 *
 650 * Look up the physical address of the page that the pte resolves
 651 * to.
 652 *
 653 * Returns:
 654 * The pointer for the page table entry.
 655 */
 656uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 657{
 658	uint64_t result;
 659
 660	/* page table offset */
 661	result = pages_addr[addr >> PAGE_SHIFT];
 662
  663	/* in case cpu page size != gpu page size */
 664	result |= addr & (~PAGE_MASK);
 665
 666	result &= 0xFFFFFFFFFFFFF000ULL;
 667
 668	return result;
 669}
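/*
 * Worked example (illustrative): with 64K CPU pages and 4K GPU pages,
 * addr = 0x12345 looks up pages_addr[0x12345 >> 16] = pages_addr[1], ORs in
 * the in-page offset 0x2345 and then masks off the low 12 bits, so the
 * returned address is the DMA address of that 64K page plus 0x2000.  With 4K
 * CPU pages the OR and the final mask cancel out and the lookup alone
 * determines the result.
 */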
 670
 671/**
 672 * amdgpu_vm_update_pdes - make sure that all directories are valid
 673 *
 674 * @adev: amdgpu_device pointer
 675 * @vm: requested vm
 676 * @immediate: submit immediately to the paging queue
 677 *
 678 * Makes sure all directories are up to date.
 679 *
 680 * Returns:
 681 * 0 for success, error for failure.
 682 */
 683int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 684			  struct amdgpu_vm *vm, bool immediate)
 685{
 686	struct amdgpu_vm_update_params params;
 687	struct amdgpu_vm_bo_base *entry;
 688	bool flush_tlb_needed = false;
 689	LIST_HEAD(relocated);
 690	int r, idx;
 691
 692	spin_lock(&vm->status_lock);
 693	list_splice_init(&vm->relocated, &relocated);
 694	spin_unlock(&vm->status_lock);
 695
 696	if (list_empty(&relocated))
 697		return 0;
 698
 699	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 700		return -ENODEV;
 701
 702	memset(&params, 0, sizeof(params));
 703	params.adev = adev;
 704	params.vm = vm;
 705	params.immediate = immediate;
 706
 707	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 708	if (r)
 709		goto error;
 710
 711	list_for_each_entry(entry, &relocated, vm_status) {
 712		/* vm_flush_needed after updating moved PDEs */
 713		flush_tlb_needed |= entry->moved;
 714
 715		r = amdgpu_vm_pde_update(&params, entry);
 716		if (r)
 717			goto error;
 718	}
 719
 720	r = vm->update_funcs->commit(&params, &vm->last_update);
 721	if (r)
 722		goto error;
 723
 724	if (flush_tlb_needed)
 725		atomic64_inc(&vm->tlb_seq);
 726
 727	while (!list_empty(&relocated)) {
 728		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
 729					 vm_status);
 730		amdgpu_vm_bo_idle(entry);
 731	}
 732
 733error:
 734	drm_dev_exit(idx);
 735	return r;
 736}
 737
 738/**
 739 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 740 * @fence: unused
 741 * @cb: the callback structure
 742 *
 743 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 744 */
 745static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
 746				 struct dma_fence_cb *cb)
 747{
 748	struct amdgpu_vm_tlb_seq_cb *tlb_cb;
 749
 750	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
 751	atomic64_inc(&tlb_cb->vm->tlb_seq);
 752	kfree(tlb_cb);
 753}
 754
 755/**
 756 * amdgpu_vm_update_range - update a range in the vm page table
 757 *
 758 * @adev: amdgpu_device pointer to use for commands
 759 * @vm: the VM to update the range
 760 * @immediate: immediate submission in a page fault
 761 * @unlocked: unlocked invalidation during MM callback
 762 * @flush_tlb: trigger tlb invalidation after update completed
 763 * @resv: fences we need to sync to
 764 * @start: start of mapped range
 765 * @last: last mapped entry
 766 * @flags: flags for the entries
 767 * @offset: offset into nodes and pages_addr
 768 * @vram_base: base for vram mappings
 769 * @res: ttm_resource to map
 770 * @pages_addr: DMA addresses to use for mapping
 771 * @fence: optional resulting fence
 772 *
 773 * Fill in the page table entries between @start and @last.
 774 *
 775 * Returns:
  776 * 0 for success, negative error code for failure.
 777 */
 778int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 779			   bool immediate, bool unlocked, bool flush_tlb,
 780			   struct dma_resv *resv, uint64_t start, uint64_t last,
 781			   uint64_t flags, uint64_t offset, uint64_t vram_base,
 782			   struct ttm_resource *res, dma_addr_t *pages_addr,
 783			   struct dma_fence **fence)
 784{
 785	struct amdgpu_vm_update_params params;
 786	struct amdgpu_vm_tlb_seq_cb *tlb_cb;
 787	struct amdgpu_res_cursor cursor;
 788	enum amdgpu_sync_mode sync_mode;
 789	int r, idx;
 790
 791	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 792		return -ENODEV;
 793
 794	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
 795	if (!tlb_cb) {
 796		r = -ENOMEM;
 797		goto error_unlock;
 798	}
 799
  800	/* On Vega20 with XGMI, PTEs get inadvertently cached in the L2 texture
  801	 * cache, so do a heavy-weight TLB flush unconditionally.
 802	 */
 803	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
 804		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
 805
 806	/*
 807	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
 808	 */
 809	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
 810
 811	memset(&params, 0, sizeof(params));
 812	params.adev = adev;
 813	params.vm = vm;
 814	params.immediate = immediate;
 815	params.pages_addr = pages_addr;
 816	params.unlocked = unlocked;
 817
 818	/* Implicitly sync to command submissions in the same VM before
 819	 * unmapping. Sync to moving fences before mapping.
 820	 */
 821	if (!(flags & AMDGPU_PTE_VALID))
 822		sync_mode = AMDGPU_SYNC_EQ_OWNER;
 823	else
 824		sync_mode = AMDGPU_SYNC_EXPLICIT;
 825
 826	amdgpu_vm_eviction_lock(vm);
 827	if (vm->evicting) {
 828		r = -EBUSY;
 829		goto error_free;
 830	}
 831
 832	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
 833		struct dma_fence *tmp = dma_fence_get_stub();
 834
 835		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
 836		swap(vm->last_unlocked, tmp);
 837		dma_fence_put(tmp);
 838	}
 839
 840	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 841	if (r)
 842		goto error_free;
 843
 844	amdgpu_res_first(pages_addr ? NULL : res, offset,
 845			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
 846	while (cursor.remaining) {
 847		uint64_t tmp, num_entries, addr;
 848
 849		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
 850		if (pages_addr) {
 851			bool contiguous = true;
 852
 853			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
 854				uint64_t pfn = cursor.start >> PAGE_SHIFT;
 855				uint64_t count;
 856
 857				contiguous = pages_addr[pfn + 1] ==
 858					pages_addr[pfn] + PAGE_SIZE;
 859
 860				tmp = num_entries /
 861					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 862				for (count = 2; count < tmp; ++count) {
 863					uint64_t idx = pfn + count;
 864
 865					if (contiguous != (pages_addr[idx] ==
 866					    pages_addr[idx - 1] + PAGE_SIZE))
 867						break;
 868				}
 869				num_entries = count *
 870					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 871			}
 872
 873			if (!contiguous) {
 874				addr = cursor.start;
 875				params.pages_addr = pages_addr;
 876			} else {
 877				addr = pages_addr[cursor.start >> PAGE_SHIFT];
 878				params.pages_addr = NULL;
 879			}
 880
 881		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 882			addr = vram_base + cursor.start;
 883		} else {
 884			addr = 0;
 885		}
 886
 887		tmp = start + num_entries;
 888		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
 889		if (r)
 890			goto error_free;
 891
 892		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
 893		start = tmp;
 894	}
 895
 896	r = vm->update_funcs->commit(&params, fence);
 897
 898	if (flush_tlb || params.table_freed) {
 899		tlb_cb->vm = vm;
 900		if (fence && *fence &&
 901		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
 902					   amdgpu_vm_tlb_seq_cb)) {
 903			dma_fence_put(vm->last_tlb_flush);
 904			vm->last_tlb_flush = dma_fence_get(*fence);
 905		} else {
 906			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
 907		}
 908		tlb_cb = NULL;
 909	}
 910
 911error_free:
 912	kfree(tlb_cb);
 913
 914error_unlock:
 915	amdgpu_vm_eviction_unlock(vm);
 916	drm_dev_exit(idx);
 917	return r;
 918}
 919
 920void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
 921				uint64_t *gtt_mem, uint64_t *cpu_mem)
 922{
 923	struct amdgpu_bo_va *bo_va, *tmp;
 924
 925	spin_lock(&vm->status_lock);
 926	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
 927		if (!bo_va->base.bo)
 928			continue;
 929		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 930				gtt_mem, cpu_mem);
 931	}
 932	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
 933		if (!bo_va->base.bo)
 934			continue;
 935		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 936				gtt_mem, cpu_mem);
 937	}
 938	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
 939		if (!bo_va->base.bo)
 940			continue;
 941		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 942				gtt_mem, cpu_mem);
 943	}
 944	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 945		if (!bo_va->base.bo)
 946			continue;
 947		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 948				gtt_mem, cpu_mem);
 949	}
 950	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
 951		if (!bo_va->base.bo)
 952			continue;
 953		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 954				gtt_mem, cpu_mem);
 955	}
 956	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
 957		if (!bo_va->base.bo)
 958			continue;
 959		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
 960				gtt_mem, cpu_mem);
 961	}
 962	spin_unlock(&vm->status_lock);
 963}
 964/**
 965 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 966 *
 967 * @adev: amdgpu_device pointer
 968 * @bo_va: requested BO and VM object
 969 * @clear: if true clear the entries
 970 *
 971 * Fill in the page table entries for @bo_va.
 972 *
 973 * Returns:
 974 * 0 for success, -EINVAL for failure.
 975 */
 976int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 977			bool clear)
 978{
 979	struct amdgpu_bo *bo = bo_va->base.bo;
 980	struct amdgpu_vm *vm = bo_va->base.vm;
 981	struct amdgpu_bo_va_mapping *mapping;
 982	dma_addr_t *pages_addr = NULL;
 983	struct ttm_resource *mem;
 984	struct dma_fence **last_update;
 985	bool flush_tlb = clear;
 986	struct dma_resv *resv;
 987	uint64_t vram_base;
 988	uint64_t flags;
 989	int r;
 990
 991	if (clear || !bo) {
 992		mem = NULL;
 993		resv = vm->root.bo->tbo.base.resv;
 994	} else {
 995		struct drm_gem_object *obj = &bo->tbo.base;
 996
 997		resv = bo->tbo.base.resv;
 998		if (obj->import_attach && bo_va->is_xgmi) {
 999			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1000			struct drm_gem_object *gobj = dma_buf->priv;
1001			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1002
1003			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
1004				bo = gem_to_amdgpu_bo(gobj);
1005		}
1006		mem = bo->tbo.resource;
1007		if (mem->mem_type == TTM_PL_TT ||
1008		    mem->mem_type == AMDGPU_PL_PREEMPT)
1009			pages_addr = bo->tbo.ttm->dma_address;
1010	}
1011
1012	if (bo) {
1013		struct amdgpu_device *bo_adev;
1014
1015		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1016
1017		if (amdgpu_bo_encrypted(bo))
1018			flags |= AMDGPU_PTE_TMZ;
1019
1020		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1021		vram_base = bo_adev->vm_manager.vram_base_offset;
1022	} else {
1023		flags = 0x0;
1024		vram_base = 0;
1025	}
1026
1027	if (clear || (bo && bo->tbo.base.resv ==
1028		      vm->root.bo->tbo.base.resv))
1029		last_update = &vm->last_update;
1030	else
1031		last_update = &bo_va->last_pt_update;
1032
1033	if (!clear && bo_va->base.moved) {
1034		flush_tlb = true;
1035		list_splice_init(&bo_va->valids, &bo_va->invalids);
1036
1037	} else if (bo_va->cleared != clear) {
1038		list_splice_init(&bo_va->valids, &bo_va->invalids);
1039	}
1040
1041	list_for_each_entry(mapping, &bo_va->invalids, list) {
1042		uint64_t update_flags = flags;
1043
 1044		/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits
 1045		 * here, but just in case we filter the flags first.
1046		 */
1047		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1048			update_flags &= ~AMDGPU_PTE_READABLE;
1049		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1050			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1051
1052		/* Apply ASIC specific mapping flags */
1053		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1054
1055		trace_amdgpu_vm_bo_update(mapping);
1056
1057		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1058					   resv, mapping->start, mapping->last,
1059					   update_flags, mapping->offset,
1060					   vram_base, mem, pages_addr,
1061					   last_update);
1062		if (r)
1063			return r;
1064	}
1065
1066	/* If the BO is not in its preferred location add it back to
1067	 * the evicted list so that it gets validated again on the
1068	 * next command submission.
1069	 */
1070	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1071		uint32_t mem_type = bo->tbo.resource->mem_type;
1072
1073		if (!(bo->preferred_domains &
1074		      amdgpu_mem_type_to_domain(mem_type)))
1075			amdgpu_vm_bo_evicted(&bo_va->base);
1076		else
1077			amdgpu_vm_bo_idle(&bo_va->base);
1078	} else {
1079		amdgpu_vm_bo_done(&bo_va->base);
1080	}
1081
1082	list_splice_init(&bo_va->invalids, &bo_va->valids);
1083	bo_va->cleared = clear;
1084	bo_va->base.moved = false;
1085
1086	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1087		list_for_each_entry(mapping, &bo_va->valids, list)
1088			trace_amdgpu_vm_bo_mapping(mapping);
1089	}
1090
1091	return 0;
1092}
1093
1094/**
1095 * amdgpu_vm_update_prt_state - update the global PRT state
1096 *
1097 * @adev: amdgpu_device pointer
1098 */
1099static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1100{
1101	unsigned long flags;
1102	bool enable;
1103
1104	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1105	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1106	adev->gmc.gmc_funcs->set_prt(adev, enable);
1107	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1108}
1109
1110/**
1111 * amdgpu_vm_prt_get - add a PRT user
1112 *
1113 * @adev: amdgpu_device pointer
1114 */
1115static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1116{
1117	if (!adev->gmc.gmc_funcs->set_prt)
1118		return;
1119
1120	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1121		amdgpu_vm_update_prt_state(adev);
1122}
1123
1124/**
1125 * amdgpu_vm_prt_put - drop a PRT user
1126 *
1127 * @adev: amdgpu_device pointer
1128 */
1129static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1130{
1131	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1132		amdgpu_vm_update_prt_state(adev);
1133}
1134
1135/**
1136 * amdgpu_vm_prt_cb - callback for updating the PRT status
1137 *
1138 * @fence: fence for the callback
1139 * @_cb: the callback function
1140 */
1141static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1142{
1143	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1144
1145	amdgpu_vm_prt_put(cb->adev);
1146	kfree(cb);
1147}
1148
1149/**
1150 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1151 *
1152 * @adev: amdgpu_device pointer
1153 * @fence: fence for the callback
1154 */
1155static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1156				 struct dma_fence *fence)
1157{
1158	struct amdgpu_prt_cb *cb;
1159
1160	if (!adev->gmc.gmc_funcs->set_prt)
1161		return;
1162
1163	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1164	if (!cb) {
1165		/* Last resort when we are OOM */
1166		if (fence)
1167			dma_fence_wait(fence, false);
1168
1169		amdgpu_vm_prt_put(adev);
1170	} else {
1171		cb->adev = adev;
1172		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1173						     amdgpu_vm_prt_cb))
1174			amdgpu_vm_prt_cb(fence, &cb->cb);
1175	}
1176}
1177
1178/**
1179 * amdgpu_vm_free_mapping - free a mapping
1180 *
1181 * @adev: amdgpu_device pointer
1182 * @vm: requested vm
1183 * @mapping: mapping to be freed
1184 * @fence: fence of the unmap operation
1185 *
1186 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1187 */
1188static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1189				   struct amdgpu_vm *vm,
1190				   struct amdgpu_bo_va_mapping *mapping,
1191				   struct dma_fence *fence)
1192{
1193	if (mapping->flags & AMDGPU_PTE_PRT)
1194		amdgpu_vm_add_prt_cb(adev, fence);
1195	kfree(mapping);
1196}
1197
1198/**
1199 * amdgpu_vm_prt_fini - finish all prt mappings
1200 *
1201 * @adev: amdgpu_device pointer
1202 * @vm: requested vm
1203 *
1204 * Register a cleanup callback to disable PRT support after VM dies.
1205 */
1206static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1207{
1208	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1209	struct dma_resv_iter cursor;
1210	struct dma_fence *fence;
1211
1212	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1213		/* Add a callback for each fence in the reservation object */
1214		amdgpu_vm_prt_get(adev);
1215		amdgpu_vm_add_prt_cb(adev, fence);
1216	}
1217}
1218
1219/**
1220 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1221 *
1222 * @adev: amdgpu_device pointer
1223 * @vm: requested vm
1224 * @fence: optional resulting fence (unchanged if no work needed to be done
1225 * or if an error occurred)
1226 *
1227 * Make sure all freed BOs are cleared in the PT.
1228 * PTs have to be reserved and mutex must be locked!
1229 *
1230 * Returns:
1231 * 0 for success.
1232 *
1233 */
1234int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1235			  struct amdgpu_vm *vm,
1236			  struct dma_fence **fence)
1237{
1238	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1239	struct amdgpu_bo_va_mapping *mapping;
1240	uint64_t init_pte_value = 0;
1241	struct dma_fence *f = NULL;
1242	int r;
1243
1244	while (!list_empty(&vm->freed)) {
1245		mapping = list_first_entry(&vm->freed,
1246			struct amdgpu_bo_va_mapping, list);
1247		list_del(&mapping->list);
1248
1249		if (vm->pte_support_ats &&
1250		    mapping->start < AMDGPU_GMC_HOLE_START)
1251			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1252
1253		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
1254					   mapping->start, mapping->last,
1255					   init_pte_value, 0, 0, NULL, NULL,
1256					   &f);
1257		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1258		if (r) {
1259			dma_fence_put(f);
1260			return r;
1261		}
1262	}
1263
1264	if (fence && f) {
1265		dma_fence_put(*fence);
1266		*fence = f;
1267	} else {
1268		dma_fence_put(f);
1269	}
1270
1271	return 0;
1272
1273}
1274
1275/**
1276 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1277 *
1278 * @adev: amdgpu_device pointer
1279 * @vm: requested vm
1280 *
1281 * Make sure all BOs which are moved are updated in the PTs.
1282 *
1283 * Returns:
1284 * 0 for success.
1285 *
1286 * PTs have to be reserved!
1287 */
1288int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1289			   struct amdgpu_vm *vm)
1290{
1291	struct amdgpu_bo_va *bo_va;
1292	struct dma_resv *resv;
1293	bool clear;
1294	int r;
1295
1296	spin_lock(&vm->status_lock);
1297	while (!list_empty(&vm->moved)) {
1298		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1299					 base.vm_status);
1300		spin_unlock(&vm->status_lock);
1301
 1302		/* Per VM BOs never need to be cleared in the page tables */
1303		r = amdgpu_vm_bo_update(adev, bo_va, false);
1304		if (r)
1305			return r;
1306		spin_lock(&vm->status_lock);
1307	}
1308
1309	while (!list_empty(&vm->invalidated)) {
1310		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1311					 base.vm_status);
1312		resv = bo_va->base.bo->tbo.base.resv;
1313		spin_unlock(&vm->status_lock);
1314
1315		/* Try to reserve the BO to avoid clearing its ptes */
1316		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
1317			clear = false;
1318		/* Somebody else is using the BO right now */
1319		else
1320			clear = true;
1321
1322		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1323		if (r)
1324			return r;
1325
1326		if (!clear)
1327			dma_resv_unlock(resv);
1328		spin_lock(&vm->status_lock);
1329	}
1330	spin_unlock(&vm->status_lock);
1331
1332	return 0;
1333}
1334
1335/**
1336 * amdgpu_vm_bo_add - add a bo to a specific vm
1337 *
1338 * @adev: amdgpu_device pointer
1339 * @vm: requested vm
1340 * @bo: amdgpu buffer object
1341 *
1342 * Add @bo into the requested vm.
1343 * Add @bo to the list of bos associated with the vm
1344 *
1345 * Returns:
1346 * Newly added bo_va or NULL for failure
1347 *
1348 * Object has to be reserved!
1349 */
1350struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1351				      struct amdgpu_vm *vm,
1352				      struct amdgpu_bo *bo)
1353{
1354	struct amdgpu_bo_va *bo_va;
1355
1356	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1357	if (bo_va == NULL) {
1358		return NULL;
1359	}
1360	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1361
1362	bo_va->ref_count = 1;
1363	INIT_LIST_HEAD(&bo_va->valids);
1364	INIT_LIST_HEAD(&bo_va->invalids);
1365
1366	if (!bo)
1367		return bo_va;
1368
1369	dma_resv_assert_held(bo->tbo.base.resv);
1370	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1371		bo_va->is_xgmi = true;
1372		/* Power up XGMI if it can be potentially used */
1373		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1374	}
1375
1376	return bo_va;
1377}
1378
1379
1380/**
1381 * amdgpu_vm_bo_insert_map - insert a new mapping
1382 *
1383 * @adev: amdgpu_device pointer
1384 * @bo_va: bo_va to store the address
1385 * @mapping: the mapping to insert
1386 *
1387 * Insert a new mapping into all structures.
1388 */
1389static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1390				    struct amdgpu_bo_va *bo_va,
1391				    struct amdgpu_bo_va_mapping *mapping)
1392{
1393	struct amdgpu_vm *vm = bo_va->base.vm;
1394	struct amdgpu_bo *bo = bo_va->base.bo;
1395
1396	mapping->bo_va = bo_va;
1397	list_add(&mapping->list, &bo_va->invalids);
1398	amdgpu_vm_it_insert(mapping, &vm->va);
1399
1400	if (mapping->flags & AMDGPU_PTE_PRT)
1401		amdgpu_vm_prt_get(adev);
1402
1403	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1404	    !bo_va->base.moved) {
1405		amdgpu_vm_bo_moved(&bo_va->base);
1406	}
1407	trace_amdgpu_vm_bo_map(bo_va, mapping);
1408}
1409
1410/**
1411 * amdgpu_vm_bo_map - map bo inside a vm
1412 *
1413 * @adev: amdgpu_device pointer
1414 * @bo_va: bo_va to store the address
1415 * @saddr: where to map the BO
1416 * @offset: requested offset in the BO
1417 * @size: BO size in bytes
1418 * @flags: attributes of pages (read/write/valid/etc.)
1419 *
 1420 * Add a mapping of the BO at the specified addr into the VM.
1421 *
1422 * Returns:
1423 * 0 for success, error for failure.
1424 *
1425 * Object has to be reserved and unreserved outside!
1426 */
1427int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1428		     struct amdgpu_bo_va *bo_va,
1429		     uint64_t saddr, uint64_t offset,
1430		     uint64_t size, uint64_t flags)
1431{
1432	struct amdgpu_bo_va_mapping *mapping, *tmp;
1433	struct amdgpu_bo *bo = bo_va->base.bo;
1434	struct amdgpu_vm *vm = bo_va->base.vm;
1435	uint64_t eaddr;
1436
1437	/* validate the parameters */
1438	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
1439	    size == 0 || size & ~PAGE_MASK)
1440		return -EINVAL;
1441
1442	/* make sure object fit at this offset */
1443	eaddr = saddr + size - 1;
1444	if (saddr >= eaddr ||
1445	    (bo && offset + size > amdgpu_bo_size(bo)) ||
1446	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1447		return -EINVAL;
1448
1449	saddr /= AMDGPU_GPU_PAGE_SIZE;
1450	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1451
1452	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1453	if (tmp) {
1454		/* bo and tmp overlap, invalid addr */
1455		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1456			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1457			tmp->start, tmp->last + 1);
1458		return -EINVAL;
1459	}
1460
1461	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1462	if (!mapping)
1463		return -ENOMEM;
1464
1465	mapping->start = saddr;
1466	mapping->last = eaddr;
1467	mapping->offset = offset;
1468	mapping->flags = flags;
1469
1470	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1471
1472	return 0;
1473}
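/*
 * Worked example (illustrative) of the address bookkeeping above: both
 * @saddr and @size must be page aligned, and the range is stored in GPU page
 * numbers.  Mapping 2 MiB of a BO at VA 0x100000 stores
 * mapping->start = 0x100000 / 4096 = 0x100 and
 * mapping->last = 0x2fffff / 4096 = 0x2ff.
 */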
1474
1475/**
1476 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1477 *
1478 * @adev: amdgpu_device pointer
1479 * @bo_va: bo_va to store the address
1480 * @saddr: where to map the BO
1481 * @offset: requested offset in the BO
1482 * @size: BO size in bytes
1483 * @flags: attributes of pages (read/write/valid/etc.)
1484 *
 1485 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1486 * mappings as we do so.
1487 *
1488 * Returns:
1489 * 0 for success, error for failure.
1490 *
1491 * Object has to be reserved and unreserved outside!
1492 */
1493int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1494			     struct amdgpu_bo_va *bo_va,
1495			     uint64_t saddr, uint64_t offset,
1496			     uint64_t size, uint64_t flags)
1497{
1498	struct amdgpu_bo_va_mapping *mapping;
1499	struct amdgpu_bo *bo = bo_va->base.bo;
1500	uint64_t eaddr;
1501	int r;
1502
1503	/* validate the parameters */
1504	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
1505	    size == 0 || size & ~PAGE_MASK)
1506		return -EINVAL;
1507
1508	/* make sure object fit at this offset */
1509	eaddr = saddr + size - 1;
1510	if (saddr >= eaddr ||
1511	    (bo && offset + size > amdgpu_bo_size(bo)) ||
1512	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1513		return -EINVAL;
1514
1515	/* Allocate all the needed memory */
1516	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1517	if (!mapping)
1518		return -ENOMEM;
1519
1520	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1521	if (r) {
1522		kfree(mapping);
1523		return r;
1524	}
1525
1526	saddr /= AMDGPU_GPU_PAGE_SIZE;
1527	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1528
1529	mapping->start = saddr;
1530	mapping->last = eaddr;
1531	mapping->offset = offset;
1532	mapping->flags = flags;
1533
1534	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1535
1536	return 0;
1537}
1538
1539/**
1540 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1541 *
1542 * @adev: amdgpu_device pointer
1543 * @bo_va: bo_va to remove the address from
 1544 * @saddr: where the BO is mapped
1545 *
 1546 * Remove a mapping of the BO at the specified addr from the VM.
1547 *
1548 * Returns:
1549 * 0 for success, error for failure.
1550 *
1551 * Object has to be reserved and unreserved outside!
1552 */
1553int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1554		       struct amdgpu_bo_va *bo_va,
1555		       uint64_t saddr)
1556{
1557	struct amdgpu_bo_va_mapping *mapping;
1558	struct amdgpu_vm *vm = bo_va->base.vm;
1559	bool valid = true;
1560
1561	saddr /= AMDGPU_GPU_PAGE_SIZE;
1562
1563	list_for_each_entry(mapping, &bo_va->valids, list) {
1564		if (mapping->start == saddr)
1565			break;
1566	}
1567
1568	if (&mapping->list == &bo_va->valids) {
1569		valid = false;
1570
1571		list_for_each_entry(mapping, &bo_va->invalids, list) {
1572			if (mapping->start == saddr)
1573				break;
1574		}
1575
1576		if (&mapping->list == &bo_va->invalids)
1577			return -ENOENT;
1578	}
1579
1580	list_del(&mapping->list);
1581	amdgpu_vm_it_remove(mapping, &vm->va);
1582	mapping->bo_va = NULL;
1583	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1584
1585	if (valid)
1586		list_add(&mapping->list, &vm->freed);
1587	else
1588		amdgpu_vm_free_mapping(adev, vm, mapping,
1589				       bo_va->last_pt_update);
1590
1591	return 0;
1592}
1593
1594/**
1595 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1596 *
1597 * @adev: amdgpu_device pointer
1598 * @vm: VM structure to use
1599 * @saddr: start of the range
1600 * @size: size of the range
1601 *
1602 * Remove all mappings in a range, split them as appropriate.
1603 *
1604 * Returns:
1605 * 0 for success, error for failure.
1606 */
1607int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1608				struct amdgpu_vm *vm,
1609				uint64_t saddr, uint64_t size)
1610{
1611	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1612	LIST_HEAD(removed);
1613	uint64_t eaddr;
1614
1615	eaddr = saddr + size - 1;
1616	saddr /= AMDGPU_GPU_PAGE_SIZE;
1617	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1618
1619	/* Allocate all the needed memory */
1620	before = kzalloc(sizeof(*before), GFP_KERNEL);
1621	if (!before)
1622		return -ENOMEM;
1623	INIT_LIST_HEAD(&before->list);
1624
1625	after = kzalloc(sizeof(*after), GFP_KERNEL);
1626	if (!after) {
1627		kfree(before);
1628		return -ENOMEM;
1629	}
1630	INIT_LIST_HEAD(&after->list);
1631
1632	/* Now gather all removed mappings */
1633	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1634	while (tmp) {
1635		/* Remember mapping split at the start */
1636		if (tmp->start < saddr) {
1637			before->start = tmp->start;
1638			before->last = saddr - 1;
1639			before->offset = tmp->offset;
1640			before->flags = tmp->flags;
1641			before->bo_va = tmp->bo_va;
1642			list_add(&before->list, &tmp->bo_va->invalids);
1643		}
1644
1645		/* Remember mapping split at the end */
1646		if (tmp->last > eaddr) {
1647			after->start = eaddr + 1;
1648			after->last = tmp->last;
1649			after->offset = tmp->offset;
1650			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1651			after->flags = tmp->flags;
1652			after->bo_va = tmp->bo_va;
1653			list_add(&after->list, &tmp->bo_va->invalids);
1654		}
1655
1656		list_del(&tmp->list);
1657		list_add(&tmp->list, &removed);
1658
1659		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1660	}
1661
1662	/* And free them up */
1663	list_for_each_entry_safe(tmp, next, &removed, list) {
1664		amdgpu_vm_it_remove(tmp, &vm->va);
1665		list_del(&tmp->list);
1666
1667		if (tmp->start < saddr)
1668		    tmp->start = saddr;
1669		if (tmp->last > eaddr)
1670		    tmp->last = eaddr;
1671
1672		tmp->bo_va = NULL;
1673		list_add(&tmp->list, &vm->freed);
1674		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1675	}
1676
1677	/* Insert partial mapping before the range */
1678	if (!list_empty(&before->list)) {
1679		amdgpu_vm_it_insert(before, &vm->va);
1680		if (before->flags & AMDGPU_PTE_PRT)
1681			amdgpu_vm_prt_get(adev);
1682	} else {
1683		kfree(before);
1684	}
1685
1686	/* Insert partial mapping after the range */
1687	if (!list_empty(&after->list)) {
1688		amdgpu_vm_it_insert(after, &vm->va);
1689		if (after->flags & AMDGPU_PTE_PRT)
1690			amdgpu_vm_prt_get(adev);
1691	} else {
1692		kfree(after);
1693	}
1694
1695	return 0;
1696}
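/*
 * Worked example (illustrative) of the splitting above: with an existing
 * mapping covering GPU pages 0x100-0x1ff, clearing pages 0x140-0x17f leaves
 * "before" covering 0x100-0x13f with the original offset and "after"
 * covering 0x180-0x1ff with the offset advanced by the distance from the old
 * start, while the overlapped middle is clamped to 0x140-0x17f and moved to
 * &vm->freed for clearing.
 */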
1697
1698/**
1699 * amdgpu_vm_bo_lookup_mapping - find mapping by address
1700 *
1701 * @vm: the requested VM
1702 * @addr: the address
1703 *
 1704 * Find a mapping by its address.
1705 *
1706 * Returns:
1707 * The amdgpu_bo_va_mapping matching for addr or NULL
1708 *
1709 */
1710struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1711							 uint64_t addr)
1712{
1713	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1714}
1715
1716/**
1717 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1718 *
1719 * @vm: the requested vm
1720 * @ticket: CS ticket
1721 *
1722 * Trace all mappings of BOs reserved during a command submission.
1723 */
1724void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1725{
1726	struct amdgpu_bo_va_mapping *mapping;
1727
1728	if (!trace_amdgpu_vm_bo_cs_enabled())
1729		return;
1730
1731	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1732	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1733		if (mapping->bo_va && mapping->bo_va->base.bo) {
1734			struct amdgpu_bo *bo;
1735
1736			bo = mapping->bo_va->base.bo;
1737			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1738			    ticket)
1739				continue;
1740		}
1741
1742		trace_amdgpu_vm_bo_cs(mapping);
1743	}
1744}
1745
1746/**
1747 * amdgpu_vm_bo_del - remove a bo from a specific vm
1748 *
1749 * @adev: amdgpu_device pointer
1750 * @bo_va: requested bo_va
1751 *
1752 * Remove @bo_va->bo from the requested vm.
1753 *
 1754 * Object has to be reserved!
1755 */
1756void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1757		      struct amdgpu_bo_va *bo_va)
1758{
1759	struct amdgpu_bo_va_mapping *mapping, *next;
1760	struct amdgpu_bo *bo = bo_va->base.bo;
1761	struct amdgpu_vm *vm = bo_va->base.vm;
1762	struct amdgpu_vm_bo_base **base;
1763
1764	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
1765
1766	if (bo) {
1767		dma_resv_assert_held(bo->tbo.base.resv);
1768		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1769			ttm_bo_set_bulk_move(&bo->tbo, NULL);
1770
1771		for (base = &bo_va->base.bo->vm_bo; *base;
1772		     base = &(*base)->next) {
1773			if (*base != &bo_va->base)
1774				continue;
1775
1776			*base = bo_va->base.next;
1777			break;
1778		}
1779	}
1780
1781	spin_lock(&vm->status_lock);
1782	list_del(&bo_va->base.vm_status);
1783	spin_unlock(&vm->status_lock);
1784
1785	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1786		list_del(&mapping->list);
1787		amdgpu_vm_it_remove(mapping, &vm->va);
1788		mapping->bo_va = NULL;
1789		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1790		list_add(&mapping->list, &vm->freed);
1791	}
1792	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1793		list_del(&mapping->list);
1794		amdgpu_vm_it_remove(mapping, &vm->va);
1795		amdgpu_vm_free_mapping(adev, vm, mapping,
1796				       bo_va->last_pt_update);
1797	}
1798
1799	dma_fence_put(bo_va->last_pt_update);
1800
1801	if (bo && bo_va->is_xgmi)
1802		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1803
1804	kfree(bo_va);
1805}
1806
1807/**
1808 * amdgpu_vm_evictable - check if we can evict a VM
1809 *
1810 * @bo: A page table of the VM.
1811 *
1812 * Check if it is possible to evict a VM.
1813 */
1814bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
1815{
1816	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
1817
1818	/* Page tables of a destroyed VM can go away immediately */
1819	if (!bo_base || !bo_base->vm)
1820		return true;
1821
1822	/* Don't evict VM page tables while they are busy */
1823	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
1824		return false;
1825
1826	/* Try to block ongoing updates */
1827	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
1828		return false;
1829
1830	/* Don't evict VM page tables while they are updated */
1831	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
1832		amdgpu_vm_eviction_unlock(bo_base->vm);
1833		return false;
1834	}
1835
1836	bo_base->vm->evicting = true;
1837	amdgpu_vm_eviction_unlock(bo_base->vm);
1838	return true;
1839}
1840
1841/**
1842 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1843 *
1844 * @adev: amdgpu_device pointer
1845 * @bo: amdgpu buffer object
1846 * @evicted: is the BO evicted
1847 *
1848 * Mark @bo as invalid.
1849 */
1850void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1851			     struct amdgpu_bo *bo, bool evicted)
1852{
1853	struct amdgpu_vm_bo_base *bo_base;
1854
1855	/* shadow bo doesn't have bo base, its validation needs its parent */
1856	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
1857		bo = bo->parent;
1858
1859	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
1860		struct amdgpu_vm *vm = bo_base->vm;
1861
1862		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1863			amdgpu_vm_bo_evicted(bo_base);
1864			continue;
1865		}
1866
1867		if (bo_base->moved)
1868			continue;
1869		bo_base->moved = true;
1870
1871		if (bo->tbo.type == ttm_bo_type_kernel)
1872			amdgpu_vm_bo_relocated(bo_base);
1873		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1874			amdgpu_vm_bo_moved(bo_base);
1875		else
1876			amdgpu_vm_bo_invalidated(bo_base);
1877	}
1878}
1879
1880/**
1881 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
1882 *
1883 * @vm_size: VM size
1884 *
1885 * Returns:
1886 * VM page table size as a power of two
1887 */
1888static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
1889{
1890	/* Total bits covered by PD + PTs */
1891	unsigned bits = ilog2(vm_size) + 18;
1892
1893	/* Make sure the PD is 4K in size up to 8GB address space.
1894	 * Above that, split it equally between PD and PTs. */
1895	if (vm_size <= 8)
1896		return (bits - 9);
1897	else
1898		return ((bits + 3) / 2);
1899}
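
/*
 * Illustrative worked example (not part of the upstream code): for a
 * 256 GB VM the helper above computes bits = ilog2(256) + 18 = 26 and,
 * since vm_size > 8, returns (26 + 3) / 2 = 14 bits per page table. For
 * an 8 GB VM it computes bits = 3 + 18 = 21 and returns 21 - 9 = 12,
 * which leaves 2^21 >> 12 = 512 PD entries, i.e. a 4K page directory.
 */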
1900
1901/**
1902 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
1903 *
1904 * @adev: amdgpu_device pointer
1905 * @min_vm_size: the minimum VM size in GB if it's set to auto
1906 * @fragment_size_default: Default PTE fragment size
1907 * @max_level: max VMPT level
1908 * @max_bits: max address space size in bits
1909 *
1910 */
1911void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
1912			   uint32_t fragment_size_default, unsigned max_level,
1913			   unsigned max_bits)
1914{
1915	unsigned int max_size = 1 << (max_bits - 30);
1916	unsigned int vm_size;
1917	uint64_t tmp;
1918
1919	/* adjust vm size first */
1920	if (amdgpu_vm_size != -1) {
1921		vm_size = amdgpu_vm_size;
1922		if (vm_size > max_size) {
1923			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
1924				 amdgpu_vm_size, max_size);
1925			vm_size = max_size;
1926		}
1927	} else {
1928		struct sysinfo si;
1929		unsigned int phys_ram_gb;
1930
1931		/* Optimal VM size depends on the amount of physical
1932		 * RAM available. Underlying requirements and
1933		 * assumptions:
1934		 *
1935		 *  - Need to map system memory and VRAM from all GPUs
1936		 *     - VRAM from other GPUs not known here
1937		 *     - Assume VRAM <= system memory
1938		 *  - On GFX8 and older, VM space can be segmented for
1939		 *    different MTYPEs
1940		 *  - Need to allow room for fragmentation, guard pages etc.
1941		 *
1942		 * This adds up to a rough guess of system memory x3.
1943		 * Round up to power of two to maximize the available
1944		 * VM size with the given page table size.
1945		 */
1946		si_meminfo(&si);
1947		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
1948			       (1 << 30) - 1) >> 30;
1949		vm_size = roundup_pow_of_two(
1950			min(max(phys_ram_gb * 3, min_vm_size), max_size));
1951	}
1952
1953	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
1954
1955	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
1956	if (amdgpu_vm_block_size != -1)
1957		tmp >>= amdgpu_vm_block_size - 9;
1958	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
1959	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
1960	switch (adev->vm_manager.num_level) {
1961	case 3:
1962		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
1963		break;
1964	case 2:
1965		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
1966		break;
1967	case 1:
1968		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
1969		break;
1970	default:
1971		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
1972	}
1973	/* block size depends on vm size and hw setup */
1974	if (amdgpu_vm_block_size != -1)
1975		adev->vm_manager.block_size =
1976			min((unsigned)amdgpu_vm_block_size, max_bits
1977			    - AMDGPU_GPU_PAGE_SHIFT
1978			    - 9 * adev->vm_manager.num_level);
1979	else if (adev->vm_manager.num_level > 1)
1980		adev->vm_manager.block_size = 9;
1981	else
1982		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
1983
1984	if (amdgpu_vm_fragment_size == -1)
1985		adev->vm_manager.fragment_size = fragment_size_default;
1986	else
1987		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
1988
1989	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
1990		 vm_size, adev->vm_manager.num_level + 1,
1991		 adev->vm_manager.block_size,
1992		 adev->vm_manager.fragment_size);
1993}
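
/*
 * Illustrative worked example (not part of the upstream code): on a system
 * with 16 GB of RAM and no amdgpu_vm_size override, the heuristic above
 * estimates 16 * 3 = 48 GB and rounds up to a 64 GB VM (clamped between
 * min_vm_size and max_size). That gives max_pfn = 64 << 18 = 2^24 GPU
 * pages, so fls64(max_pfn) - 1 = 24 and DIV_ROUND_UP(24, 9) - 1 = 2,
 * which selects num_level = 2 with the root at AMDGPU_VM_PDB1, assuming
 * max_level allows it.
 */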
1994
1995/**
1996 * amdgpu_vm_wait_idle - wait for the VM to become idle
1997 *
1998 * @vm: VM object to wait for
1999 * @timeout: timeout to wait for VM to become idle
2000 */
2001long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2002{
2003	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2004					DMA_RESV_USAGE_BOOKKEEP,
2005					true, timeout);
2006	if (timeout <= 0)
2007		return timeout;
2008
2009	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2010}
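
/*
 * Illustrative sketch of a caller, using a hypothetical helper name; it
 * shows how the dma_resv/dma_fence style return value is usually consumed:
 * negative means error, zero means timeout, positive means the VM went
 * idle within the remaining time.
 */
static int __maybe_unused example_drain_vm(struct amdgpu_vm *vm)
{
	long timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));

	if (timeout < 0)
		return timeout;		/* interrupted or other error */
	if (timeout == 0)
		return -ETIMEDOUT;	/* VM still busy after 100 ms */
	return 0;
}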
2011
2012/**
2013 * amdgpu_vm_init - initialize a vm instance
2014 *
2015 * @adev: amdgpu_device pointer
2016 * @vm: requested vm
2017 *
2018 * Init @vm fields.
2019 *
2020 * Returns:
2021 * 0 for success, error for failure.
2022 */
2023int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2024{
2025	struct amdgpu_bo *root_bo;
2026	struct amdgpu_bo_vm *root;
2027	int r, i;
2028
2029	vm->va = RB_ROOT_CACHED;
2030	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2031		vm->reserved_vmid[i] = NULL;
2032	INIT_LIST_HEAD(&vm->evicted);
2033	INIT_LIST_HEAD(&vm->relocated);
2034	INIT_LIST_HEAD(&vm->moved);
2035	INIT_LIST_HEAD(&vm->idle);
2036	INIT_LIST_HEAD(&vm->invalidated);
2037	spin_lock_init(&vm->status_lock);
2038	INIT_LIST_HEAD(&vm->freed);
2039	INIT_LIST_HEAD(&vm->done);
2040	INIT_LIST_HEAD(&vm->pt_freed);
2041	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2042
2043	/* create scheduler entities for page table updates */
2044	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
2045				  adev->vm_manager.vm_pte_scheds,
2046				  adev->vm_manager.vm_pte_num_scheds, NULL);
2047	if (r)
2048		return r;
2049
2050	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2051				  adev->vm_manager.vm_pte_scheds,
2052				  adev->vm_manager.vm_pte_num_scheds, NULL);
2053	if (r)
2054		goto error_free_immediate;
2055
2056	vm->pte_support_ats = false;
2057	vm->is_compute_context = false;
2058
2059	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2060				    AMDGPU_VM_USE_CPU_FOR_GFX);
2061
2062	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2063			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2064	WARN_ONCE((vm->use_cpu_for_update &&
2065		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2066		  "CPU update of VM recommended only for large BAR system\n");
2067
2068	if (vm->use_cpu_for_update)
2069		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2070	else
2071		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2072	vm->last_update = NULL;
2073	vm->last_unlocked = dma_fence_get_stub();
2074	vm->last_tlb_flush = dma_fence_get_stub();
2075
2076	mutex_init(&vm->eviction_lock);
2077	vm->evicting = false;
2078
2079	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2080				false, &root);
2081	if (r)
2082		goto error_free_delayed;
2083	root_bo = &root->bo;
2084	r = amdgpu_bo_reserve(root_bo, true);
2085	if (r)
2086		goto error_free_root;
2087
2088	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2089	if (r)
2090		goto error_unreserve;
2091
2092	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2093
2094	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2095	if (r)
2096		goto error_unreserve;
2097
2098	amdgpu_bo_unreserve(vm->root.bo);
2099
2100	INIT_KFIFO(vm->faults);
2101
2102	return 0;
2103
2104error_unreserve:
2105	amdgpu_bo_unreserve(vm->root.bo);
2106
2107error_free_root:
2108	amdgpu_bo_unref(&root->shadow);
2109	amdgpu_bo_unref(&root_bo);
2110	vm->root.bo = NULL;
2111
2112error_free_delayed:
2113	dma_fence_put(vm->last_tlb_flush);
2114	dma_fence_put(vm->last_unlocked);
2115	drm_sched_entity_destroy(&vm->delayed);
2116
2117error_free_immediate:
2118	drm_sched_entity_destroy(&vm->immediate);
2119
2120	return r;
2121}
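
/*
 * Illustrative sketch of the init/fini contract, using a hypothetical
 * helper. In the real driver the amdgpu_vm is embedded in struct
 * amdgpu_fpriv and initialized when the DRM file is opened; this
 * stand-alone pairing only shows the expected call order.
 */
static int __maybe_unused example_vm_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm;
	int r;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	r = amdgpu_vm_init(adev, vm);
	if (r)
		goto out_free;

	/* ... create bo_vas, map ranges and submit work against the VM ... */

	amdgpu_vm_fini(adev, vm);
out_free:
	kfree(vm);
	return r;
}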
2122
2123/**
2124 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2125 *
2126 * @adev: amdgpu_device pointer
2127 * @vm: requested vm
2128 *
2129 * This only works on GFX VMs that don't have any BOs added and no
2130 * page tables allocated yet.
2131 *
2132 * Changes the following VM parameters:
2133 * - use_cpu_for_update
2134 * - pte_support_ats
2135 *
2136 * Reinitializes the page directory to reflect the changed ATS
2137 * setting.
2138 *
2139 * Returns:
2140 * 0 for success, -errno for errors.
2141 */
2142int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2143{
2144	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2145	int r;
2146
2147	r = amdgpu_bo_reserve(vm->root.bo, true);
2148	if (r)
2149		return r;
2150
2151	/* Sanity checks */
2152	if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2153		r = -EINVAL;
2154		goto unreserve_bo;
2155	}
2156
2157	/* Check if PD needs to be reinitialized and do it before
2158	 * changing any other state, in case it fails.
2159	 */
2160	if (pte_support_ats != vm->pte_support_ats) {
2161		vm->pte_support_ats = pte_support_ats;
2162		r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2163				       false);
2164		if (r)
2165			goto unreserve_bo;
2166	}
2167
2168	/* Update VM state */
2169	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2170				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2171	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2172			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2173	WARN_ONCE((vm->use_cpu_for_update &&
2174		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2175		  "CPU update of VM recommended only for large BAR system\n");
2176
2177	if (vm->use_cpu_for_update) {
2178		/* Sync with last SDMA update/clear before switching to CPU */
2179		r = amdgpu_bo_sync_wait(vm->root.bo,
2180					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2181		if (r)
2182			goto unreserve_bo;
2183
2184		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2185	} else {
2186		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2187	}
2188	/*
2189	 * Make sure the root PD gets mapped, as vm_update_mode could be
2190	 * changed when turning a GFX VM into a compute VM.
2191	 */
2192	r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
2193	if (r)
2194		goto unreserve_bo;
2195
2196	dma_fence_put(vm->last_update);
2197	vm->last_update = NULL;
2198	vm->is_compute_context = true;
2199
2200	/* Free the shadow bo for compute VM */
2201	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2202
2203	goto unreserve_bo;
2204
2205unreserve_bo:
2206	amdgpu_bo_unreserve(vm->root.bo);
2207	return r;
2208}
2209
2210/**
2211 * amdgpu_vm_release_compute - release a compute vm
2212 * @adev: amdgpu_device pointer
2213 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2214 *
2215 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2216 * compute pasid from the vm. Compute should stop using the vm after this call.
2217 */
2218void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2219{
2220	amdgpu_vm_set_pasid(adev, vm, 0);
2221	vm->is_compute_context = false;
2222}
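
/*
 * Illustrative sketch (hypothetical helper) of how the two calls above
 * pair up when a GFX VM is adopted for compute: make it a compute VM,
 * attach a PASID and run work, then decouple it again via
 * amdgpu_vm_release_compute once compute is done with it.
 */
static int __maybe_unused example_adopt_vm_for_compute(struct amdgpu_device *adev,
						       struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_vm_make_compute(adev, vm);
	if (r)
		return r;

	/* ... set a PASID with amdgpu_vm_set_pasid() and run compute jobs ... */

	amdgpu_vm_release_compute(adev, vm);
	return 0;
}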
2223
2224/**
2225 * amdgpu_vm_fini - tear down a vm instance
2226 *
2227 * @adev: amdgpu_device pointer
2228 * @vm: requested vm
2229 *
2230 * Tear down @vm.
2231 * Unbind the VM and remove all bos from the vm bo list
2232 */
2233void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2234{
2235	struct amdgpu_bo_va_mapping *mapping, *tmp;
2236	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2237	struct amdgpu_bo *root;
2238	unsigned long flags;
2239	int i;
2240
2241	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2242
2243	flush_work(&vm->pt_free_work);
2244
2245	root = amdgpu_bo_ref(vm->root.bo);
2246	amdgpu_bo_reserve(root, true);
2247	amdgpu_vm_set_pasid(adev, vm, 0);
2248	dma_fence_wait(vm->last_unlocked, false);
2249	dma_fence_put(vm->last_unlocked);
2250	dma_fence_wait(vm->last_tlb_flush, false);
2251	/* Make sure that all fence callbacks have completed */
2252	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2253	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2254	dma_fence_put(vm->last_tlb_flush);
2255
2256	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2257		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2258			amdgpu_vm_prt_fini(adev, vm);
2259			prt_fini_needed = false;
2260		}
2261
2262		list_del(&mapping->list);
2263		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2264	}
2265
2266	amdgpu_vm_pt_free_root(adev, vm);
2267	amdgpu_bo_unreserve(root);
2268	amdgpu_bo_unref(&root);
2269	WARN_ON(vm->root.bo);
2270
2271	drm_sched_entity_destroy(&vm->immediate);
2272	drm_sched_entity_destroy(&vm->delayed);
2273
2274	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2275		dev_err(adev->dev, "still active bo inside vm\n");
2276	}
2277	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2278					     &vm->va.rb_root, rb) {
2279		/* Don't remove the mapping here, we don't want to trigger a
2280		 * rebalance and the tree is about to be destroyed anyway.
2281		 */
2282		list_del(&mapping->list);
2283		kfree(mapping);
2284	}
2285
2286	dma_fence_put(vm->last_update);
2287	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2288		amdgpu_vmid_free_reserved(adev, vm, i);
2289}
2290
2291/**
2292 * amdgpu_vm_manager_init - init the VM manager
2293 *
2294 * @adev: amdgpu_device pointer
2295 *
2296 * Initialize the VM manager structures
2297 */
2298void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2299{
2300	unsigned i;
2301
2302	/* Concurrent flushes are only possible starting with Vega10 and
2303	 * are broken on Navi10 and Navi14.
2304	 */
2305	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2306					      adev->asic_type == CHIP_NAVI10 ||
2307					      adev->asic_type == CHIP_NAVI14);
2308	amdgpu_vmid_mgr_init(adev);
2309
2310	adev->vm_manager.fence_context =
2311		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2312	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2313		adev->vm_manager.seqno[i] = 0;
2314
2315	spin_lock_init(&adev->vm_manager.prt_lock);
2316	atomic_set(&adev->vm_manager.num_prt_users, 0);
2317
2318	/* If not overridden by the user, compute VM tables are updated by the
2319	 * CPU by default only on large-BAR systems
2320	 */
2321#ifdef CONFIG_X86_64
2322	if (amdgpu_vm_update_mode == -1) {
2323		/* For asic with VF MMIO access protection
2324		 * avoid using CPU for VM table updates
2325		 */
2326		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2327		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2328			adev->vm_manager.vm_update_mode =
2329				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2330		else
2331			adev->vm_manager.vm_update_mode = 0;
2332	} else
2333		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2334#else
2335	adev->vm_manager.vm_update_mode = 0;
2336#endif
2337
2338	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2339}
2340
2341/**
2342 * amdgpu_vm_manager_fini - cleanup VM manager
2343 *
2344 * @adev: amdgpu_device pointer
2345 *
2346 * Cleanup the VM manager and free resources.
2347 */
2348void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2349{
2350	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2351	xa_destroy(&adev->vm_manager.pasids);
2352
2353	amdgpu_vmid_mgr_fini(adev);
2354}
2355
2356/**
2357 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2358 *
2359 * @dev: drm device pointer
2360 * @data: drm_amdgpu_vm
2361 * @filp: drm file pointer
2362 *
2363 * Returns:
2364 * 0 for success, -errno for errors.
2365 */
2366int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2367{
2368	union drm_amdgpu_vm *args = data;
2369	struct amdgpu_device *adev = drm_to_adev(dev);
2370	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2371	int r;
2372
2373	switch (args->in.op) {
2374	case AMDGPU_VM_OP_RESERVE_VMID:
2375		/* We only need to reserve a VMID from the gfxhub */
2376		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
2377					       AMDGPU_GFXHUB_0);
2378		if (r)
2379			return r;
2380		break;
2381	case AMDGPU_VM_OP_UNRESERVE_VMID:
2382		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
2383		break;
2384	default:
2385		return -EINVAL;
2386	}
2387
2388	return 0;
2389}
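
/*
 * Illustrative userspace-side sketch (compiled separately against libdrm,
 * not part of this kernel file): reserving and later unreserving a VMID
 * through the ioctl handled above. The union and op codes come from
 * amdgpu_drm.h; drm_fd stands for an already opened render node.
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	if (drmCommandWriteRead(drm_fd, DRM_AMDGPU_VM, &args, sizeof(args)))
 *		return -1;
 *
 *	(submit work that depends on the reserved VMID)
 *
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	drmCommandWriteRead(drm_fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */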
2390
2391/**
2392 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2393 *
2394 * @adev: amdgpu device pointer
2395 * @pasid: PASID identifier for VM
2396 * @task_info: task_info to fill.
2397 */
2398void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2399			 struct amdgpu_task_info *task_info)
2400{
2401	struct amdgpu_vm *vm;
2402	unsigned long flags;
2403
2404	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2405
2406	vm = xa_load(&adev->vm_manager.pasids, pasid);
2407	if (vm)
2408		*task_info = vm->task_info;
2409
2410	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2411}
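
/*
 * Illustrative sketch (hypothetical helper): the typical consumer is a
 * fault or hang handler that wants to name the offending process in its
 * log message.
 */
static void __maybe_unused example_log_faulting_task(struct amdgpu_device *adev,
						     u32 pasid, u64 addr)
{
	struct amdgpu_task_info task_info;

	memset(&task_info, 0, sizeof(task_info));
	amdgpu_vm_get_task_info(adev, pasid, &task_info);

	dev_err(adev->dev,
		"page fault at 0x%llx from process %s (pid %d, thread %s)\n",
		addr, task_info.process_name, task_info.pid,
		task_info.task_name);
}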
2412
2413/**
2414 * amdgpu_vm_set_task_info - Sets VMs task info.
2415 *
2416 * @vm: vm for which to set the info
2417 */
2418void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2419{
2420	if (vm->task_info.pid)
2421		return;
2422
2423	vm->task_info.pid = current->pid;
2424	get_task_comm(vm->task_info.task_name, current);
2425
2426	if (current->group_leader->mm != current->mm)
2427		return;
2428
2429	vm->task_info.tgid = current->group_leader->pid;
2430	get_task_comm(vm->task_info.process_name, current->group_leader);
2431}
2432
2433/**
2434 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2435 * @adev: amdgpu device pointer
2436 * @pasid: PASID of the VM
2437 * @addr: Address of the fault
2438 * @write_fault: true if it is a write fault, false if it is a read fault
2439 *
2440 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2441 * shouldn't be reported any more.
2442 */
2443bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2444			    uint64_t addr, bool write_fault)
2445{
2446	bool is_compute_context = false;
2447	struct amdgpu_bo *root;
2448	unsigned long irqflags;
2449	uint64_t value, flags;
2450	struct amdgpu_vm *vm;
2451	int r;
2452
2453	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2454	vm = xa_load(&adev->vm_manager.pasids, pasid);
2455	if (vm) {
2456		root = amdgpu_bo_ref(vm->root.bo);
2457		is_compute_context = vm->is_compute_context;
2458	} else {
2459		root = NULL;
2460	}
2461	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2462
2463	if (!root)
2464		return false;
2465
2466	addr /= AMDGPU_GPU_PAGE_SIZE;
2467
2468	if (is_compute_context &&
2469	    !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
2470		amdgpu_bo_unref(&root);
2471		return true;
2472	}
2473
2474	r = amdgpu_bo_reserve(root, true);
2475	if (r)
2476		goto error_unref;
2477
2478	/* Double check that the VM still exists */
2479	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2480	vm = xa_load(&adev->vm_manager.pasids, pasid);
2481	if (vm && vm->root.bo != root)
2482		vm = NULL;
2483	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2484	if (!vm)
2485		goto error_unlock;
2486
2487	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2488		AMDGPU_PTE_SYSTEM;
2489
2490	if (is_compute_context) {
2491		/* Intentionally setting invalid PTE flag
2492		 * combination to force a no-retry-fault
2493		 */
2494		flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
2495		value = 0;
2496	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2497		/* Redirect the access to the dummy page */
2498		value = adev->dummy_page_addr;
2499		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2500			AMDGPU_PTE_WRITEABLE;
2501
2502	} else {
2503		/* Let the hw retry silently on the PTE */
2504		value = 0;
2505	}
2506
2507	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2508	if (r) {
2509		pr_debug("failed %d to reserve fence slot\n", r);
2510		goto error_unlock;
2511	}
2512
2513	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
2514				   addr, flags, value, 0, NULL, NULL, NULL);
2515	if (r)
2516		goto error_unlock;
2517
2518	r = amdgpu_vm_update_pdes(adev, vm, true);
2519
2520error_unlock:
2521	amdgpu_bo_unreserve(root);
2522	if (r < 0)
2523		DRM_ERROR("Can't handle page fault (%d)\n", r);
2524
2525error_unref:
2526	amdgpu_bo_unref(&root);
2527
2528	return false;
2529}
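
/*
 * Illustrative sketch (hypothetical helper) of how an interrupt handler
 * might consume the return value above: only faults that were not
 * recovered (e.g. by SVM or the dummy page redirect) get escalated.
 */
static void __maybe_unused example_process_vm_fault(struct amdgpu_device *adev,
						    u32 pasid, u64 addr,
						    bool write_fault)
{
	if (amdgpu_vm_handle_fault(adev, pasid, addr, write_fault))
		return;

	dev_err(adev->dev,
		"unrecovered VM fault (pasid %u, address 0x%llx, %s)\n",
		pasid, addr, write_fault ? "write" : "read");
}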
2530
2531#if defined(CONFIG_DEBUG_FS)
2532/**
2533 * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2534 *
2535 * @vm: Requested VM for printing BO info
2536 * @m: debugfs file
2537 *
2538 * Print BO information in debugfs file for the VM
2539 */
2540void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2541{
2542	struct amdgpu_bo_va *bo_va, *tmp;
2543	u64 total_idle = 0;
2544	u64 total_evicted = 0;
2545	u64 total_relocated = 0;
2546	u64 total_moved = 0;
2547	u64 total_invalidated = 0;
2548	u64 total_done = 0;
2549	unsigned int total_idle_objs = 0;
2550	unsigned int total_evicted_objs = 0;
2551	unsigned int total_relocated_objs = 0;
2552	unsigned int total_moved_objs = 0;
2553	unsigned int total_invalidated_objs = 0;
2554	unsigned int total_done_objs = 0;
2555	unsigned int id = 0;
2556
2557	spin_lock(&vm->status_lock);
2558	seq_puts(m, "\tIdle BOs:\n");
2559	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2560		if (!bo_va->base.bo)
2561			continue;
2562		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2563	}
2564	total_idle_objs = id;
2565	id = 0;
2566
2567	seq_puts(m, "\tEvicted BOs:\n");
2568	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2569		if (!bo_va->base.bo)
2570			continue;
2571		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2572	}
2573	total_evicted_objs = id;
2574	id = 0;
2575
2576	seq_puts(m, "\tRelocated BOs:\n");
2577	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2578		if (!bo_va->base.bo)
2579			continue;
2580		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2581	}
2582	total_relocated_objs = id;
2583	id = 0;
2584
2585	seq_puts(m, "\tMoved BOs:\n");
2586	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2587		if (!bo_va->base.bo)
2588			continue;
2589		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2590	}
2591	total_moved_objs = id;
2592	id = 0;
2593
2594	seq_puts(m, "\tInvalidated BOs:\n");
2595	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2596		if (!bo_va->base.bo)
2597			continue;
2598		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2599	}
2600	total_invalidated_objs = id;
2601	id = 0;
2602
2603	seq_puts(m, "\tDone BOs:\n");
2604	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2605		if (!bo_va->base.bo)
2606			continue;
2607		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2608	}
2609	spin_unlock(&vm->status_lock);
2610	total_done_objs = id;
2611
2612	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2613		   total_idle_objs);
2614	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2615		   total_evicted_objs);
2616	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2617		   total_relocated_objs);
2618	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2619		   total_moved_objs);
2620	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2621		   total_invalidated_objs);
2622	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2623		   total_done_objs);
2624}
2625#endif
v4.10.11
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
  28#include <linux/dma-fence-array.h>
  29#include <drm/drmP.h>
 
 
 
  30#include <drm/amdgpu_drm.h>
 
  31#include "amdgpu.h"
  32#include "amdgpu_trace.h"
  33
  34/*
  35 * GPUVM
  36 * GPUVM is similar to the legacy gart on older asics, however
  37 * rather than there being a single global gart table
  38 * for the entire GPU, there are multiple VM page tables active
  39 * at any given time.  The VM page tables can contain a mix
  40 * vram pages and system memory pages and system memory pages
 
 
 
 
 
 
 
 
  41 * can be mapped as snooped (cached system pages) or unsnooped
  42 * (uncached system pages).
  43 * Each VM has an ID associated with it and there is a page table
  44 * associated with each VMID.  When execting a command buffer,
  45 * the kernel tells the the ring what VMID to use for that command
 
  46 * buffer.  VMIDs are allocated dynamically as commands are submitted.
  47 * The userspace drivers maintain their own address space and the kernel
  48 * sets up their pages tables accordingly when they submit their
  49 * command buffers and a VMID is assigned.
  50 * Cayman/Trinity support up to 8 active VMs at any given time;
  51 * SI supports 16.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  52 */
  53
  54/* Local structure. Encapsulate some VM table update parameters to reduce
  55 * the number of function parameters
 
 
 
 
 
 
 
 
 
  56 */
  57struct amdgpu_pte_update_params {
  58	/* amdgpu device we do this update for */
 
 
 
  59	struct amdgpu_device *adev;
  60	/* address where to copy page table entries from */
  61	uint64_t src;
  62	/* indirect buffer to fill with commands */
  63	struct amdgpu_ib *ib;
  64	/* Function which actually does the update */
  65	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
  66		     uint64_t addr, unsigned count, uint32_t incr,
  67		     uint32_t flags);
  68	/* indicate update pt or its shadow */
  69	bool shadow;
  70};
  71
  72/**
  73 * amdgpu_vm_num_pde - return the number of page directory entries
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  74 *
  75 * @adev: amdgpu_device pointer
 
 
 
 
 
  76 *
  77 * Calculate the number of page directory entries.
  78 */
  79static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
 
  80{
  81	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  82}
  83
  84/**
  85 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 
 
  86 *
  87 * @adev: amdgpu_device pointer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  88 *
  89 * Calculate the size of the page directory in bytes.
  90 */
  91static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
 
  92{
  93	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94}
  95
  96/**
  97 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
  98 *
  99 * @vm: vm providing the BOs
 100 * @validated: head of validation list
 101 * @entry: entry to add
 102 *
 103 * Add the page directory to the list of BOs to
 104 * validate for command submission.
 105 */
 106void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 107			 struct list_head *validated,
 108			 struct amdgpu_bo_list_entry *entry)
 109{
 110	entry->robj = vm->page_directory;
 111	entry->priority = 0;
 112	entry->tv.bo = &vm->page_directory->tbo;
 113	entry->tv.shared = true;
 
 114	entry->user_pages = NULL;
 115	list_add(&entry->tv.head, validated);
 116}
 117
 118/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 119 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 120 *
 121 * @adev: amdgpu device pointer
 122 * @vm: vm providing the BOs
 123 * @validate: callback to do the validation
 124 * @param: parameter for the validation callback
 125 *
 126 * Validate the page table BOs on command submission if neccessary.
 
 
 
 127 */
 128int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 129			      int (*validate)(void *p, struct amdgpu_bo *bo),
 130			      void *param)
 131{
 132	uint64_t num_evictions;
 133	unsigned i;
 
 134	int r;
 135
 136	/* We only need to validate the page tables
 137	 * if they aren't already valid.
 138	 */
 139	num_evictions = atomic64_read(&adev->num_evictions);
 140	if (num_evictions == vm->last_eviction_counter)
 141		return 0;
 142
 143	/* add the vm page table to the list */
 144	for (i = 0; i <= vm->max_pde_used; ++i) {
 145		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 146
 147		if (!bo)
 148			continue;
 149
 150		r = validate(param, bo);
 151		if (r)
 152			return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 153	}
 
 
 
 
 
 154
 155	return 0;
 156}
 157
 158/**
 159 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 160 *
 161 * @adev: amdgpu device instance
 162 * @vm: vm providing the BOs
 
 163 *
 164 * Move the PT BOs to the tail of the LRU.
 
 165 */
 166void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 167				  struct amdgpu_vm *vm)
 168{
 169	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 170	unsigned i;
 171
 172	spin_lock(&glob->lru_lock);
 173	for (i = 0; i <= vm->max_pde_used; ++i) {
 174		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 175
 176		if (!bo)
 177			continue;
 
 178
 179		ttm_bo_move_to_lru_tail(&bo->tbo);
 180	}
 181	spin_unlock(&glob->lru_lock);
 182}
 183
 184static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
 185			      struct amdgpu_vm_id *id)
 186{
 187	return id->current_gpu_reset_count !=
 188		atomic_read(&adev->gpu_reset_counter) ? true : false;
 189}
 190
 191/**
 192 * amdgpu_vm_grab_id - allocate the next free VMID
 193 *
 194 * @vm: vm to allocate id for
 195 * @ring: ring we want to submit job to
 196 * @sync: sync object where we add dependencies
 197 * @fence: fence protecting ID from reuse
 198 *
 199 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 200 */
 201int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 202		      struct amdgpu_sync *sync, struct dma_fence *fence,
 203		      struct amdgpu_job *job)
 204{
 205	struct amdgpu_device *adev = ring->adev;
 206	uint64_t fence_context = adev->fence_context + ring->idx;
 207	struct dma_fence *updates = sync->last_vm_update;
 208	struct amdgpu_vm_id *id, *idle;
 209	struct dma_fence **fences;
 210	unsigned i;
 211	int r = 0;
 212
 213	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
 214			       GFP_KERNEL);
 215	if (!fences)
 216		return -ENOMEM;
 217
 218	mutex_lock(&adev->vm_manager.lock);
 219
 220	/* Check if we have an idle VMID */
 221	i = 0;
 222	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
 223		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
 224		if (!fences[i])
 225			break;
 226		++i;
 
 
 
 
 
 
 
 
 
 227	}
 228
 229	/* If we can't find a idle VMID to use, wait till one becomes available */
 230	if (&idle->list == &adev->vm_manager.ids_lru) {
 231		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 232		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
 233		struct dma_fence_array *array;
 234		unsigned j;
 235
 236		for (j = 0; j < i; ++j)
 237			dma_fence_get(fences[j]);
 238
 239		array = dma_fence_array_create(i, fences, fence_context,
 240					   seqno, true);
 241		if (!array) {
 242			for (j = 0; j < i; ++j)
 243				dma_fence_put(fences[j]);
 244			kfree(fences);
 245			r = -ENOMEM;
 246			goto error;
 247		}
 248
 249
 250		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
 251		dma_fence_put(&array->base);
 252		if (r)
 253			goto error;
 254
 255		mutex_unlock(&adev->vm_manager.lock);
 256		return 0;
 257
 258	}
 259	kfree(fences);
 260
 261	job->vm_needs_flush = true;
 262	/* Check if we can use a VMID already assigned to this VM */
 263	i = ring->idx;
 264	do {
 265		struct dma_fence *flushed;
 266
 267		id = vm->ids[i++];
 268		if (i == AMDGPU_MAX_RINGS)
 269			i = 0;
 270
 271		/* Check all the prerequisites to using this VMID */
 272		if (!id)
 273			continue;
 274		if (amdgpu_vm_is_gpu_reset(adev, id))
 275			continue;
 276
 277		if (atomic64_read(&id->owner) != vm->client_id)
 278			continue;
 279
 280		if (job->vm_pd_addr != id->pd_gpu_addr)
 281			continue;
 282
 283		if (!id->last_flush)
 284			continue;
 285
 286		if (id->last_flush->context != fence_context &&
 287		    !dma_fence_is_signaled(id->last_flush))
 288			continue;
 289
 290		flushed  = id->flushed_updates;
 291		if (updates &&
 292		    (!flushed || dma_fence_is_later(updates, flushed)))
 293			continue;
 294
 295		/* Good we can use this VMID. Remember this submission as
 296		 * user of the VMID.
 297		 */
 298		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 299		if (r)
 300			goto error;
 301
 302		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 303		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 304		vm->ids[ring->idx] = id;
 305
 306		job->vm_id = id - adev->vm_manager.ids;
 307		job->vm_needs_flush = false;
 308		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 309
 310		mutex_unlock(&adev->vm_manager.lock);
 311		return 0;
 312
 313	} while (i != ring->idx);
 314
 315	/* Still no ID to use? Then use the idle one found earlier */
 316	id = idle;
 317
 318	/* Remember this submission as user of the VMID */
 319	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 320	if (r)
 321		goto error;
 322
 323	dma_fence_put(id->first);
 324	id->first = dma_fence_get(fence);
 325
 326	dma_fence_put(id->last_flush);
 327	id->last_flush = NULL;
 328
 329	dma_fence_put(id->flushed_updates);
 330	id->flushed_updates = dma_fence_get(updates);
 331
 332	id->pd_gpu_addr = job->vm_pd_addr;
 333	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 334	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 335	atomic64_set(&id->owner, vm->client_id);
 336	vm->ids[ring->idx] = id;
 337
 338	job->vm_id = id - adev->vm_manager.ids;
 339	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 340
 341error:
 342	mutex_unlock(&adev->vm_manager.lock);
 343	return r;
 344}
 345
 346static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 
 
 
 
 
 
 
 
 
 
 347{
 348	struct amdgpu_device *adev = ring->adev;
 349	const struct amdgpu_ip_block *ip_block;
 
 350
 351	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
 352		/* only compute rings */
 353		return false;
 354
 355	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 356	if (!ip_block)
 357		return false;
 
 
 358
 359	if (ip_block->version->major <= 7) {
 360		/* gfx7 has no workaround */
 361		return true;
 362	} else if (ip_block->version->major == 8) {
 363		if (adev->gfx.mec_fw_version >= 673)
 364			/* gfx8 is fixed in MEC firmware 673 */
 365			return false;
 366		else
 367			return true;
 368	}
 369	return false;
 370}
 371
 372/**
 373 * amdgpu_vm_flush - hardware flush the vm
 374 *
 375 * @ring: ring to use for flush
 376 * @vm_id: vmid number to use
 377 * @pd_addr: address of the page directory
 378 *
 379 * Emit a VM flush when it is necessary.
 
 
 
 380 */
 381int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 
 382{
 383	struct amdgpu_device *adev = ring->adev;
 384	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
 385	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 386		id->gds_base != job->gds_base ||
 387		id->gds_size != job->gds_size ||
 388		id->gws_base != job->gws_base ||
 389		id->gws_size != job->gws_size ||
 390		id->oa_base != job->oa_base ||
 391		id->oa_size != job->oa_size);
 
 
 392	int r;
 393
 394	if (ring->funcs->emit_pipeline_sync && (
 395	    job->vm_needs_flush || gds_switch_needed ||
 396	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 397		amdgpu_ring_emit_pipeline_sync(ring);
 398
 399	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
 400	    amdgpu_vm_is_gpu_reset(adev, id))) {
 401		struct dma_fence *fence;
 
 402
 403		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
 404		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
 405
 406		r = amdgpu_fence_emit(ring, &fence);
 
 
 
 
 
 
 
 
 
 
 
 
 407		if (r)
 408			return r;
 
 409
 410		mutex_lock(&adev->vm_manager.lock);
 
 411		dma_fence_put(id->last_flush);
 412		id->last_flush = fence;
 413		mutex_unlock(&adev->vm_manager.lock);
 
 
 414	}
 415
 416	if (gds_switch_needed) {
 417		id->gds_base = job->gds_base;
 418		id->gds_size = job->gds_size;
 419		id->gws_base = job->gws_base;
 420		id->gws_size = job->gws_size;
 421		id->oa_base = job->oa_base;
 422		id->oa_size = job->oa_size;
 423		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
 424					    job->gds_base, job->gds_size,
 425					    job->gws_base, job->gws_size,
 426					    job->oa_base, job->oa_size);
 427	}
 
 
 
 
 428
 
 
 
 
 
 
 429	return 0;
 430}
 431
 432/**
 433 * amdgpu_vm_reset_id - reset VMID to zero
 434 *
 435 * @adev: amdgpu device structure
 436 * @vm_id: vmid number to use
 437 *
 438 * Reset saved GDW, GWS and OA to force switch on next flush.
 439 */
 440void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 441{
 442	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 443
 444	id->gds_base = 0;
 445	id->gds_size = 0;
 446	id->gws_base = 0;
 447	id->gws_size = 0;
 448	id->oa_base = 0;
 449	id->oa_size = 0;
 450}
 451
 452/**
 453 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 454 *
 455 * @vm: requested vm
 456 * @bo: requested buffer object
 457 *
 458 * Find @bo inside the requested vm.
 459 * Search inside the @bos vm list for the requested vm
 460 * Returns the found bo_va or NULL if none is found
 461 *
 462 * Object has to be reserved!
 
 
 
 463 */
 464struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 465				       struct amdgpu_bo *bo)
 466{
 467	struct amdgpu_bo_va *bo_va;
 
 
 
 
 468
 469	list_for_each_entry(bo_va, &bo->va, bo_list) {
 470		if (bo_va->vm == vm) {
 471			return bo_va;
 472		}
 473	}
 474	return NULL;
 475}
 476
 477/**
 478 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 479 *
 480 * @params: see amdgpu_pte_update_params definition
 481 * @pe: addr of the page entry
 482 * @addr: dst addr to write into pe
 483 * @count: number of page entries to update
 484 * @incr: increase next addr by incr bytes
 485 * @flags: hw access flags
 486 *
 487 * Traces the parameters and calls the right asic functions
 488 * to setup the page table using the DMA.
 489 */
 490static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 491				  uint64_t pe, uint64_t addr,
 492				  unsigned count, uint32_t incr,
 493				  uint32_t flags)
 494{
 495	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 496
 497	if (count < 3) {
 498		amdgpu_vm_write_pte(params->adev, params->ib, pe,
 499				    addr | flags, count, incr);
 500
 501	} else {
 502		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
 503				      count, incr, flags);
 504	}
 505}
 506
 507/**
 508 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 509 *
 510 * @params: see amdgpu_pte_update_params definition
 511 * @pe: addr of the page entry
 512 * @addr: dst addr to write into pe
 513 * @count: number of page entries to update
 514 * @incr: increase next addr by incr bytes
 515 * @flags: hw access flags
 516 *
 517 * Traces the parameters and calls the DMA function to copy the PTEs.
 518 */
 519static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 520				   uint64_t pe, uint64_t addr,
 521				   unsigned count, uint32_t incr,
 522				   uint32_t flags)
 523{
 524	uint64_t src = (params->src + (addr >> 12) * 8);
 525
 526
 527	trace_amdgpu_vm_copy_ptes(pe, src, count);
 528
 529	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 530}
 531
 532/**
 533 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 534 *
 535 * @pages_addr: optional DMA address to use for lookup
 536 * @addr: the unmapped addr
 537 *
 538 * Look up the physical address of the page that the pte resolves
 539 * to and return the pointer for the page table entry.
 
 
 
 540 */
 541static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 542{
 543	uint64_t result;
 544
 545	/* page table offset */
 546	result = pages_addr[addr >> PAGE_SHIFT];
 547
 548	/* in case cpu page size != gpu page size*/
 549	result |= addr & (~PAGE_MASK);
 550
 551	result &= 0xFFFFFFFFFFFFF000ULL;
 552
 553	return result;
 554}
 555
 556/*
 557 * amdgpu_vm_update_pdes - make sure that page directory is valid
 558 *
 559 * @adev: amdgpu_device pointer
 560 * @vm: requested vm
 561 * @start: start of GPU address range
 562 * @end: end of GPU address range
 
 563 *
 564 * Allocates new page tables if necessary
 565 * and updates the page directory.
 566 * Returns 0 for success, error for failure.
 567 */
 568int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 569				    struct amdgpu_vm *vm)
 570{
 571	struct amdgpu_bo *shadow;
 572	struct amdgpu_ring *ring;
 573	uint64_t pd_addr, shadow_addr;
 574	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 575	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 576	unsigned count = 0, pt_idx, ndw;
 577	struct amdgpu_job *job;
 578	struct amdgpu_pte_update_params params;
 579	struct dma_fence *fence = NULL;
 580
 581	int r;
 
 
 582
 583	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 584	shadow = vm->page_directory->shadow;
 585
 586	/* padding, etc. */
 587	ndw = 64;
 588
 589	/* assume the worst case */
 590	ndw += vm->max_pde_used * 6;
 591
 592	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 593	if (shadow) {
 594		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
 595		if (r)
 596			return r;
 597		shadow_addr = amdgpu_bo_gpu_offset(shadow);
 598		ndw *= 2;
 599	} else {
 600		shadow_addr = 0;
 601	}
 602
 603	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 604	if (r)
 605		return r;
 606
 607	memset(&params, 0, sizeof(params));
 608	params.adev = adev;
 609	params.ib = &job->ibs[0];
 
 610
 611	/* walk over the address space and update the page directory */
 612	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
 613		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
 614		uint64_t pde, pt;
 615
 616		if (bo == NULL)
 617			continue;
 
 618
 619		if (bo->shadow) {
 620			struct amdgpu_bo *pt_shadow = bo->shadow;
 621
 622			r = amdgpu_ttm_bind(&pt_shadow->tbo,
 623					    &pt_shadow->tbo.mem);
 624			if (r)
 625				return r;
 626		}
 627
 628		pt = amdgpu_bo_gpu_offset(bo);
 629		if (vm->page_tables[pt_idx].addr == pt)
 630			continue;
 631
 632		vm->page_tables[pt_idx].addr = pt;
 633
 634		pde = pd_addr + pt_idx * 8;
 635		if (((last_pde + 8 * count) != pde) ||
 636		    ((last_pt + incr * count) != pt) ||
 637		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 638
 639			if (count) {
 640				if (shadow)
 641					amdgpu_vm_do_set_ptes(&params,
 642							      last_shadow,
 643							      last_pt, count,
 644							      incr,
 645							      AMDGPU_PTE_VALID);
 646
 647				amdgpu_vm_do_set_ptes(&params, last_pde,
 648						      last_pt, count, incr,
 649						      AMDGPU_PTE_VALID);
 650			}
 651
 652			count = 1;
 653			last_pde = pde;
 654			last_shadow = shadow_addr + pt_idx * 8;
 655			last_pt = pt;
 656		} else {
 657			++count;
 658		}
 659	}
 660
 661	if (count) {
 662		if (vm->page_directory->shadow)
 663			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
 664					      count, incr, AMDGPU_PTE_VALID);
 665
 666		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
 667				      count, incr, AMDGPU_PTE_VALID);
 668	}
 669
 670	if (params.ib->length_dw == 0) {
 671		amdgpu_job_free(job);
 672		return 0;
 
 673	}
 674
 675	amdgpu_ring_pad_ib(ring, params.ib);
 676	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 677			 AMDGPU_FENCE_OWNER_VM);
 678	if (shadow)
 679		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
 680				 AMDGPU_FENCE_OWNER_VM);
 681
 682	WARN_ON(params.ib->length_dw > ndw);
 683	r = amdgpu_job_submit(job, ring, &vm->entity,
 684			      AMDGPU_FENCE_OWNER_VM, &fence);
 685	if (r)
 686		goto error_free;
 687
 688	amdgpu_bo_fence(vm->page_directory, fence, true);
 689	dma_fence_put(vm->page_directory_fence);
 690	vm->page_directory_fence = dma_fence_get(fence);
 691	dma_fence_put(fence);
 692
 693	return 0;
 694
 695error_free:
 696	amdgpu_job_free(job);
 697	return r;
 698}
 699
 700/**
 701 * amdgpu_vm_update_ptes - make sure that page tables are valid
 
 
 702 *
 703 * @params: see amdgpu_pte_update_params definition
 704 * @vm: requested vm
 705 * @start: start of GPU address range
 706 * @end: end of GPU address range
 707 * @dst: destination address to map to, the next dst inside the function
 708 * @flags: mapping flags
 709 *
 710 * Update the page tables in the range @start - @end.
 711 */
 712static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 713				  struct amdgpu_vm *vm,
 714				  uint64_t start, uint64_t end,
 715				  uint64_t dst, uint32_t flags)
 716{
 717	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 718
 719	uint64_t cur_pe_start, cur_nptes, cur_dst;
 720	uint64_t addr; /* next GPU address to be updated */
 721	uint64_t pt_idx;
 722	struct amdgpu_bo *pt;
 723	unsigned nptes; /* next number of ptes to be updated */
 724	uint64_t next_pe_start;
 725
 726	/* initialize the variables */
 727	addr = start;
 728	pt_idx = addr >> amdgpu_vm_block_size;
 729	pt = vm->page_tables[pt_idx].bo;
 730	if (params->shadow) {
 731		if (!pt->shadow)
 732			return;
 733		pt = pt->shadow;
 734	}
 735	if ((addr & ~mask) == (end & ~mask))
 736		nptes = end - addr;
 737	else
 738		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 739
 740	cur_pe_start = amdgpu_bo_gpu_offset(pt);
 741	cur_pe_start += (addr & mask) * 8;
 742	cur_nptes = nptes;
 743	cur_dst = dst;
 744
 745	/* for next ptb*/
 746	addr += nptes;
 747	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 748
 749	/* walk over the address space and update the page tables */
 750	while (addr < end) {
 751		pt_idx = addr >> amdgpu_vm_block_size;
 752		pt = vm->page_tables[pt_idx].bo;
 753		if (params->shadow) {
 754			if (!pt->shadow)
 755				return;
 756			pt = pt->shadow;
 757		}
 758
 759		if ((addr & ~mask) == (end & ~mask))
 760			nptes = end - addr;
 761		else
 762			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 763
 764		next_pe_start = amdgpu_bo_gpu_offset(pt);
 765		next_pe_start += (addr & mask) * 8;
 766
 767		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
 768		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
 769			/* The next ptb is consecutive to current ptb.
 770			 * Don't call the update function now.
 771			 * Will update two ptbs together in future.
 772			*/
 773			cur_nptes += nptes;
 774		} else {
 775			params->func(params, cur_pe_start, cur_dst, cur_nptes,
 776				     AMDGPU_GPU_PAGE_SIZE, flags);
 777
 778			cur_pe_start = next_pe_start;
 779			cur_nptes = nptes;
 780			cur_dst = dst;
 781		}
 782
 783		/* for next ptb*/
 784		addr += nptes;
 785		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 786	}
 787
 788	params->func(params, cur_pe_start, cur_dst, cur_nptes,
 789		     AMDGPU_GPU_PAGE_SIZE, flags);
 790}
 791
 792/*
 793 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 794 *
 795 * @params: see amdgpu_pte_update_params definition
 796 * @vm: requested vm
 797 * @start: first PTE to handle
 798 * @end: last PTE to handle
 799 * @dst: addr those PTEs should point to
 800 * @flags: hw mapping flags
 801 */
 802static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
 803				struct amdgpu_vm *vm,
 804				uint64_t start, uint64_t end,
 805				uint64_t dst, uint32_t flags)
 806{
 807	/**
 808	 * The MC L1 TLB supports variable sized pages, based on a fragment
 809	 * field in the PTE. When this field is set to a non-zero value, page
 810	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
 811	 * flags are considered valid for all PTEs within the fragment range
 812	 * and corresponding mappings are assumed to be physically contiguous.
 813	 *
 814	 * The L1 TLB can store a single PTE for the whole fragment,
 815	 * significantly increasing the space available for translation
 816	 * caching. This leads to large improvements in throughput when the
 817	 * TLB is under pressure.
 818	 *
 819	 * The L2 TLB distributes small and large fragments into two
 820	 * asymmetric partitions. The large fragment cache is significantly
 821	 * larger. Thus, we try to use large fragments wherever possible.
 822	 * Userspace can support this by aligning virtual base address and
 823	 * allocation size to the fragment size.
 824	 */
 825
 826	/* SI and newer are optimized for 64KB */
 827	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
 828	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
 829
 830	uint64_t frag_start = ALIGN(start, frag_align);
 831	uint64_t frag_end = end & ~(frag_align - 1);
 832
 833	/* system pages are non continuously */
 834	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
 835	    (frag_start >= frag_end)) {
 836
 837		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
 838		return;
 839	}
 840
 841	/* handle the 4K area at the beginning */
 842	if (start != frag_start) {
 843		amdgpu_vm_update_ptes(params, vm, start, frag_start,
 844				      dst, flags);
 845		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
 846	}
 847
 848	/* handle the area in the middle */
 849	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
 850			      flags | frag_flags);
 851
 852	/* handle the 4K area at the end */
 853	if (frag_end != end) {
 854		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
 855		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
 856	}
 857}
 858
 859/**
 860 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 861 *
 862 * @adev: amdgpu_device pointer
 863 * @exclusive: fence we need to sync to
 864 * @src: address where to copy page table entries from
 865 * @pages_addr: DMA addresses to use for mapping
 866 * @vm: requested vm
 
 867 * @start: start of mapped range
 868 * @last: last mapped entry
 869 * @flags: flags for the entries
 870 * @addr: addr to set the area to
 
 
 
 871 * @fence: optional resulting fence
 872 *
 873 * Fill in the page table entries between @start and @last.
 874 * Returns 0 for success, -EINVAL for failure.
 
 
 875 */
 876static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 877				       struct dma_fence *exclusive,
 878				       uint64_t src,
 879				       dma_addr_t *pages_addr,
 880				       struct amdgpu_vm *vm,
 881				       uint64_t start, uint64_t last,
 882				       uint32_t flags, uint64_t addr,
 883				       struct dma_fence **fence)
 884{
 885	struct amdgpu_ring *ring;
 886	void *owner = AMDGPU_FENCE_OWNER_VM;
 887	unsigned nptes, ncmds, ndw;
 888	struct amdgpu_job *job;
 889	struct amdgpu_pte_update_params params;
 890	struct dma_fence *f = NULL;
 891	int r;
 
 
 
 
 
 892
 893	memset(&params, 0, sizeof(params));
 894	params.adev = adev;
 895	params.src = src;
 
 
 896
 897	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 
 
 898
 899	memset(&params, 0, sizeof(params));
 900	params.adev = adev;
 901	params.src = src;
 
 
 
 902
 903	/* sync to everything on unmapping */
 
 
 904	if (!(flags & AMDGPU_PTE_VALID))
 905		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
 
 
 906
 907	nptes = last - start + 1;
 
 
 
 
 908
 909	/*
 910	 * reserve space for one command every (1 << BLOCK_SIZE)
 911	 *  entries or 2k dwords (whatever is smaller)
 912	 */
 913	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
 914
 915	/* padding, etc. */
 916	ndw = 64;
 
 
 917
 918	if (src) {
 919		/* only copy commands needed */
 920		ndw += ncmds * 7;
 921
 922		params.func = amdgpu_vm_do_copy_ptes;
 
 
 
 923
 924	} else if (pages_addr) {
 925		/* copy commands needed */
 926		ndw += ncmds * 7;
 927
 928		/* and also PTEs */
 929		ndw += nptes * 2;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 930
 931		params.func = amdgpu_vm_do_copy_ptes;
 
 
 
 
 
 
 932
 933	} else {
 934		/* set page commands needed */
 935		ndw += ncmds * 10;
 
 
 936
 937		/* two extra commands for begin/end of fragment */
 938		ndw += 2 * 10;
 
 
 939
 940		params.func = amdgpu_vm_do_set_ptes;
 
 941	}
 942
 943	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 944	if (r)
 945		return r;
 946
 947	params.ib = &job->ibs[0];
 948
 949	if (!src && pages_addr) {
 950		uint64_t *pte;
 951		unsigned i;
 952
 953		/* Put the PTEs at the end of the IB. */
 954		i = ndw - nptes * 2;
 955		pte= (uint64_t *)&(job->ibs->ptr[i]);
 956		params.src = job->ibs->gpu_addr + i * 4;
 957
 958		for (i = 0; i < nptes; ++i) {
 959			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
 960						    AMDGPU_GPU_PAGE_SIZE);
 961			pte[i] |= flags;
 962		}
 963		addr = 0;
 964	}
 965
 966	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
 967	if (r)
 968		goto error_free;
 969
 970	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 971			     owner);
 972	if (r)
 973		goto error_free;
 974
 975	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
 976	if (r)
 977		goto error_free;
 978
 979	params.shadow = true;
 980	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 981	params.shadow = false;
 982	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 983
 984	amdgpu_ring_pad_ib(ring, params.ib);
 985	WARN_ON(params.ib->length_dw > ndw);
 986	r = amdgpu_job_submit(job, ring, &vm->entity,
 987			      AMDGPU_FENCE_OWNER_VM, &f);
 988	if (r)
 989		goto error_free;
 990
 991	amdgpu_bo_fence(vm->page_directory, f, true);
 992	if (fence) {
 993		dma_fence_put(*fence);
 994		*fence = dma_fence_get(f);
 995	}
 996	dma_fence_put(f);
 997	return 0;
 998
 999error_free:
1000	amdgpu_job_free(job);
 
1001	return r;
1002}
1003
1004/**
1005 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1006 *
1007 * @adev: amdgpu_device pointer
1008 * @exclusive: fence we need to sync to
1009 * @gtt_flags: flags as they are used for GTT
1010 * @pages_addr: DMA addresses to use for mapping
1011 * @vm: requested vm
1012 * @mapping: mapped range and flags to use for the update
1013 * @flags: HW flags for the mapping
1014 * @nodes: array of drm_mm_nodes with the MC addresses
1015 * @fence: optional resulting fence
1016 *
1017 * Split the mapping into smaller chunks so that each update fits
1018 * into a SDMA IB.
1019 * Returns 0 for success, -EINVAL for failure.
1020 */
1021static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1022				      struct dma_fence *exclusive,
1023				      uint32_t gtt_flags,
1024				      dma_addr_t *pages_addr,
1025				      struct amdgpu_vm *vm,
1026				      struct amdgpu_bo_va_mapping *mapping,
1027				      uint32_t flags,
1028				      struct drm_mm_node *nodes,
1029				      struct dma_fence **fence)
1030{
1031	uint64_t pfn, src = 0, start = mapping->it.start;
1032	int r;
1033
1034	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
1035	 * but in case of something, we filter the flags in first place
1036	 */
1037	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1038		flags &= ~AMDGPU_PTE_READABLE;
1039	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1040		flags &= ~AMDGPU_PTE_WRITEABLE;
1041
1042	trace_amdgpu_vm_bo_update(mapping);
1043
1044	pfn = mapping->offset >> PAGE_SHIFT;
1045	if (nodes) {
1046		while (pfn >= nodes->size) {
1047			pfn -= nodes->size;
1048			++nodes;
1049		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1050	}
1051
1052	do {
1053		uint64_t max_entries;
1054		uint64_t addr, last;
1055
1056		if (nodes) {
1057			addr = nodes->start << PAGE_SHIFT;
1058			max_entries = (nodes->size - pfn) *
1059				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1060		} else {
1061			addr = 0;
1062			max_entries = S64_MAX;
1063		}
1064
1065		if (pages_addr) {
1066			if (flags == gtt_flags)
1067				src = adev->gart.table_addr +
1068					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1069			else
1070				max_entries = min(max_entries, 16ull * 1024ull);
1071			addr = 0;
1072		} else if (flags & AMDGPU_PTE_VALID) {
1073			addr += adev->vm_manager.vram_base_offset;
1074		}
1075		addr += pfn << PAGE_SHIFT;
1076
1077		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
1078		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1079						src, pages_addr, vm,
1080						start, last, flags, addr,
1081						fence);
1082		if (r)
1083			return r;
1084
1085		pfn += last - start + 1;
1086		if (nodes && nodes->size == pfn) {
1087			pfn = 0;
1088			++nodes;
1089		}
1090		start = last + 1;
1091
1092	} while (unlikely(start != mapping->it.last + 1));
1093
1094	return 0;
1095}
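/*
 * Worked sketch (not part of the original source): the chunking arithmetic
 * used by amdgpu_vm_bo_split_mapping() above, pulled out into a hypothetical
 * helper. With PAGE_SIZE == AMDGPU_GPU_PAGE_SIZE == 4 KiB a 2 MiB drm_mm node
 * holds 512 pages, so starting at pfn 128 inside that node leaves 384 PTEs
 * for the current chunk; chunks whose PTEs have to be written into the IB one
 * by one are additionally capped at 16K entries.
 */
#if 0
static uint64_t example_chunk_entries(struct drm_mm_node *node, uint64_t pfn,
				      bool ptes_written_into_ib)
{
	uint64_t max_entries = (node->size - pfn) *
		(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);

	if (ptes_written_into_ib)
		max_entries = min(max_entries, 16ull * 1024ull);

	return max_entries;
}
#endif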
1096
1097/**
1098 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1099 *
1100 * @adev: amdgpu_device pointer
1101 * @bo_va: requested BO and VM object
1102 * @clear: if true clear the entries
1103 *
1104 * Fill in the page table entries for @bo_va.
1105 * Returns 0 for success, -EINVAL for failure.
1106 */
1107int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1108			struct amdgpu_bo_va *bo_va,
1109			bool clear)
1110{
1111	struct amdgpu_vm *vm = bo_va->vm;
1112	struct amdgpu_bo_va_mapping *mapping;
1113	dma_addr_t *pages_addr = NULL;
1114	uint32_t gtt_flags, flags;
1115	struct ttm_mem_reg *mem;
1116	struct drm_mm_node *nodes;
1117	struct dma_fence *exclusive;
1118	int r;
1119
1120	if (clear) {
1121		mem = NULL;
1122		nodes = NULL;
1123		exclusive = NULL;
1124	} else {
1125		struct ttm_dma_tt *ttm;
1126
1127		mem = &bo_va->bo->tbo.mem;
1128		nodes = mem->mm_node;
1129		if (mem->mem_type == TTM_PL_TT) {
1130			ttm = container_of(bo_va->bo->tbo.ttm, struct
1131					   ttm_dma_tt, ttm);
1132			pages_addr = ttm->dma_address;
1133		}
1134		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1135	}
1136
1137	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1138	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1139		adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
1140
1141	spin_lock(&vm->status_lock);
1142	if (!list_empty(&bo_va->vm_status))
1143		list_splice_init(&bo_va->valids, &bo_va->invalids);
1144	spin_unlock(&vm->status_lock);
1145
1146	list_for_each_entry(mapping, &bo_va->invalids, list) {
1147		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1148					       gtt_flags, pages_addr, vm,
1149					       mapping, flags, nodes,
1150					       &bo_va->last_pt_update);
1151		if (r)
1152			return r;
1153	}
1154
1155	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1156		list_for_each_entry(mapping, &bo_va->valids, list)
1157			trace_amdgpu_vm_bo_mapping(mapping);
1158
1159		list_for_each_entry(mapping, &bo_va->invalids, list)
1160			trace_amdgpu_vm_bo_mapping(mapping);
1161	}
1162
1163	spin_lock(&vm->status_lock);
1164	list_splice_init(&bo_va->invalids, &bo_va->valids);
1165	list_del_init(&bo_va->vm_status);
1166	if (clear)
1167		list_add(&bo_va->vm_status, &vm->cleared);
1168	spin_unlock(&vm->status_lock);
1169
1170	return 0;
1171}
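/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * submission path might refresh one BO's mappings and then make its job wait
 * for the page table update. The helper name is hypothetical; the BO and the
 * page tables must already be reserved by the caller.
 */
#if 0
static int example_update_one_bo_va(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_sync *sync)
{
	int r;

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r)
		return r;

	/* make the upcoming job wait for the PT update to finish */
	return amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
}
#endif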
1172
1173/**
1174 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1175 *
1176 * @adev: amdgpu_device pointer
1177 * @vm: requested vm
1178 *
1179 * Make sure all freed BOs are cleared in the PT.
1180 * Returns 0 for success.
1181 *
1182 * PTs have to be reserved and mutex must be locked!
1183 */
1184int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1185			  struct amdgpu_vm *vm)
1186{
1187	struct amdgpu_bo_va_mapping *mapping;
1188	int r;
1189
1190	while (!list_empty(&vm->freed)) {
1191		mapping = list_first_entry(&vm->freed,
1192			struct amdgpu_bo_va_mapping, list);
1193		list_del(&mapping->list);
1194
1195		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1196					       0, 0, NULL);
1197		kfree(mapping);
1198		if (r)
1199			return r;
1200
1201	}
1202	return 0;
1203
1204}
1205
1206/**
1207 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1208 *
1209 * @adev: amdgpu_device pointer
1210 * @vm: requested vm
 * @sync: sync object to add fences to
1211 *
1212 * Make sure all invalidated BOs are cleared in the PT.
1213 * Returns 0 for success.
1214 *
1215 * PTs have to be reserved and mutex must be locked!
1216 */
1217int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1218			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1219{
1220	struct amdgpu_bo_va *bo_va = NULL;
1221	int r = 0;
1222
1223	spin_lock(&vm->status_lock);
1224	while (!list_empty(&vm->invalidated)) {
1225		bo_va = list_first_entry(&vm->invalidated,
1226			struct amdgpu_bo_va, vm_status);
1227		spin_unlock(&vm->status_lock);
1228
1229		r = amdgpu_vm_bo_update(adev, bo_va, true);
1230		if (r)
1231			return r;
1232
1233		spin_lock(&vm->status_lock);
1234	}
1235	spin_unlock(&vm->status_lock);
1236
1237	if (bo_va)
1238		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1239
1240	return r;
1241}
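/*
 * Usage sketch (illustrative only, not part of the original file): the order
 * in which freed and invalidated mappings would typically be flushed before a
 * submission, assuming the page tables are reserved as required above. The
 * helper name is hypothetical.
 */
#if 0
static int example_flush_vm_updates(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_sync *sync)
{
	int r;

	/* tear down mappings queued on vm->freed first ... */
	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	/* ... then revalidate everything on vm->invalidated and collect the
	 * resulting fence in @sync
	 */
	return amdgpu_vm_clear_invalids(adev, vm, sync);
}
#endif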
1242
1243/**
1244 * amdgpu_vm_bo_add - add a bo to a specific vm
1245 *
1246 * @adev: amdgpu_device pointer
1247 * @vm: requested vm
1248 * @bo: amdgpu buffer object
1249 *
1250 * Add @bo into the requested vm.
1251 * Add @bo to the list of bos associated with the vm
1252 * Returns newly added bo_va or NULL for failure
1253 *
1254 * Object has to be reserved!
1255 */
1256struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1257				      struct amdgpu_vm *vm,
1258				      struct amdgpu_bo *bo)
1259{
1260	struct amdgpu_bo_va *bo_va;
1261
1262	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1263	if (bo_va == NULL) {
1264		return NULL;
1265	}
1266	bo_va->vm = vm;
1267	bo_va->bo = bo;
1268	bo_va->ref_count = 1;
1269	INIT_LIST_HEAD(&bo_va->bo_list);
1270	INIT_LIST_HEAD(&bo_va->valids);
1271	INIT_LIST_HEAD(&bo_va->invalids);
1272	INIT_LIST_HEAD(&bo_va->vm_status);
1273
1274	list_add_tail(&bo_va->bo_list, &bo->va);
1275
1276	return bo_va;
1277}
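/*
 * Usage sketch (illustrative only, not part of the original file): binding a
 * BO into a VM is a two step process, amdgpu_vm_bo_add() followed by
 * amdgpu_vm_bo_map(). The helper name and the flag choice are hypothetical;
 * the BO has to be reserved by the caller as noted above.
 */
#if 0
static int example_bind_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   struct amdgpu_bo *bo, uint64_t gpu_addr)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	/* gpu_addr, offset and size must all be GPU page aligned */
	return amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}
#endif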
1278
1279/**
1280 * amdgpu_vm_bo_map - map bo inside a vm
1281 *
1282 * @adev: amdgpu_device pointer
1283 * @bo_va: bo_va to store the address
1284 * @saddr: where to map the BO
1285 * @offset: requested offset in the BO
 * @size: BO size in bytes
1286 * @flags: attributes of pages (read/write/valid/etc.)
1287 *
1288 * Add a mapping of the BO at the specified addr into the VM.
1289 * Returns 0 for success, error for failure.
1290 *
1291 * Object has to be reserved and unreserved outside!
1292 */
1293int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1294		     struct amdgpu_bo_va *bo_va,
1295		     uint64_t saddr, uint64_t offset,
1296		     uint64_t size, uint32_t flags)
1297{
1298	struct amdgpu_bo_va_mapping *mapping;
1299	struct amdgpu_vm *vm = bo_va->vm;
1300	struct interval_tree_node *it;
1301	unsigned last_pfn, pt_idx;
1302	uint64_t eaddr;
1303	int r;
1304
1305	/* validate the parameters */
1306	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1307	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1308		return -EINVAL;
1309
1310	/* make sure object fit at this offset */
1311	eaddr = saddr + size - 1;
1312	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1313		return -EINVAL;
1314
1315	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1316	if (last_pfn >= adev->vm_manager.max_pfn) {
1317		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1318			last_pfn, adev->vm_manager.max_pfn);
1319		return -EINVAL;
1320	}
1321
1322	saddr /= AMDGPU_GPU_PAGE_SIZE;
1323	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1324
1325	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1326	if (it) {
1327		struct amdgpu_bo_va_mapping *tmp;
1328		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1329		/* bo and tmp overlap, invalid addr */
1330		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1331			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1332			tmp->it.start, tmp->it.last + 1);
1333		r = -EINVAL;
1334		goto error;
1335	}
1336
1337	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1338	if (!mapping) {
1339		r = -ENOMEM;
1340		goto error;
1341	}
1342
1343	INIT_LIST_HEAD(&mapping->list);
1344	mapping->it.start = saddr;
1345	mapping->it.last = eaddr;
1346	mapping->offset = offset;
1347	mapping->flags = flags;
1348
1349	list_add(&mapping->list, &bo_va->invalids);
1350	interval_tree_insert(&mapping->it, &vm->va);
1351
1352	/* Make sure the page tables are allocated */
1353	saddr >>= amdgpu_vm_block_size;
1354	eaddr >>= amdgpu_vm_block_size;
1355
1356	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1357
1358	if (eaddr > vm->max_pde_used)
1359		vm->max_pde_used = eaddr;
1360
1361	/* walk over the address space and allocate the page tables */
1362	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1363		struct reservation_object *resv = vm->page_directory->tbo.resv;
1364		struct amdgpu_bo *pt;
1365
1366		if (vm->page_tables[pt_idx].bo)
1367			continue;
1368
1369		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1370				     AMDGPU_GPU_PAGE_SIZE, true,
1371				     AMDGPU_GEM_DOMAIN_VRAM,
1372				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1373				     AMDGPU_GEM_CREATE_SHADOW |
1374				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1375				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
1376				     NULL, resv, &pt);
1377		if (r)
1378			goto error_free;
1379
1380		/* Keep a reference to the page directory so it cannot be
1381		 * freed before its page tables.
1382		 */
1383		pt->parent = amdgpu_bo_ref(vm->page_directory);
1384
1385		vm->page_tables[pt_idx].bo = pt;
1386		vm->page_tables[pt_idx].addr = 0;
1387	}
1388
1389	return 0;
1390
1391error_free:
1392	list_del(&mapping->list);
1393	interval_tree_remove(&mapping->it, &vm->va);
1394	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1395	kfree(mapping);
1396
1397error:
1398	return r;
1399}
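/*
 * Sketch (illustrative only, not part of the original file): a single bo_va
 * can carry several mappings, e.g. two GPU-page-aligned windows of the same
 * BO mapped at different virtual addresses. The addresses and sizes are made
 * up for the example and assume a BO of at least 128 KiB.
 */
#if 0
static int example_map_two_windows(struct amdgpu_device *adev,
				   struct amdgpu_bo_va *bo_va)
{
	int r;

	/* first 64 KiB of the BO, read-only, at VA 0x100000 */
	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x10000,
			     AMDGPU_PTE_READABLE);
	if (r)
		return r;

	/* second 64 KiB of the BO, read-write, at VA 0x200000 */
	return amdgpu_vm_bo_map(adev, bo_va, 0x200000, 0x10000, 0x10000,
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}
#endif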
1400
1401/**
1402 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1403 *
1404 * @adev: amdgpu_device pointer
1405 * @bo_va: bo_va to remove the address from
1406 * @saddr: where the BO is mapped
1407 *
1408 * Remove a mapping of the BO at the specified addr from the VM.
1409 * Returns 0 for success, error for failure.
1410 *
1411 * Object has to be reserved and unreserved outside!
1412 */
1413int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1414		       struct amdgpu_bo_va *bo_va,
1415		       uint64_t saddr)
1416{
1417	struct amdgpu_bo_va_mapping *mapping;
1418	struct amdgpu_vm *vm = bo_va->vm;
1419	bool valid = true;
1420
1421	saddr /= AMDGPU_GPU_PAGE_SIZE;
1422
1423	list_for_each_entry(mapping, &bo_va->valids, list) {
1424		if (mapping->it.start == saddr)
1425			break;
1426	}
1427
1428	if (&mapping->list == &bo_va->valids) {
1429		valid = false;
1430
1431		list_for_each_entry(mapping, &bo_va->invalids, list) {
1432			if (mapping->it.start == saddr)
1433				break;
1434		}
1435
1436		if (&mapping->list == &bo_va->invalids)
1437			return -ENOENT;
1438	}
1439
1440	list_del(&mapping->list);
1441	interval_tree_remove(&mapping->it, &vm->va);
1442	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1443
1444	if (valid)
1445		list_add(&mapping->list, &vm->freed);
1446	else
1447		kfree(mapping);
1448
1449	return 0;
1450}
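/*
 * Usage sketch (illustrative only, not part of the original file): unmapping
 * only moves a previously valid mapping onto vm->freed; the PTEs are cleared
 * by a later amdgpu_vm_clear_freed() call. The helper name is hypothetical
 * and the usual reservation rules still apply.
 */
#if 0
static int example_unmap_and_clear(struct amdgpu_device *adev,
				   struct amdgpu_bo_va *bo_va,
				   uint64_t gpu_addr)
{
	int r;

	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
	if (r)
		return r;

	return amdgpu_vm_clear_freed(adev, bo_va->vm);
}
#endif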
1451
1452/**
1453 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1454 *
1455 * @adev: amdgpu_device pointer
1456 * @bo_va: requested bo_va
1457 *
1458 * Remove @bo_va->bo from the requested vm.
1459 *
1460 * Object has to be reserved!
1461 */
1462void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1463		      struct amdgpu_bo_va *bo_va)
1464{
1465	struct amdgpu_bo_va_mapping *mapping, *next;
1466	struct amdgpu_vm *vm = bo_va->vm;
1467
1468	list_del(&bo_va->bo_list);
1469
1470	spin_lock(&vm->status_lock);
1471	list_del(&bo_va->vm_status);
1472	spin_unlock(&vm->status_lock);
1473
1474	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1475		list_del(&mapping->list);
1476		interval_tree_remove(&mapping->it, &vm->va);
1477		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1478		list_add(&mapping->list, &vm->freed);
1479	}
1480	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1481		list_del(&mapping->list);
1482		interval_tree_remove(&mapping->it, &vm->va);
1483		kfree(mapping);
1484	}
1485
1486	dma_fence_put(bo_va->last_pt_update);
1487	kfree(bo_va);
1488}
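/*
 * Usage sketch (illustrative only, not part of the original file): dropping
 * all of a BO's mappings from a VM when the handle goes away. The helper name
 * is hypothetical; valid mappings end up on vm->freed and are cleared later.
 */
#if 0
static void example_release_bo_va(struct amdgpu_device *adev,
				  struct amdgpu_bo *bo,
				  struct amdgpu_bo_va *bo_va)
{
	if (amdgpu_bo_reserve(bo, false))
		return;

	amdgpu_vm_bo_rmv(adev, bo_va);
	amdgpu_bo_unreserve(bo);
}
#endif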
1489
1490/**
1491 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1492 *
1493 * @adev: amdgpu_device pointer
1495 * @bo: amdgpu buffer object
1496 *
1497 * Mark @bo as invalid.
1498 */
1499void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1500			     struct amdgpu_bo *bo)
1501{
1502	struct amdgpu_bo_va *bo_va;
1503
1504	list_for_each_entry(bo_va, &bo->va, bo_list) {
1505		spin_lock(&bo_va->vm->status_lock);
1506		if (list_empty(&bo_va->vm_status))
1507			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1508		spin_unlock(&bo_va->vm->status_lock);
1509	}
1510}
1511
1512/**
1513 * amdgpu_vm_init - initialize a vm instance
1514 *
1515 * @adev: amdgpu_device pointer
1516 * @vm: requested vm
1517 *
1518 * Init @vm fields.
1519 */
1520int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1521{
1522	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1523		AMDGPU_VM_PTE_COUNT * 8);
1524	unsigned pd_size, pd_entries;
1525	unsigned ring_instance;
1526	struct amdgpu_ring *ring;
1527	struct amd_sched_rq *rq;
1528	int i, r;
1529
1530	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1531		vm->ids[i] = NULL;
1532	vm->va = RB_ROOT;
1533	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1534	spin_lock_init(&vm->status_lock);
1535	INIT_LIST_HEAD(&vm->invalidated);
1536	INIT_LIST_HEAD(&vm->cleared);
1537	INIT_LIST_HEAD(&vm->freed);
1538
1539	pd_size = amdgpu_vm_directory_size(adev);
1540	pd_entries = amdgpu_vm_num_pdes(adev);
1541
1542	/* allocate page table array */
1543	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1544	if (vm->page_tables == NULL) {
1545		DRM_ERROR("Cannot allocate memory for page table array\n");
1546		return -ENOMEM;
1547	}
1548
1549	/* create scheduler entity for page table updates */
1550
1551	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1552	ring_instance %= adev->vm_manager.vm_pte_num_rings;
1553	ring = adev->vm_manager.vm_pte_rings[ring_instance];
1554	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1555	r = amd_sched_entity_init(&ring->sched, &vm->entity,
1556				  rq, amdgpu_sched_jobs);
1557	if (r)
1558		goto err;
1559
1560	vm->page_directory_fence = NULL;
1561
1562	r = amdgpu_bo_create(adev, pd_size, align, true,
1563			     AMDGPU_GEM_DOMAIN_VRAM,
1564			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1565			     AMDGPU_GEM_CREATE_SHADOW |
1566			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1567			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
1568			     NULL, NULL, &vm->page_directory);
1569	if (r)
1570		goto error_free_sched_entity;
1571
1572	r = amdgpu_bo_reserve(vm->page_directory, false);
1573	if (r)
1574		goto error_free_page_directory;
1575
1576	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1577	amdgpu_bo_unreserve(vm->page_directory);
1578
1579	return 0;
1580
1581error_free_page_directory:
1582	amdgpu_bo_unref(&vm->page_directory->shadow);
1583	amdgpu_bo_unref(&vm->page_directory);
1584	vm->page_directory = NULL;
1585
1586error_free_sched_entity:
1587	amd_sched_entity_fini(&ring->sched, &vm->entity);
1588
1589err:
1590	drm_free_large(vm->page_tables);
1591
1592	return r;
1593}
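/*
 * Usage sketch (illustrative only, not part of the original file): the
 * lifetime of a VM instance. In practice the struct is embedded in long-lived
 * per-file private data rather than a local variable; the helper exists only
 * to show the init/fini pairing.
 */
#if 0
static int example_vm_lifetime(struct amdgpu_device *adev)
{
	struct amdgpu_vm vm;
	int r;

	r = amdgpu_vm_init(adev, &vm);
	if (r)
		return r;

	/* ... add BOs, map them, submit work ... */

	amdgpu_vm_fini(adev, &vm);
	return 0;
}
#endif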
1594
1595/**
1596 * amdgpu_vm_fini - tear down a vm instance
1597 *
1598 * @adev: amdgpu_device pointer
1599 * @vm: requested vm
1600 *
1601 * Tear down @vm.
1602 * Unbind the VM and remove all bos from the vm bo list
1603 */
1604void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1605{
1606	struct amdgpu_bo_va_mapping *mapping, *tmp;
1607	int i;
1608
1609	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1610
1611	if (!RB_EMPTY_ROOT(&vm->va)) {
1612		dev_err(adev->dev, "still active bo inside vm\n");
1613	}
1614	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1615		list_del(&mapping->list);
1616		interval_tree_remove(&mapping->it, &vm->va);
1617		kfree(mapping);
1618	}
1619	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1620		list_del(&mapping->list);
1621		kfree(mapping);
1622	}
1623
1624	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1625		struct amdgpu_bo *pt = vm->page_tables[i].bo;
1626
1627		if (!pt)
1628			continue;
1629
1630		amdgpu_bo_unref(&pt->shadow);
1631		amdgpu_bo_unref(&pt);
1632	}
1633	drm_free_large(vm->page_tables);
1634
1635	amdgpu_bo_unref(&vm->page_directory->shadow);
1636	amdgpu_bo_unref(&vm->page_directory);
1637	dma_fence_put(vm->page_directory_fence);
1638}
1639
1640/**
1641 * amdgpu_vm_manager_init - init the VM manager
1642 *
1643 * @adev: amdgpu_device pointer
1644 *
1645 * Initialize the VM manager structures
1646 */
1647void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1648{
1649	unsigned i;
1650
1651	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1652
1653	/* skip over VMID 0, since it is the system VM */
1654	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1655		amdgpu_vm_reset_id(adev, i);
1656		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1657		list_add_tail(&adev->vm_manager.ids[i].list,
1658			      &adev->vm_manager.ids_lru);
1659	}
1660
1661	adev->vm_manager.fence_context =
1662		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1663	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1664		adev->vm_manager.seqno[i] = 0;
1665
1666	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1667	atomic64_set(&adev->vm_manager.client_counter, 0);
1668}
1669
1670/**
1671 * amdgpu_vm_manager_fini - cleanup VM manager
1672 *
1673 * @adev: amdgpu_device pointer
1674 *
1675 * Cleanup the VM manager and free resources.
1676 */
1677void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1678{
1679	unsigned i;
1680
1681	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1682		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1683
1684		dma_fence_put(adev->vm_manager.ids[i].first);
1685		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1686		dma_fence_put(id->flushed_updates);
1687		dma_fence_put(id->last_flush);
1688	}
1689}