v4.17
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27#include <linux/pagemap.h>
  28#include <linux/sync_file.h>
  29#include <drm/drmP.h>
  30#include <drm/amdgpu_drm.h>
  31#include <drm/drm_syncobj.h>
  32#include "amdgpu.h"
  33#include "amdgpu_trace.h"
  34
  35static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  36				      struct drm_amdgpu_cs_chunk_fence *data,
  37				      uint32_t *offset)
  38{
  39	struct drm_gem_object *gobj;
  40	unsigned long size;
  41
  42	gobj = drm_gem_object_lookup(p->filp, data->handle);
  43	if (gobj == NULL)
  44		return -EINVAL;
  45
  46	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
  47	p->uf_entry.priority = 0;
  48	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
  49	p->uf_entry.tv.shared = true;
  50	p->uf_entry.user_pages = NULL;
  51
  52	size = amdgpu_bo_size(p->uf_entry.robj);
  53	if (size != PAGE_SIZE || (data->offset + 8) > size)
  54		return -EINVAL;
  55
  56	*offset = data->offset;
  57
  58	drm_gem_object_put_unlocked(gobj);
  59
  60	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
  61		amdgpu_bo_unref(&p->uf_entry.robj);
  62		return -EINVAL;
  63	}
  64
  65	return 0;
  66}
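/* Note on amdgpu_cs_user_fence_chunk() above: the user fence is an 8-byte
 * value the GPU writes back at data->offset inside the supplied GEM BO, so
 * the BO must be exactly one page with room for those 8 bytes, and BOs
 * backed by a userptr mapping are rejected.
 */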
  67
  68static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
  69{
  70	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
  71	struct amdgpu_vm *vm = &fpriv->vm;
  72	union drm_amdgpu_cs *cs = data;
  73	uint64_t *chunk_array_user;
  74	uint64_t *chunk_array;
  75	unsigned size, num_ibs = 0;
  76	uint32_t uf_offset = 0;
  77	int i;
  78	int ret;
  79
  80	if (cs->in.num_chunks == 0)
  81		return 0;
  82
  83	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
  84	if (!chunk_array)
  85		return -ENOMEM;
  86
  87	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
  88	if (!p->ctx) {
  89		ret = -EINVAL;
  90		goto free_chunk;
  91	}
  92
  93	/* skip guilty context job */
  94	if (atomic_read(&p->ctx->guilty) == 1) {
  95		ret = -ECANCELED;
  96		goto free_chunk;
  97	}
  98
  99	mutex_lock(&p->ctx->lock);
 100
 101	/* get chunks */
 102	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 103	if (copy_from_user(chunk_array, chunk_array_user,
 104			   sizeof(uint64_t)*cs->in.num_chunks)) {
 105		ret = -EFAULT;
 106		goto free_chunk;
 107	}
 108
 109	p->nchunks = cs->in.num_chunks;
 110	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 111			    GFP_KERNEL);
 112	if (!p->chunks) {
 113		ret = -ENOMEM;
 114		goto free_chunk;
 115	}
 116
 117	for (i = 0; i < p->nchunks; i++) {
 118		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 119		struct drm_amdgpu_cs_chunk user_chunk;
 120		uint32_t __user *cdata;
 121
 122		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 123		if (copy_from_user(&user_chunk, chunk_ptr,
 124				       sizeof(struct drm_amdgpu_cs_chunk))) {
 125			ret = -EFAULT;
 126			i--;
 127			goto free_partial_kdata;
 128		}
 129		p->chunks[i].chunk_id = user_chunk.chunk_id;
 130		p->chunks[i].length_dw = user_chunk.length_dw;
 131
 132		size = p->chunks[i].length_dw;
 133		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 134
 135		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 136		if (p->chunks[i].kdata == NULL) {
 137			ret = -ENOMEM;
 138			i--;
 139			goto free_partial_kdata;
 140		}
 141		size *= sizeof(uint32_t);
 142		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 143			ret = -EFAULT;
 144			goto free_partial_kdata;
 145		}
 146
 147		switch (p->chunks[i].chunk_id) {
 148		case AMDGPU_CHUNK_ID_IB:
 149			++num_ibs;
 150			break;
 151
 152		case AMDGPU_CHUNK_ID_FENCE:
 153			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 154			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 155				ret = -EINVAL;
 156				goto free_partial_kdata;
 157			}
 158
 159			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 160							 &uf_offset);
 161			if (ret)
 162				goto free_partial_kdata;
 163
 164			break;
 165
 166		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 167		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 168		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 169			break;
 170
 171		default:
 172			ret = -EINVAL;
 173			goto free_partial_kdata;
 174		}
 175	}
 176
 177	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 178	if (ret)
 179		goto free_all_kdata;
 180
 181	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
 182		ret = -ECANCELED;
 183		goto free_all_kdata;
 184	}
 185
 186	if (p->uf_entry.robj)
 187		p->job->uf_addr = uf_offset;
 188	kfree(chunk_array);
 189	return 0;
 190
 191free_all_kdata:
 192	i = p->nchunks - 1;
 193free_partial_kdata:
 194	for (; i >= 0; i--)
 195		kvfree(p->chunks[i].kdata);
 196	kfree(p->chunks);
 197	p->chunks = NULL;
 198	p->nchunks = 0;
 199free_chunk:
 200	kfree(chunk_array);
 201
 202	return ret;
 203}
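/* Note: layout of the ioctl payload parsed by amdgpu_cs_parser_init() above.
 * Userspace passes an array of cs->in.num_chunks user pointers in
 * cs->in.chunks; each pointer refers to a struct drm_amdgpu_cs_chunk:
 *
 *	chunk_id   - AMDGPU_CHUNK_ID_IB, _FENCE, _DEPENDENCIES or _SYNCOBJ_IN/OUT
 *	length_dw  - payload length in dwords
 *	chunk_data - user pointer to the payload itself
 *
 * Every payload is copied into p->chunks[i].kdata here and only interpreted
 * later by amdgpu_cs_ib_fill(), amdgpu_cs_dependencies() and friends.
 */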
 204
 205/* Convert microseconds to bytes. */
 206static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 207{
 208	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 209		return 0;
 210
 211	/* Since accum_us is incremented by a million per second, just
 212	 * multiply it by the number of MB/s to get the number of bytes.
 213	 */
 214	return us << adev->mm_stats.log2_max_MBps;
 215}
 216
 217static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 218{
 219	if (!adev->mm_stats.log2_max_MBps)
 220		return 0;
 221
 222	return bytes >> adev->mm_stats.log2_max_MBps;
 223}
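/* Worked example of the two helpers above, assuming (purely for
 * illustration) log2_max_MBps == 6, i.e. a peak move rate of 2^6 = 64 MB/s:
 *
 *	us_to_bytes(adev, 1000000)  = 1000000 << 6 = 64,000,000 bytes
 *	bytes_to_us(adev, 64000000) = 64,000,000 >> 6 = 1,000,000 us
 *
 * One second of accumulated time therefore buys roughly 64 MB of buffer
 * moves; keeping the rate as a log2 value turns the scaling into a shift.
 */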
 224
 225/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 226 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 227 * which means it can go over the threshold once. If that happens, the driver
 228 * will be in debt and no other buffer migrations can be done until that debt
 229 * is repaid.
 230 *
 231 * This approach allows moving a buffer of any size (it's important to allow
 232 * that).
 233 *
 234 * The currency is simply time in microseconds and it increases as the clock
 235 * ticks. The accumulated microseconds (us) are converted to bytes and
 236 * returned.
 237 */
 238static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 239					      u64 *max_bytes,
 240					      u64 *max_vis_bytes)
 241{
 242	s64 time_us, increment_us;
 243	u64 free_vram, total_vram, used_vram;
 244
 245	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 246	 * throttling.
 247	 *
 248	 * It means that in order to get full max MBps, at least 5 IBs per
 249	 * second must be submitted and not more than 200ms apart from each
 250	 * other.
 251	 */
 252	const s64 us_upper_bound = 200000;
 253
 254	if (!adev->mm_stats.log2_max_MBps) {
 255		*max_bytes = 0;
 256		*max_vis_bytes = 0;
 257		return;
 258	}
 259
 260	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
 261	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 262	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 263
 264	spin_lock(&adev->mm_stats.lock);
 265
 266	/* Increase the amount of accumulated us. */
 267	time_us = ktime_to_us(ktime_get());
 268	increment_us = time_us - adev->mm_stats.last_update_us;
 269	adev->mm_stats.last_update_us = time_us;
 270	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 271                                      us_upper_bound);
 272
 273	/* This prevents the short period of low performance when the VRAM
 274	 * usage is low and the driver is in debt or doesn't have enough
 275	 * accumulated us to fill VRAM quickly.
 276	 *
 277	 * The situation can occur in these cases:
 278	 * - a lot of VRAM is freed by userspace
 279	 * - the presence of a big buffer causes a lot of evictions
 280	 *   (solution: split buffers into smaller ones)
 281	 *
 282	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 283	 * accum_us to a positive number.
 284	 */
 285	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 286		s64 min_us;
 287
  288		/* Be more aggressive on dGPUs. Try to fill a portion of free
 289		 * VRAM now.
 290		 */
 291		if (!(adev->flags & AMD_IS_APU))
 292			min_us = bytes_to_us(adev, free_vram / 4);
 293		else
 294			min_us = 0; /* Reset accum_us on APUs. */
 295
 296		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 297	}
 298
 299	/* This is set to 0 if the driver is in debt to disallow (optional)
 300	 * buffer moves.
 301	 */
 302	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 303
 304	/* Do the same for visible VRAM if half of it is free */
 305	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
 306		u64 total_vis_vram = adev->gmc.visible_vram_size;
 307		u64 used_vis_vram =
 308			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 309
 310		if (used_vis_vram < total_vis_vram) {
 311			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 312			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 313							  increment_us, us_upper_bound);
 314
 315			if (free_vis_vram >= total_vis_vram / 2)
 316				adev->mm_stats.accum_us_vis =
 317					max(bytes_to_us(adev, free_vis_vram / 2),
 318					    adev->mm_stats.accum_us_vis);
 319		}
 320
 321		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 322	} else {
 323		*max_vis_bytes = 0;
 324	}
 325
 326	spin_unlock(&adev->mm_stats.lock);
 327}
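/* Example of the throttling above: with us_upper_bound = 200000, a process
 * that submits an IB every 200 ms keeps accum_us saturated and receives the
 * full us_to_bytes(adev, 200000) move budget per submission.  Waiting longer
 * between submissions does not grow the budget because the accumulator is
 * clamped, which is the "at least 5 IBs per second" behaviour noted above.
 */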
 328
 329/* Report how many bytes have really been moved for the last command
 330 * submission. This can result in a debt that can stop buffer migrations
 331 * temporarily.
 332 */
 333void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 334				  u64 num_vis_bytes)
 335{
 336	spin_lock(&adev->mm_stats.lock);
 337	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 338	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 339	spin_unlock(&adev->mm_stats.lock);
 340}
 341
 342static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 343				 struct amdgpu_bo *bo)
 344{
 345	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 346	struct ttm_operation_ctx ctx = {
 347		.interruptible = true,
 348		.no_wait_gpu = false,
 349		.resv = bo->tbo.resv,
 350		.flags = 0
 351	};
 352	uint32_t domain;
 353	int r;
 354
 355	if (bo->pin_count)
 356		return 0;
 357
 358	/* Don't move this buffer if we have depleted our allowance
 359	 * to move it. Don't move anything if the threshold is zero.
 360	 */
 361	if (p->bytes_moved < p->bytes_moved_threshold) {
 362		if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 363		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 364			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 365			 * visible VRAM if we've depleted our allowance to do
 366			 * that.
 367			 */
 368			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 369				domain = bo->preferred_domains;
 370			else
 371				domain = bo->allowed_domains;
 372		} else {
 373			domain = bo->preferred_domains;
 374		}
 375	} else {
 376		domain = bo->allowed_domains;
 377	}
 378
 379retry:
 380	amdgpu_ttm_placement_from_domain(bo, domain);
 381	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 382
 383	p->bytes_moved += ctx.bytes_moved;
 384	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 385	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 386	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 387		p->bytes_moved_vis += ctx.bytes_moved;
 388
 389	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 390		domain = bo->allowed_domains;
 391		goto retry;
 392	}
 393
 394	return r;
 395}
 396
 397/* Last resort, try to evict something from the current working set */
 398static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 399				struct amdgpu_bo *validated)
 400{
 401	uint32_t domain = validated->allowed_domains;
 402	struct ttm_operation_ctx ctx = { true, false };
 403	int r;
 404
 405	if (!p->evictable)
 406		return false;
 407
 408	for (;&p->evictable->tv.head != &p->validated;
 409	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
 410
 411		struct amdgpu_bo_list_entry *candidate = p->evictable;
 412		struct amdgpu_bo *bo = candidate->robj;
 413		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 414		u64 initial_bytes_moved, bytes_moved;
 415		bool update_bytes_moved_vis;
 416		uint32_t other;
 417
 418		/* If we reached our current BO we can forget it */
 419		if (candidate->robj == validated)
 420			break;
 421
 422		/* We can't move pinned BOs here */
 423		if (bo->pin_count)
 424			continue;
 425
 426		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 427
 428		/* Check if this BO is in one of the domains we need space for */
 429		if (!(other & domain))
 430			continue;
 431
 432		/* Check if we can move this BO somewhere else */
 433		other = bo->allowed_domains & ~domain;
 434		if (!other)
 435			continue;
 436
  437		/* Good, we can try to move this BO somewhere else */
 438		amdgpu_ttm_placement_from_domain(bo, other);
 439		update_bytes_moved_vis =
 440			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 441			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 442			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
 443		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 444		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 445		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 446			initial_bytes_moved;
 447		p->bytes_moved += bytes_moved;
 448		if (update_bytes_moved_vis)
 449			p->bytes_moved_vis += bytes_moved;
 450
 451		if (unlikely(r))
 452			break;
 453
 454		p->evictable = list_prev_entry(p->evictable, tv.head);
 455		list_move(&candidate->tv.head, &p->validated);
 456
 457		return true;
 458	}
 459
 460	return false;
 461}
 462
 463static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 464{
 465	struct amdgpu_cs_parser *p = param;
 466	int r;
 467
 468	do {
 469		r = amdgpu_cs_bo_validate(p, bo);
 470	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
 471	if (r)
 472		return r;
 473
 474	if (bo->shadow)
 475		r = amdgpu_cs_bo_validate(p, bo->shadow);
 476
 477	return r;
 478}
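/* Note: amdgpu_cs_validate() above keeps retrying the validation as long as
 * it fails with -ENOMEM and amdgpu_cs_try_evict() manages to move another BO
 * of the current working set out of the contended domain, so the CS only
 * fails once nothing evictable is left.
 */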
 479
 480static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 481			    struct list_head *validated)
 482{
 483	struct ttm_operation_ctx ctx = { true, false };
 484	struct amdgpu_bo_list_entry *lobj;
 485	int r;
 486
 487	list_for_each_entry(lobj, validated, tv.head) {
 488		struct amdgpu_bo *bo = lobj->robj;
 489		bool binding_userptr = false;
 490		struct mm_struct *usermm;
 491
 492		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 493		if (usermm && usermm != current->mm)
 494			return -EPERM;
 495
 496		/* Check if we have user pages and nobody bound the BO already */
 497		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 498		    lobj->user_pages) {
 499			amdgpu_ttm_placement_from_domain(bo,
 500							 AMDGPU_GEM_DOMAIN_CPU);
 501			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 502			if (r)
 503				return r;
 504			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 505						     lobj->user_pages);
 506			binding_userptr = true;
 507		}
 508
 509		if (p->evictable == lobj)
 510			p->evictable = NULL;
 511
 512		r = amdgpu_cs_validate(p, bo);
 513		if (r)
 514			return r;
 515
 516		if (binding_userptr) {
 517			kvfree(lobj->user_pages);
 518			lobj->user_pages = NULL;
 519		}
 520	}
 521	return 0;
 522}
 523
 524static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 525				union drm_amdgpu_cs *cs)
 526{
 527	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 528	struct amdgpu_bo_list_entry *e;
 529	struct list_head duplicates;
 530	unsigned i, tries = 10;
 531	int r;
 532
 533	INIT_LIST_HEAD(&p->validated);
 534
 535	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 536	if (p->bo_list) {
 537		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 538		if (p->bo_list->first_userptr != p->bo_list->num_entries)
 539			p->mn = amdgpu_mn_get(p->adev);
 540	}
 541
 542	INIT_LIST_HEAD(&duplicates);
 543	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 544
 545	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
 546		list_add(&p->uf_entry.tv.head, &p->validated);
 547
 548	while (1) {
 549		struct list_head need_pages;
 550		unsigned i;
 551
 552		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 553					   &duplicates);
 554		if (unlikely(r != 0)) {
 555			if (r != -ERESTARTSYS)
 556				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 557			goto error_free_pages;
 558		}
 559
 560		/* Without a BO list we don't have userptr BOs */
 561		if (!p->bo_list)
 562			break;
 563
 564		INIT_LIST_HEAD(&need_pages);
 565		for (i = p->bo_list->first_userptr;
 566		     i < p->bo_list->num_entries; ++i) {
 567			struct amdgpu_bo *bo;
 568
 569			e = &p->bo_list->array[i];
 570			bo = e->robj;
 571
 572			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 573				 &e->user_invalidated) && e->user_pages) {
 574
 575				/* We acquired a page array, but somebody
 576				 * invalidated it. Free it and try again
 577				 */
 578				release_pages(e->user_pages,
 579					      bo->tbo.ttm->num_pages);
 580				kvfree(e->user_pages);
 581				e->user_pages = NULL;
 582			}
 583
 584			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 585			    !e->user_pages) {
 586				list_del(&e->tv.head);
 587				list_add(&e->tv.head, &need_pages);
 588
 589				amdgpu_bo_unreserve(e->robj);
 590			}
 591		}
 592
 593		if (list_empty(&need_pages))
 594			break;
 595
 596		/* Unreserve everything again. */
 597		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 598
 599		/* We tried too many times, just abort */
 600		if (!--tries) {
 601			r = -EDEADLK;
 602			DRM_ERROR("deadlock in %s\n", __func__);
 603			goto error_free_pages;
 604		}
 605
 606		/* Fill the page arrays for all userptrs. */
 607		list_for_each_entry(e, &need_pages, tv.head) {
 608			struct ttm_tt *ttm = e->robj->tbo.ttm;
 609
 610			e->user_pages = kvmalloc_array(ttm->num_pages,
 611							 sizeof(struct page*),
 612							 GFP_KERNEL | __GFP_ZERO);
 613			if (!e->user_pages) {
 614				r = -ENOMEM;
  615				DRM_ERROR("kvmalloc_array failure in %s\n", __func__);
 616				goto error_free_pages;
 617			}
 618
 619			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 620			if (r) {
 621				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 622				kvfree(e->user_pages);
 623				e->user_pages = NULL;
 624				goto error_free_pages;
 625			}
 626		}
 627
 628		/* And try again. */
 629		list_splice(&need_pages, &p->validated);
 630	}
 631
 632	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 633					  &p->bytes_moved_vis_threshold);
 634	p->bytes_moved = 0;
 635	p->bytes_moved_vis = 0;
 636	p->evictable = list_last_entry(&p->validated,
 637				       struct amdgpu_bo_list_entry,
 638				       tv.head);
 639
 640	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 641				      amdgpu_cs_validate, p);
 642	if (r) {
 643		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 644		goto error_validate;
 645	}
 646
 647	r = amdgpu_cs_list_validate(p, &duplicates);
 648	if (r) {
 649		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 650		goto error_validate;
 651	}
 652
 653	r = amdgpu_cs_list_validate(p, &p->validated);
 654	if (r) {
 655		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 656		goto error_validate;
 657	}
 658
 659	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 660				     p->bytes_moved_vis);
 661	if (p->bo_list) {
 662		struct amdgpu_bo *gds = p->bo_list->gds_obj;
 663		struct amdgpu_bo *gws = p->bo_list->gws_obj;
 664		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 665		struct amdgpu_vm *vm = &fpriv->vm;
 666		unsigned i;
 667
 668		for (i = 0; i < p->bo_list->num_entries; i++) {
 669			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 670
 671			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 672		}
 673
 674		if (gds) {
 675			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
 676			p->job->gds_size = amdgpu_bo_size(gds);
 677		}
 678		if (gws) {
 679			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
 680			p->job->gws_size = amdgpu_bo_size(gws);
 681		}
 682		if (oa) {
 683			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
 684			p->job->oa_size = amdgpu_bo_size(oa);
 685		}
 686	}
 687
 688	if (!r && p->uf_entry.robj) {
 689		struct amdgpu_bo *uf = p->uf_entry.robj;
 690
 691		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 692		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 693	}
 694
 695error_validate:
 696	if (r)
 697		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 698
 699error_free_pages:
 700
 701	if (p->bo_list) {
 702		for (i = p->bo_list->first_userptr;
 703		     i < p->bo_list->num_entries; ++i) {
 704			e = &p->bo_list->array[i];
 705
 706			if (!e->user_pages)
 707				continue;
 708
 709			release_pages(e->user_pages,
 710				      e->robj->tbo.ttm->num_pages);
 711			kvfree(e->user_pages);
 712		}
 713	}
 714
 715	return r;
 716}
 717
 718static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 719{
 720	struct amdgpu_bo_list_entry *e;
 721	int r;
 722
 723	list_for_each_entry(e, &p->validated, tv.head) {
 724		struct reservation_object *resv = e->robj->tbo.resv;
 725		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 726				     amdgpu_bo_explicit_sync(e->robj));
 727
 728		if (r)
 729			return r;
 730	}
 731	return 0;
 732}
 733
  734/**
  735 * amdgpu_cs_parser_fini() - clean parser states
  736 * @parser:	parser structure holding parsing context.
  737 * @error:	error number
  738 *
  739 * If error is set, the reserved buffers are backed off; otherwise only the
  740 * memory used by the parsing context is freed.
  741 **/
 742static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 743				  bool backoff)
 744{
 745	unsigned i;
 746
 747	if (error && backoff)
 748		ttm_eu_backoff_reservation(&parser->ticket,
 749					   &parser->validated);
 750
 751	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
 752		drm_syncobj_put(parser->post_dep_syncobjs[i]);
 753	kfree(parser->post_dep_syncobjs);
 754
 755	dma_fence_put(parser->fence);
 756
 757	if (parser->ctx) {
 758		mutex_unlock(&parser->ctx->lock);
 759		amdgpu_ctx_put(parser->ctx);
 760	}
 761	if (parser->bo_list)
 762		amdgpu_bo_list_put(parser->bo_list);
 763
 764	for (i = 0; i < parser->nchunks; i++)
 765		kvfree(parser->chunks[i].kdata);
 766	kfree(parser->chunks);
 767	if (parser->job)
 768		amdgpu_job_free(parser->job);
 769	amdgpu_bo_unref(&parser->uf_entry.robj);
 770}
 771
 772static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 773{
 774	struct amdgpu_device *adev = p->adev;
 775	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 776	struct amdgpu_vm *vm = &fpriv->vm;
 777	struct amdgpu_bo_va *bo_va;
 778	struct amdgpu_bo *bo;
 779	int i, r;
 780
 781	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 782	if (r)
 783		return r;
 784
 785	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 786	if (r)
 787		return r;
 788
 789	r = amdgpu_sync_fence(adev, &p->job->sync,
 790			      fpriv->prt_va->last_pt_update, false);
 791	if (r)
 792		return r;
 793
 794	if (amdgpu_sriov_vf(adev)) {
 795		struct dma_fence *f;
 796
 797		bo_va = fpriv->csa_va;
 798		BUG_ON(!bo_va);
 799		r = amdgpu_vm_bo_update(adev, bo_va, false);
 800		if (r)
 801			return r;
 802
 803		f = bo_va->last_pt_update;
 804		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
 805		if (r)
 806			return r;
 807	}
 808
 809	if (p->bo_list) {
 810		for (i = 0; i < p->bo_list->num_entries; i++) {
 811			struct dma_fence *f;
 812
 813			/* ignore duplicates */
 814			bo = p->bo_list->array[i].robj;
 815			if (!bo)
 816				continue;
 817
 818			bo_va = p->bo_list->array[i].bo_va;
 819			if (bo_va == NULL)
 820				continue;
 821
 822			r = amdgpu_vm_bo_update(adev, bo_va, false);
 823			if (r)
 824				return r;
 825
 826			f = bo_va->last_pt_update;
 827			r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
 828			if (r)
 829				return r;
 830		}
 831
 832	}
 833
 834	r = amdgpu_vm_handle_moved(adev, vm);
 835	if (r)
 836		return r;
 837
 838	r = amdgpu_vm_update_directories(adev, vm);
 839	if (r)
 840		return r;
 841
 842	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
 843	if (r)
 844		return r;
 845
 846	if (amdgpu_vm_debug && p->bo_list) {
 847		/* Invalidate all BOs to test for userspace bugs */
 848		for (i = 0; i < p->bo_list->num_entries; i++) {
 849			/* ignore duplicates */
 850			bo = p->bo_list->array[i].robj;
 851			if (!bo)
 852				continue;
 853
 854			amdgpu_vm_bo_invalidate(adev, bo, false);
 855		}
 856	}
 857
 858	return r;
 859}
 860
 861static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 862				 struct amdgpu_cs_parser *p)
 863{
 864	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 865	struct amdgpu_vm *vm = &fpriv->vm;
 866	struct amdgpu_ring *ring = p->job->ring;
 867	int r;
 868
 869	/* Only for UVD/VCE VM emulation */
 870	if (p->job->ring->funcs->parse_cs) {
 871		unsigned i, j;
 872
 873		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
 874			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 875			struct amdgpu_bo_va_mapping *m;
 876			struct amdgpu_bo *aobj = NULL;
 877			struct amdgpu_cs_chunk *chunk;
 878			uint64_t offset, va_start;
 879			struct amdgpu_ib *ib;
 880			uint8_t *kptr;
 881
 882			chunk = &p->chunks[i];
 883			ib = &p->job->ibs[j];
 884			chunk_ib = chunk->kdata;
 885
 886			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 887				continue;
 888
 889			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
 890			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
 891			if (r) {
 892				DRM_ERROR("IB va_start is invalid\n");
 893				return r;
 894			}
 895
 896			if ((va_start + chunk_ib->ib_bytes) >
 897			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 898				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 899				return -EINVAL;
 900			}
 901
 902			/* the IB should be reserved at this point */
 903			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 904			if (r) {
 905				return r;
 906			}
 907
 908			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 909			kptr += va_start - offset;
 910
 911			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 912			amdgpu_bo_kunmap(aobj);
 913
 914			r = amdgpu_ring_parse_cs(ring, p, j);
 915			if (r)
 916				return r;
 917
 918			j++;
 919		}
 920	}
 921
 922	if (p->job->vm) {
 923		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
 924
 925		r = amdgpu_bo_vm_update_pte(p);
 926		if (r)
 927			return r;
 928	}
 929
 930	return amdgpu_cs_sync_rings(p);
 931}
 932
 933static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 934			     struct amdgpu_cs_parser *parser)
 935{
 936	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 937	struct amdgpu_vm *vm = &fpriv->vm;
 938	int i, j;
 939	int r, ce_preempt = 0, de_preempt = 0;
 940
 941	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 942		struct amdgpu_cs_chunk *chunk;
 943		struct amdgpu_ib *ib;
 944		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 945		struct amdgpu_ring *ring;
 946
 947		chunk = &parser->chunks[i];
 948		ib = &parser->job->ibs[j];
 949		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 950
 951		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 952			continue;
 953
 954		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
 955			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 956				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 957					ce_preempt++;
 958				else
 959					de_preempt++;
 960			}
 961
 962			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
 963			if (ce_preempt > 1 || de_preempt > 1)
 964				return -EINVAL;
 965		}
 966
 967		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
 968					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
 969		if (r)
 970			return r;
 971
 972		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
 973			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 974			if (!parser->ctx->preamble_presented) {
 975				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
 976				parser->ctx->preamble_presented = true;
 977			}
 978		}
 979
 980		if (parser->job->ring && parser->job->ring != ring)
 981			return -EINVAL;
 982
 983		parser->job->ring = ring;
 984
 985		r =  amdgpu_ib_get(adev, vm,
 986					ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
 987					ib);
 988		if (r) {
 989			DRM_ERROR("Failed to get ib !\n");
 990			return r;
 991		}
 992
 993		ib->gpu_addr = chunk_ib->va_start;
 994		ib->length_dw = chunk_ib->ib_bytes / 4;
 995		ib->flags = chunk_ib->flags;
 996
 997		j++;
 998	}
 999
1000	/* UVD & VCE fw doesn't support user fences */
1001	if (parser->job->uf_addr && (
1002	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
1003	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
1004		return -EINVAL;
1005
1006	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
1007}
1008
1009static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
1010				       struct amdgpu_cs_chunk *chunk)
1011{
1012	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1013	unsigned num_deps;
1014	int i, r;
1015	struct drm_amdgpu_cs_chunk_dep *deps;
1016
1017	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
1018	num_deps = chunk->length_dw * 4 /
1019		sizeof(struct drm_amdgpu_cs_chunk_dep);
1020
1021	for (i = 0; i < num_deps; ++i) {
1022		struct amdgpu_ring *ring;
1023		struct amdgpu_ctx *ctx;
1024		struct dma_fence *fence;
1025
1026		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
1027		if (ctx == NULL)
1028			return -EINVAL;
1029
1030		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
1031					 deps[i].ip_type,
1032					 deps[i].ip_instance,
1033					 deps[i].ring, &ring);
1034		if (r) {
1035			amdgpu_ctx_put(ctx);
1036			return r;
1037		}
1038
1039		fence = amdgpu_ctx_get_fence(ctx, ring,
1040					     deps[i].handle);
1041		if (IS_ERR(fence)) {
1042			r = PTR_ERR(fence);
1043			amdgpu_ctx_put(ctx);
1044			return r;
1045		} else if (fence) {
1046			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
1047					true);
1048			dma_fence_put(fence);
1049			amdgpu_ctx_put(ctx);
1050			if (r)
1051				return r;
1052		}
1053	}
1054	return 0;
1055}
1056
1057static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1058						 uint32_t handle)
1059{
1060	int r;
1061	struct dma_fence *fence;
1062	r = drm_syncobj_find_fence(p->filp, handle, &fence);
1063	if (r)
1064		return r;
1065
1066	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1067	dma_fence_put(fence);
1068
1069	return r;
1070}
1071
1072static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1073					    struct amdgpu_cs_chunk *chunk)
1074{
1075	unsigned num_deps;
1076	int i, r;
1077	struct drm_amdgpu_cs_chunk_sem *deps;
1078
1079	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1080	num_deps = chunk->length_dw * 4 /
1081		sizeof(struct drm_amdgpu_cs_chunk_sem);
1082
1083	for (i = 0; i < num_deps; ++i) {
1084		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
1085		if (r)
1086			return r;
1087	}
1088	return 0;
1089}
1090
1091static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1092					     struct amdgpu_cs_chunk *chunk)
1093{
1094	unsigned num_deps;
1095	int i;
1096	struct drm_amdgpu_cs_chunk_sem *deps;
1097	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1098	num_deps = chunk->length_dw * 4 /
1099		sizeof(struct drm_amdgpu_cs_chunk_sem);
1100
1101	p->post_dep_syncobjs = kmalloc_array(num_deps,
1102					     sizeof(struct drm_syncobj *),
1103					     GFP_KERNEL);
1104	p->num_post_dep_syncobjs = 0;
1105
1106	if (!p->post_dep_syncobjs)
1107		return -ENOMEM;
1108
1109	for (i = 0; i < num_deps; ++i) {
1110		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
1111		if (!p->post_dep_syncobjs[i])
1112			return -EINVAL;
1113		p->num_post_dep_syncobjs++;
1114	}
1115	return 0;
1116}
1117
1118static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1119				  struct amdgpu_cs_parser *p)
1120{
1121	int i, r;
1122
1123	for (i = 0; i < p->nchunks; ++i) {
1124		struct amdgpu_cs_chunk *chunk;
1125
1126		chunk = &p->chunks[i];
1127
1128		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
1129			r = amdgpu_cs_process_fence_dep(p, chunk);
1130			if (r)
1131				return r;
1132		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
1133			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1134			if (r)
1135				return r;
1136		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
1137			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1138			if (r)
1139				return r;
1140		}
1141	}
1142
1143	return 0;
1144}
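/* Note: the three dependency chunk types handled above:
 *  - AMDGPU_CHUNK_ID_DEPENDENCIES: wait for the fence of an earlier CS,
 *    identified by (ctx_id, ip/ring, handle), before this job runs.
 *  - AMDGPU_CHUNK_ID_SYNCOBJ_IN:  wait for the fence of a DRM syncobj handle.
 *  - AMDGPU_CHUNK_ID_SYNCOBJ_OUT: collect syncobj handles that will be made
 *    to point at this job's fence in amdgpu_cs_post_dependencies().
 */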
1145
1146static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1147{
1148	int i;
1149
1150	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
1151		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
1152}
1153
1154static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1155			    union drm_amdgpu_cs *cs)
1156{
1157	struct amdgpu_ring *ring = p->job->ring;
1158	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
1159	struct amdgpu_job *job;
1160	unsigned i;
1161	uint64_t seq;
1162
1163	int r;
1164
1165	amdgpu_mn_lock(p->mn);
1166	if (p->bo_list) {
1167		for (i = p->bo_list->first_userptr;
1168		     i < p->bo_list->num_entries; ++i) {
1169			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
1170
1171			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
1172				amdgpu_mn_unlock(p->mn);
1173				return -ERESTARTSYS;
1174			}
1175		}
1176	}
1177
1178	job = p->job;
1179	p->job = NULL;
1180
1181	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
1182	if (r) {
1183		amdgpu_job_free(job);
1184		amdgpu_mn_unlock(p->mn);
1185		return r;
1186	}
1187
1188	job->owner = p->filp;
1189	job->fence_ctx = entity->fence_context;
1190	p->fence = dma_fence_get(&job->base.s_fence->finished);
1191
1192	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
1193	if (r) {
1194		dma_fence_put(p->fence);
1195		dma_fence_put(&job->base.s_fence->finished);
1196		amdgpu_job_free(job);
1197		amdgpu_mn_unlock(p->mn);
1198		return r;
1199	}
1200
1201	amdgpu_cs_post_dependencies(p);
1202
1203	cs->out.handle = seq;
1204	job->uf_sequence = seq;
1205
1206	amdgpu_job_free_resources(job);
1207	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
1208
1209	trace_amdgpu_cs_ioctl(job);
1210	drm_sched_entity_push_job(&job->base, entity);
1211
1212	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1213	amdgpu_mn_unlock(p->mn);
1214
1215	return 0;
1216}
1217
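/* Overview of the CS ioctl below:
 *  1. amdgpu_cs_parser_init()  - copy in the chunks and allocate the job
 *  2. amdgpu_cs_ib_fill()      - set up the IBs and select the target ring
 *  3. amdgpu_cs_parser_bos()   - reserve, validate and (if needed) move BOs
 *  4. amdgpu_cs_dependencies() - collect fence and syncobj dependencies
 *  5. amdgpu_cs_ib_vm_chunk()  - update page tables / patch VM-emulated IBs
 *  6. amdgpu_cs_submit()       - hand the job to the GPU scheduler
 * amdgpu_cs_parser_fini() runs in every case and backs off the buffer
 * reservations again if an error occurred.
 */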
1218int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1219{
1220	struct amdgpu_device *adev = dev->dev_private;
1221	union drm_amdgpu_cs *cs = data;
1222	struct amdgpu_cs_parser parser = {};
1223	bool reserved_buffers = false;
1224	int i, r;
1225
1226	if (!adev->accel_working)
1227		return -EBUSY;
1228
1229	parser.adev = adev;
1230	parser.filp = filp;
1231
1232	r = amdgpu_cs_parser_init(&parser, data);
1233	if (r) {
1234		DRM_ERROR("Failed to initialize parser !\n");
1235		goto out;
1236	}
1237
1238	r = amdgpu_cs_ib_fill(adev, &parser);
1239	if (r)
1240		goto out;
1241
1242	r = amdgpu_cs_parser_bos(&parser, data);
1243	if (r) {
1244		if (r == -ENOMEM)
1245			DRM_ERROR("Not enough memory for command submission!\n");
1246		else if (r != -ERESTARTSYS)
1247			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1248		goto out;
1249	}
1250
1251	reserved_buffers = true;
1252
1253	r = amdgpu_cs_dependencies(adev, &parser);
1254	if (r) {
1255		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1256		goto out;
1257	}
1258
1259	for (i = 0; i < parser.job->num_ibs; i++)
1260		trace_amdgpu_cs(&parser, i);
1261
1262	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
1263	if (r)
1264		goto out;
1265
1266	r = amdgpu_cs_submit(&parser, cs);
1267
1268out:
1269	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1270	return r;
1271}
1272
1273/**
1274 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1275 *
1276 * @dev: drm device
1277 * @data: data from userspace
1278 * @filp: file private
1279 *
1280 * Wait for the command submission identified by handle to finish.
1281 */
1282int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1283			 struct drm_file *filp)
1284{
1285	union drm_amdgpu_wait_cs *wait = data;
1286	struct amdgpu_device *adev = dev->dev_private;
1287	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1288	struct amdgpu_ring *ring = NULL;
1289	struct amdgpu_ctx *ctx;
1290	struct dma_fence *fence;
1291	long r;
1292
1293	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1294	if (ctx == NULL)
1295		return -EINVAL;
1296
1297	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
1298				 wait->in.ip_type, wait->in.ip_instance,
1299				 wait->in.ring, &ring);
1300	if (r) {
1301		amdgpu_ctx_put(ctx);
1302		return r;
1303	}
1304
1305	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
1306	if (IS_ERR(fence))
1307		r = PTR_ERR(fence);
1308	else if (fence) {
1309		r = dma_fence_wait_timeout(fence, true, timeout);
1310		if (r > 0 && fence->error)
1311			r = fence->error;
1312		dma_fence_put(fence);
1313	} else
1314		r = 1;
1315
1316	amdgpu_ctx_put(ctx);
1317	if (r < 0)
1318		return r;
1319
1320	memset(wait, 0, sizeof(*wait));
1321	wait->out.status = (r == 0);
1322
1323	return 0;
1324}
1325
1326/**
1327 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1328 *
1329 * @adev: amdgpu device
1330 * @filp: file private
1331 * @user: drm_amdgpu_fence copied from user space
1332 */
1333static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1334					     struct drm_file *filp,
1335					     struct drm_amdgpu_fence *user)
1336{
1337	struct amdgpu_ring *ring;
1338	struct amdgpu_ctx *ctx;
1339	struct dma_fence *fence;
1340	int r;
1341
1342	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1343	if (ctx == NULL)
1344		return ERR_PTR(-EINVAL);
1345
1346	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
1347				 user->ip_instance, user->ring, &ring);
1348	if (r) {
1349		amdgpu_ctx_put(ctx);
1350		return ERR_PTR(r);
1351	}
1352
1353	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
1354	amdgpu_ctx_put(ctx);
1355
1356	return fence;
1357}
1358
1359int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1360				    struct drm_file *filp)
1361{
1362	struct amdgpu_device *adev = dev->dev_private;
1363	union drm_amdgpu_fence_to_handle *info = data;
1364	struct dma_fence *fence;
1365	struct drm_syncobj *syncobj;
1366	struct sync_file *sync_file;
1367	int fd, r;
1368
1369	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1370	if (IS_ERR(fence))
1371		return PTR_ERR(fence);
1372
1373	switch (info->in.what) {
1374	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1375		r = drm_syncobj_create(&syncobj, 0, fence);
1376		dma_fence_put(fence);
1377		if (r)
1378			return r;
1379		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1380		drm_syncobj_put(syncobj);
1381		return r;
1382
1383	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1384		r = drm_syncobj_create(&syncobj, 0, fence);
1385		dma_fence_put(fence);
1386		if (r)
1387			return r;
1388		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
1389		drm_syncobj_put(syncobj);
1390		return r;
1391
1392	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1393		fd = get_unused_fd_flags(O_CLOEXEC);
1394		if (fd < 0) {
1395			dma_fence_put(fence);
1396			return fd;
1397		}
1398
1399		sync_file = sync_file_create(fence);
1400		dma_fence_put(fence);
1401		if (!sync_file) {
1402			put_unused_fd(fd);
1403			return -ENOMEM;
1404		}
1405
1406		fd_install(fd, sync_file->file);
1407		info->out.handle = fd;
1408		return 0;
1409
1410	default:
1411		return -EINVAL;
1412	}
1413}
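/* Illustrative sketch (not part of this file) of driving the ioctl above
 * from userspace via libdrm; fd, ctx_id and cs_handle are assumed to come
 * from the caller and all error handling is omitted:
 *
 *	union drm_amdgpu_fence_to_handle fth = {};
 *
 *	fth.in.fence.ctx_id  = ctx_id;            // context the CS ran in
 *	fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *	fth.in.fence.seq_no  = cs_handle;         // cs->out.handle from the CS
 *	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *
 *	drmCommandWriteRead(fd, DRM_AMDGPU_FENCE_TO_HANDLE, &fth, sizeof(fth));
 *	int sync_file_fd = fth.out.handle;        // pollable sync_file fd
 */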
1414
1415/**
 1416 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1417 *
1418 * @adev: amdgpu device
1419 * @filp: file private
1420 * @wait: wait parameters
1421 * @fences: array of drm_amdgpu_fence
1422 */
1423static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1424				     struct drm_file *filp,
1425				     union drm_amdgpu_wait_fences *wait,
1426				     struct drm_amdgpu_fence *fences)
1427{
1428	uint32_t fence_count = wait->in.fence_count;
1429	unsigned int i;
1430	long r = 1;
1431
1432	for (i = 0; i < fence_count; i++) {
1433		struct dma_fence *fence;
1434		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1435
1436		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1437		if (IS_ERR(fence))
1438			return PTR_ERR(fence);
1439		else if (!fence)
1440			continue;
1441
1442		r = dma_fence_wait_timeout(fence, true, timeout);
1443		dma_fence_put(fence);
1444		if (r < 0)
1445			return r;
1446
1447		if (r == 0)
1448			break;
1449
1450		if (fence->error)
1451			return fence->error;
1452	}
1453
1454	memset(wait, 0, sizeof(*wait));
1455	wait->out.status = (r > 0);
1456
1457	return 0;
1458}
1459
1460/**
1461 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1462 *
1463 * @adev: amdgpu device
1464 * @filp: file private
1465 * @wait: wait parameters
1466 * @fences: array of drm_amdgpu_fence
1467 */
1468static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1469				    struct drm_file *filp,
1470				    union drm_amdgpu_wait_fences *wait,
1471				    struct drm_amdgpu_fence *fences)
1472{
1473	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1474	uint32_t fence_count = wait->in.fence_count;
1475	uint32_t first = ~0;
1476	struct dma_fence **array;
1477	unsigned int i;
1478	long r;
1479
1480	/* Prepare the fence array */
1481	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1482
1483	if (array == NULL)
1484		return -ENOMEM;
1485
1486	for (i = 0; i < fence_count; i++) {
1487		struct dma_fence *fence;
1488
1489		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1490		if (IS_ERR(fence)) {
1491			r = PTR_ERR(fence);
1492			goto err_free_fence_array;
1493		} else if (fence) {
1494			array[i] = fence;
1495		} else { /* NULL, the fence has been already signaled */
1496			r = 1;
1497			first = i;
1498			goto out;
1499		}
1500	}
1501
1502	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1503				       &first);
1504	if (r < 0)
1505		goto err_free_fence_array;
1506
1507out:
1508	memset(wait, 0, sizeof(*wait));
1509	wait->out.status = (r > 0);
1510	wait->out.first_signaled = first;
1511
1512	if (first < fence_count && array[first])
1513		r = array[first]->error;
1514	else
1515		r = 0;
1516
1517err_free_fence_array:
1518	for (i = 0; i < fence_count; i++)
1519		dma_fence_put(array[i]);
1520	kfree(array);
1521
1522	return r;
1523}
1524
1525/**
1526 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1527 *
1528 * @dev: drm device
1529 * @data: data from userspace
1530 * @filp: file private
1531 */
1532int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1533				struct drm_file *filp)
1534{
1535	struct amdgpu_device *adev = dev->dev_private;
1536	union drm_amdgpu_wait_fences *wait = data;
1537	uint32_t fence_count = wait->in.fence_count;
1538	struct drm_amdgpu_fence *fences_user;
1539	struct drm_amdgpu_fence *fences;
1540	int r;
1541
1542	/* Get the fences from userspace */
1543	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1544			GFP_KERNEL);
1545	if (fences == NULL)
1546		return -ENOMEM;
1547
1548	fences_user = u64_to_user_ptr(wait->in.fences);
1549	if (copy_from_user(fences, fences_user,
1550		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1551		r = -EFAULT;
1552		goto err_free_fences;
1553	}
1554
1555	if (wait->in.wait_all)
1556		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1557	else
1558		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1559
1560err_free_fences:
1561	kfree(fences);
1562
1563	return r;
1564}
1565
 1566/**
 1567 * amdgpu_cs_find_mapping - find BO and VA mapping for a VM address
 1568 *
 1569 * @parser: command submission parser context
 1570 * @addr: VM address
 1571 * @bo: resulting BO of the mapping found
 1572 * @map: resulting VA mapping found
 1573 *
 1574 * Search the buffer objects of the command submission context for a certain
 1575 * virtual memory address. Returns 0 on success, a negative error code otherwise.
 1576 */
1577int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1578			   uint64_t addr, struct amdgpu_bo **bo,
1579			   struct amdgpu_bo_va_mapping **map)
1580{
1581	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1582	struct ttm_operation_ctx ctx = { false, false };
1583	struct amdgpu_vm *vm = &fpriv->vm;
1584	struct amdgpu_bo_va_mapping *mapping;
1585	int r;
1586
1587	addr /= AMDGPU_GPU_PAGE_SIZE;
1588
1589	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1590	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1591		return -EINVAL;
1592
1593	*bo = mapping->bo_va->base.bo;
1594	*map = mapping;
1595
1596	/* Double check that the BO is reserved by this CS */
1597	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
1598		return -EINVAL;
1599
1600	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1601		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1602		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
1603		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1604		if (r)
1605			return r;
1606	}
1607
1608	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1609}
v4.6
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27#include <linux/list_sort.h>
  28#include <linux/pagemap.h>
 
  29#include <drm/drmP.h>
  30#include <drm/amdgpu_drm.h>
 
  31#include "amdgpu.h"
  32#include "amdgpu_trace.h"
  33
  34int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
  35		       u32 ip_instance, u32 ring,
  36		       struct amdgpu_ring **out_ring)
  37{
  38	/* Right now all IPs have only one instance - multiple rings. */
  39	if (ip_instance != 0) {
  40		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
  41		return -EINVAL;
  42	}
  43
  44	switch (ip_type) {
  45	default:
  46		DRM_ERROR("unknown ip type: %d\n", ip_type);
  47		return -EINVAL;
  48	case AMDGPU_HW_IP_GFX:
  49		if (ring < adev->gfx.num_gfx_rings) {
  50			*out_ring = &adev->gfx.gfx_ring[ring];
  51		} else {
  52			DRM_ERROR("only %d gfx rings are supported now\n",
  53				  adev->gfx.num_gfx_rings);
  54			return -EINVAL;
  55		}
  56		break;
  57	case AMDGPU_HW_IP_COMPUTE:
  58		if (ring < adev->gfx.num_compute_rings) {
  59			*out_ring = &adev->gfx.compute_ring[ring];
  60		} else {
  61			DRM_ERROR("only %d compute rings are supported now\n",
  62				  adev->gfx.num_compute_rings);
  63			return -EINVAL;
  64		}
  65		break;
  66	case AMDGPU_HW_IP_DMA:
  67		if (ring < adev->sdma.num_instances) {
  68			*out_ring = &adev->sdma.instance[ring].ring;
  69		} else {
  70			DRM_ERROR("only %d SDMA rings are supported\n",
  71				  adev->sdma.num_instances);
  72			return -EINVAL;
  73		}
  74		break;
  75	case AMDGPU_HW_IP_UVD:
  76		*out_ring = &adev->uvd.ring;
  77		break;
  78	case AMDGPU_HW_IP_VCE:
  79		if (ring < 2){
  80			*out_ring = &adev->vce.ring[ring];
  81		} else {
  82			DRM_ERROR("only two VCE rings are supported\n");
  83			return -EINVAL;
  84		}
  85		break;
  86	}
  87	return 0;
  88}
  89
  90static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  91				      struct amdgpu_user_fence *uf,
  92				      struct drm_amdgpu_cs_chunk_fence *fence_data)
  93{
  94	struct drm_gem_object *gobj;
  95	uint32_t handle;
  96
  97	handle = fence_data->handle;
  98	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
  99				     fence_data->handle);
 100	if (gobj == NULL)
 101		return -EINVAL;
 102
 103	uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 104	uf->offset = fence_data->offset;
 105
 106	if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
 107		drm_gem_object_unreference_unlocked(gobj);
 108		return -EINVAL;
 109	}
 110
 111	p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
 112	p->uf_entry.priority = 0;
 113	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 114	p->uf_entry.tv.shared = true;
 115	p->uf_entry.user_pages = NULL;
 116
 117	drm_gem_object_unreference_unlocked(gobj);
 
 
 
 
 
 
 
 
 
 
 
 
 118	return 0;
 119}
 120
 121int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 122{
 123	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 
 124	union drm_amdgpu_cs *cs = data;
 125	uint64_t *chunk_array_user;
 126	uint64_t *chunk_array;
 127	struct amdgpu_user_fence uf = {};
 128	unsigned size, num_ibs = 0;
 
 129	int i;
 130	int ret;
 131
 132	if (cs->in.num_chunks == 0)
 133		return 0;
 134
 135	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 136	if (!chunk_array)
 137		return -ENOMEM;
 138
 139	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 140	if (!p->ctx) {
 141		ret = -EINVAL;
 142		goto free_chunk;
 143	}
 144
 
 
 
 
 
 
 
 
 145	/* get chunks */
 146	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 147	if (copy_from_user(chunk_array, chunk_array_user,
 148			   sizeof(uint64_t)*cs->in.num_chunks)) {
 149		ret = -EFAULT;
 150		goto put_ctx;
 151	}
 152
 153	p->nchunks = cs->in.num_chunks;
 154	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 155			    GFP_KERNEL);
 156	if (!p->chunks) {
 157		ret = -ENOMEM;
 158		goto put_ctx;
 159	}
 160
 161	for (i = 0; i < p->nchunks; i++) {
 162		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 163		struct drm_amdgpu_cs_chunk user_chunk;
 164		uint32_t __user *cdata;
 165
 166		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 167		if (copy_from_user(&user_chunk, chunk_ptr,
 168				       sizeof(struct drm_amdgpu_cs_chunk))) {
 169			ret = -EFAULT;
 170			i--;
 171			goto free_partial_kdata;
 172		}
 173		p->chunks[i].chunk_id = user_chunk.chunk_id;
 174		p->chunks[i].length_dw = user_chunk.length_dw;
 175
 176		size = p->chunks[i].length_dw;
 177		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 178
 179		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 180		if (p->chunks[i].kdata == NULL) {
 181			ret = -ENOMEM;
 182			i--;
 183			goto free_partial_kdata;
 184		}
 185		size *= sizeof(uint32_t);
 186		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 187			ret = -EFAULT;
 188			goto free_partial_kdata;
 189		}
 190
 191		switch (p->chunks[i].chunk_id) {
 192		case AMDGPU_CHUNK_ID_IB:
 193			++num_ibs;
 194			break;
 195
 196		case AMDGPU_CHUNK_ID_FENCE:
 197			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 198			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 199				ret = -EINVAL;
 200				goto free_partial_kdata;
 201			}
 202
 203			ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
 
 204			if (ret)
 205				goto free_partial_kdata;
 206
 207			break;
 208
 209		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 
 
 210			break;
 211
 212		default:
 213			ret = -EINVAL;
 214			goto free_partial_kdata;
 215		}
 216	}
 217
 218	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
 219	if (ret)
 220		goto free_all_kdata;
 221
 222	p->job->uf = uf;
 
 
 
 223
 
 
 224	kfree(chunk_array);
 225	return 0;
 226
 227free_all_kdata:
 228	i = p->nchunks - 1;
 229free_partial_kdata:
 230	for (; i >= 0; i--)
 231		drm_free_large(p->chunks[i].kdata);
 232	kfree(p->chunks);
 233put_ctx:
 234	amdgpu_ctx_put(p->ctx);
 235free_chunk:
 236	kfree(chunk_array);
 237
 238	return ret;
 239}
 240
 241/* Returns how many bytes TTM can move per IB.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 242 */
 243static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 
 
 244{
 245	u64 real_vram_size = adev->mc.real_vram_size;
 246	u64 vram_usage = atomic64_read(&adev->vram_usage);
 247
 248	/* This function is based on the current VRAM usage.
 
 249	 *
 250	 * - If all of VRAM is free, allow relocating the number of bytes that
 251	 *   is equal to 1/4 of the size of VRAM for this IB.
 
 
 
 252
 253	 * - If more than one half of VRAM is occupied, only allow relocating
 254	 *   1 MB of data for this IB.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 255	 *
 256	 * - From 0 to one half of used VRAM, the threshold decreases
 257	 *   linearly.
 258	 *         __________________
 259	 * 1/4 of -|\               |
 260	 * VRAM    | \              |
 261	 *         |  \             |
 262	 *         |   \            |
 263	 *         |    \           |
 264	 *         |     \          |
 265	 *         |      \         |
 266	 *         |       \________|1 MB
 267	 *         |----------------|
 268	 *    VRAM 0 %             100 %
 269	 *         used            used
 270	 *
 271	 * Note: It's a threshold, not a limit. The threshold must be crossed
 272	 * for buffer relocations to stop, so any buffer of an arbitrary size
 273	 * can be moved as long as the threshold isn't crossed before
 274	 * the relocation takes place. We don't want to disable buffer
 275	 * relocations completely.
 276	 *
 277	 * The idea is that buffers should be placed in VRAM at creation time
 278	 * and TTM should only do a minimum number of relocations during
 279	 * command submission. In practice, you need to submit at least
 280	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
 281	 *
 282	 * Also, things can get pretty crazy under memory pressure and actual
 283	 * VRAM usage can change a lot, so playing safe even at 50% does
 284	 * consistently increase performance.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 285	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 286
 287	u64 half_vram = real_vram_size >> 1;
 288	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
 289	u64 bytes_moved_threshold = half_free_vram >> 1;
 290	return max(bytes_moved_threshold, 1024*1024ull);
 291}
 292
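/**
 * amdgpu_cs_list_validate - validate (and place) all BOs on a list
 * @p: parser context holding the bytes-moved budget for this IB
 * @validated: list of amdgpu_bo_list_entry objects to validate
 *
 * Rejects userptr BOs that belong to a foreign process, binds freshly
 * acquired user pages and validates every BO on the list. Pinned BOs are
 * skipped. Each BO is first tried in its preferred domains; once
 * p->bytes_moved exceeds the threshold, or if validation fails, the
 * allowed domains are used as a fallback.
 */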
 293int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 294			    struct list_head *validated)
 295{
 296	struct amdgpu_bo_list_entry *lobj;
 297	u64 initial_bytes_moved;
 298	int r;
 299
 300	list_for_each_entry(lobj, validated, tv.head) {
 301		struct amdgpu_bo *bo = lobj->robj;
 302		bool binding_userptr = false;
 303		struct mm_struct *usermm;
 304		uint32_t domain;
 305
 306		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 307		if (usermm && usermm != current->mm)
 308			return -EPERM;
 309
 310		/* Check if we have user pages and nobody bound the BO already */
 311		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
 312			size_t size = sizeof(struct page *);
 313
 314			size *= bo->tbo.ttm->num_pages;
 315			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
 316			binding_userptr = true;
 317		}
 318
 319		if (bo->pin_count)
 320			continue;
 321
 322		/* Avoid moving this one if we have moved too many buffers
 323		 * for this IB already.
 324		 *
 325		 * Note that this allows moving at least one buffer of
 326		 * any size, because it doesn't take the current "bo"
 327		 * into account. We don't want to disallow buffer moves
 328		 * completely.
 329		 */
 330		if (p->bytes_moved <= p->bytes_moved_threshold)
 331			domain = bo->prefered_domains;
 332		else
 333			domain = bo->allowed_domains;
 334
 335	retry:
 336		amdgpu_ttm_placement_from_domain(bo, domain);
 337		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
 338		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 339		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
 340			       initial_bytes_moved;
 341
 342		if (unlikely(r)) {
 343			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
 344				domain = bo->allowed_domains;
 345				goto retry;
 346			}
 347			return r;
 348		}
 349
 350		if (binding_userptr) {
 351			drm_free_large(lobj->user_pages);
 352			lobj->user_pages = NULL;
 353		}
 354	}
 355	return 0;
 356}
 357
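/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs used by a submission
 * @p: parser context
 * @cs: CS ioctl data containing the BO list handle
 *
 * Builds the validated list from the BO list, the VM page directory and the
 * optional user fence BO, reserves everything, (re)acquires the pages of
 * userptr BOs that got invalidated and validates the whole list within the
 * bytes-moved threshold. Gives up with -EDEADLK after ten retries.
 */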
 358static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 359				union drm_amdgpu_cs *cs)
 360{
 361	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 362	struct amdgpu_bo_list_entry *e;
 363	struct list_head duplicates;
 364	bool need_mmap_lock = false;
 365	unsigned i, tries = 10;
 366	int r;
 367
 368	INIT_LIST_HEAD(&p->validated);
 369
 370	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 371	if (p->bo_list) {
 372		need_mmap_lock = p->bo_list->first_userptr !=
 373			p->bo_list->num_entries;
 374		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 375	}
 376
 377	INIT_LIST_HEAD(&duplicates);
 378	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 379
 380	if (p->job->uf.bo)
 381		list_add(&p->uf_entry.tv.head, &p->validated);
 382
 383	if (need_mmap_lock)
 384		down_read(&current->mm->mmap_sem);
 385
 386	while (1) {
 387		struct list_head need_pages;
 388		unsigned i;
 389
 390		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 391					   &duplicates);
 392		if (unlikely(r != 0))
 393			goto error_free_pages;
 394
 395		/* Without a BO list we don't have userptr BOs */
 396		if (!p->bo_list)
 397			break;
 398
 399		INIT_LIST_HEAD(&need_pages);
 400		for (i = p->bo_list->first_userptr;
 401		     i < p->bo_list->num_entries; ++i) {
 402
 403			e = &p->bo_list->array[i];
 404
 405			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
 406				 &e->user_invalidated) && e->user_pages) {
 407
 408				/* We acquired a page array, but somebody
 409				 * invalidated it. Free it and try again.
 410				 */
 411				release_pages(e->user_pages,
 412					      e->robj->tbo.ttm->num_pages,
 413					      false);
 414				drm_free_large(e->user_pages);
 415				e->user_pages = NULL;
 416			}
 417
 418			if (e->robj->tbo.ttm->state != tt_bound &&
 419			    !e->user_pages) {
 420				list_del(&e->tv.head);
 421				list_add(&e->tv.head, &need_pages);
 422
 423				amdgpu_bo_unreserve(e->robj);
 424			}
 425		}
 426
 427		if (list_empty(&need_pages))
 428			break;
 429
 430		/* Unreserve everything again. */
 431		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 432
 433		/* We tried too often, just abort */
 434		if (!--tries) {
 435			r = -EDEADLK;
 436			goto error_free_pages;
 437		}
 438
 439		/* Fill the page arrays for all userptrs. */
 440		list_for_each_entry(e, &need_pages, tv.head) {
 441			struct ttm_tt *ttm = e->robj->tbo.ttm;
 442
 443			e->user_pages = drm_calloc_large(ttm->num_pages,
 444							 sizeof(struct page*));
 445			if (!e->user_pages) {
 446				r = -ENOMEM;
 447				goto error_free_pages;
 448			}
 449
 450			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 451			if (r) {
 452				drm_free_large(e->user_pages);
 453				e->user_pages = NULL;
 454				goto error_free_pages;
 455			}
 456		}
 457
 458		/* And try again. */
 459		list_splice(&need_pages, &p->validated);
 460	}
 461
 462	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
 463
 464	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 465	p->bytes_moved = 0;
 466
 467	r = amdgpu_cs_list_validate(p, &duplicates);
 468	if (r)
 469		goto error_validate;
 470
 471	r = amdgpu_cs_list_validate(p, &p->validated);
 472	if (r)
 473		goto error_validate;
 474
 475	if (p->bo_list) {
 476		struct amdgpu_vm *vm = &fpriv->vm;
 477		unsigned i;
 478
 479		for (i = 0; i < p->bo_list->num_entries; i++) {
 480			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 481
 482			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 483		}
 484	}
 485
 486error_validate:
 487	if (r) {
 488		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
 489		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 490	}
 491
 492error_free_pages:
 493
 494	if (need_mmap_lock)
 495		up_read(&current->mm->mmap_sem);
 496
 497	if (p->bo_list) {
 498		for (i = p->bo_list->first_userptr;
 499		     i < p->bo_list->num_entries; ++i) {
 500			e = &p->bo_list->array[i];
 501
 502			if (!e->user_pages)
 503				continue;
 504
 505			release_pages(e->user_pages,
 506				      e->robj->tbo.ttm->num_pages,
 507				      false);
 508			drm_free_large(e->user_pages);
 509		}
 510	}
 511
 512	return r;
 513}
 514
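/**
 * amdgpu_cs_sync_rings - make the job wait for the validated BOs
 * @p: parser context
 *
 * Adds the fences from the reservation object of every validated BO to the
 * job's sync object, so the submission waits for previous users of the
 * buffers to finish.
 */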
 515static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 516{
 517	struct amdgpu_bo_list_entry *e;
 518	int r;
 519
 520	list_for_each_entry(e, &p->validated, tv.head) {
 521		struct reservation_object *resv = e->robj->tbo.resv;
 522		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 523
 524		if (r)
 525			return r;
 526	}
 527	return 0;
 528}
 529
 530static int cmp_size_smaller_first(void *priv, struct list_head *a,
 531				  struct list_head *b)
 532{
 533	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
 534	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
 535
 536	/* Sort A before B if A is smaller. */
 537	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 538}
 539
 540/**
 541 * amdgpu_cs_parser_fini() - clean parser states
 542 * @parser:	parser structure holding parsing context.
 543 * @error:	error number
 * @backoff:	whether to back off the buffer reservations
 544 *
 545 * If error is set then unvalidate the buffers, otherwise just free the memory
 546 * used by the parsing context.
 547 **/
 548static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 549{
 550	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 551	unsigned i;
 552
 553	if (!error) {
 554		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
 555
 556		/* Sort the buffer list from the smallest to largest buffer,
 557		 * which affects the order of buffers in the LRU list.
 558		 * This assures that the smallest buffers are added first
 559		 * to the LRU list, so they are likely to be evicted first
 560		 * later on, instead of large buffers whose eviction is more
 561		 * expensive.
 562		 *
 563		 * This slightly lowers the number of bytes moved by TTM
 564		 * per frame under memory pressure.
 565		 */
 566		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 567
 568		ttm_eu_fence_buffer_objects(&parser->ticket,
 569					    &parser->validated,
 570					    parser->fence);
 571	} else if (backoff) {
 572		ttm_eu_backoff_reservation(&parser->ticket,
 573					   &parser->validated);
 574	}
 575	fence_put(parser->fence);
 576
 577	if (parser->ctx)
 578		amdgpu_ctx_put(parser->ctx);
 579	if (parser->bo_list)
 580		amdgpu_bo_list_put(parser->bo_list);
 581
 582	for (i = 0; i < parser->nchunks; i++)
 583		drm_free_large(parser->chunks[i].kdata);
 584	kfree(parser->chunks);
 585	if (parser->job)
 586		amdgpu_job_free(parser->job);
 587	amdgpu_bo_unref(&parser->uf_entry.robj);
 588}
 589
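/**
 * amdgpu_bo_vm_update_pte - bring the VM page tables up to date
 * @p: parser context
 * @vm: VM the command stream executes in
 *
 * Updates the page directory, clears freed and invalid mappings, updates the
 * page table entries of every BO in the list and adds the resulting fences
 * to the job's sync object. With amdgpu_vm_debug set, all BOs are
 * invalidated afterwards to exercise the update paths.
 */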
 590static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 591				   struct amdgpu_vm *vm)
 592{
 593	struct amdgpu_device *adev = p->adev;
 594	struct amdgpu_bo_va *bo_va;
 595	struct amdgpu_bo *bo;
 596	int i, r;
 597
 598	r = amdgpu_vm_update_page_directory(adev, vm);
 599	if (r)
 600		return r;
 601
 602	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
 603	if (r)
 604		return r;
 605
 606	r = amdgpu_vm_clear_freed(adev, vm);
 607	if (r)
 608		return r;
 609
 610	if (p->bo_list) {
 611		for (i = 0; i < p->bo_list->num_entries; i++) {
 612			struct fence *f;
 613
 614			/* ignore duplicates */
 615			bo = p->bo_list->array[i].robj;
 616			if (!bo)
 617				continue;
 618
 619			bo_va = p->bo_list->array[i].bo_va;
 620			if (bo_va == NULL)
 621				continue;
 622
 623			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
 624			if (r)
 625				return r;
 626
 627			f = bo_va->last_pt_update;
 628			r = amdgpu_sync_fence(adev, &p->job->sync, f);
 629			if (r)
 630				return r;
 631		}
 632
 633	}
 634
 635	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 636
 637	if (amdgpu_vm_debug && p->bo_list) {
 638		/* Invalidate all BOs to test for userspace bugs */
 639		for (i = 0; i < p->bo_list->num_entries; i++) {
 640			/* ignore duplicates */
 641			bo = p->bo_list->array[i].robj;
 642			if (!bo)
 643				continue;
 644
 645			amdgpu_vm_bo_invalidate(adev, bo);
 646		}
 647	}
 648
 649	return r;
 650}
 651
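/**
 * amdgpu_cs_ib_vm_chunk - patch the IBs and update the VM
 * @adev: amdgpu device
 * @p: parser context
 *
 * For rings that emulate VM access (UVD/VCE) every IB is run through the
 * ring's CS parser first; afterwards the page tables are updated and the
 * job is synced against the validated buffers.
 */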
 652static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 653				 struct amdgpu_cs_parser *p)
 654{
 655	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 656	struct amdgpu_vm *vm = &fpriv->vm;
 657	struct amdgpu_ring *ring = p->job->ring;
 658	int i, r;
 659
 660	/* Only for UVD/VCE VM emulation */
 661	if (ring->funcs->parse_cs) {
 662		for (i = 0; i < p->job->num_ibs; i++) {
 663			r = amdgpu_ring_parse_cs(ring, p, i);
 664			if (r)
 665				return r;
 666		}
 667	}
 668
 669	r = amdgpu_bo_vm_update_pte(p, vm);
 670	if (!r)
 671		amdgpu_cs_sync_rings(p);
 672
 673	return r;
 674}
 675
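/* Turn a detected lockup (-EDEADLK) into a GPU reset; if the reset succeeds,
 * return -EAGAIN so userspace resubmits the command stream.
 */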
 676static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
 677{
 678	if (r == -EDEADLK) {
 679		r = amdgpu_gpu_reset(adev);
 680		if (!r)
 681			r = -EAGAIN;
 682	}
 683	return r;
 684}
 685
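/**
 * amdgpu_cs_ib_fill - set up the job's IBs from the IB chunks
 * @adev: amdgpu device
 * @parser: parser context
 *
 * All IB chunks must target the same ring. For rings with a CS parser
 * (UVD/VCE) the IB contents are copied into a kernel allocated IB so they
 * can be validated; for all other rings the IB simply points at the GPU VA
 * provided by userspace. GDS/GWS/OA resources are attached to the first IB
 * and the optional user fence to the last one.
 */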
 686static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 687			     struct amdgpu_cs_parser *parser)
 688{
 689	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 690	struct amdgpu_vm *vm = &fpriv->vm;
 691	int i, j;
 692	int r;
 693
 694	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 695		struct amdgpu_cs_chunk *chunk;
 696		struct amdgpu_ib *ib;
 697		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 698		struct amdgpu_ring *ring;
 699
 700		chunk = &parser->chunks[i];
 701		ib = &parser->job->ibs[j];
 702		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 703
 704		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 705			continue;
 706
 707		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
 708				       chunk_ib->ip_instance, chunk_ib->ring,
 709				       &ring);
 710		if (r)
 711			return r;
 712
 713		if (parser->job->ring && parser->job->ring != ring)
 714			return -EINVAL;
 715
 716		parser->job->ring = ring;
 717
 718		if (ring->funcs->parse_cs) {
 719			struct amdgpu_bo_va_mapping *m;
 720			struct amdgpu_bo *aobj = NULL;
 721			uint64_t offset;
 722			uint8_t *kptr;
 723
 724			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
 725						   &aobj);
 726			if (!aobj) {
 727				DRM_ERROR("IB va_start is invalid\n");
 728				return -EINVAL;
 729			}
 730
 731			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
 732			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 733				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 734				return -EINVAL;
 735			}
 736
 737			/* the IB should be reserved at this point */
 738			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 739			if (r) {
 740				return r;
 741			}
 742
 743			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 744			kptr += chunk_ib->va_start - offset;
 745
 746			r =  amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
 747			if (r) {
 748				DRM_ERROR("Failed to get ib !\n");
 749				return r;
 750			}
 751
 752			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 753			amdgpu_bo_kunmap(aobj);
 754		} else {
 755			r =  amdgpu_ib_get(adev, vm, 0, ib);
 756			if (r) {
 757				DRM_ERROR("Failed to get ib !\n");
 758				return r;
 759			}
 760
 761			ib->gpu_addr = chunk_ib->va_start;
 762		}
 763
 764		ib->length_dw = chunk_ib->ib_bytes / 4;
 765		ib->flags = chunk_ib->flags;
 766		ib->ctx = parser->ctx;
 767		j++;
 768	}
 769
 770	/* add GDS resources to first IB */
 771	if (parser->bo_list) {
 772		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
 773		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
 774		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
 775		struct amdgpu_ib *ib = &parser->job->ibs[0];
 776
 777		if (gds) {
 778			ib->gds_base = amdgpu_bo_gpu_offset(gds);
 779			ib->gds_size = amdgpu_bo_size(gds);
 780		}
 781		if (gws) {
 782			ib->gws_base = amdgpu_bo_gpu_offset(gws);
 783			ib->gws_size = amdgpu_bo_size(gws);
 784		}
 785		if (oa) {
 786			ib->oa_base = amdgpu_bo_gpu_offset(oa);
 787			ib->oa_size = amdgpu_bo_size(oa);
 788		}
 789	}
 790	/* wrap the last IB with user fence */
 791	if (parser->job->uf.bo) {
 792		struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
 793
 794		/* UVD & VCE fw doesn't support user fences */
 795		if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
 796		    parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
 797			return -EINVAL;
 798
 799		ib->user = &parser->job->uf;
 800	}
 801
 802	return 0;
 803}
 804
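/**
 * amdgpu_cs_dependencies - handle the DEPENDENCIES chunks
 * @adev: amdgpu device
 * @p: parser context
 *
 * Looks up the fence behind every dependency entry and adds it to the job's
 * sync object, so the submission only runs after its dependencies completed.
 */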
 805static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 806				  struct amdgpu_cs_parser *p)
 807{
 808	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 809	int i, j, r;
 810
 811	for (i = 0; i < p->nchunks; ++i) {
 812		struct drm_amdgpu_cs_chunk_dep *deps;
 813		struct amdgpu_cs_chunk *chunk;
 814		unsigned num_deps;
 815
 816		chunk = &p->chunks[i];
 817
 818		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
 819			continue;
 820
 821		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 822		num_deps = chunk->length_dw * 4 /
 823			sizeof(struct drm_amdgpu_cs_chunk_dep);
 824
 825		for (j = 0; j < num_deps; ++j) {
 826			struct amdgpu_ring *ring;
 827			struct amdgpu_ctx *ctx;
 828			struct fence *fence;
 829
 830			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 831					       deps[j].ip_instance,
 832					       deps[j].ring, &ring);
 833			if (r)
 834				return r;
 835
 836			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
 837			if (ctx == NULL)
 838				return -EINVAL;
 839
 840			fence = amdgpu_ctx_get_fence(ctx, ring,
 841						     deps[j].handle);
 842			if (IS_ERR(fence)) {
 843				r = PTR_ERR(fence);
 844				amdgpu_ctx_put(ctx);
 845				return r;
 846
 847			} else if (fence) {
 848				r = amdgpu_sync_fence(adev, &p->job->sync,
 849						      fence);
 850				fence_put(fence);
 851				amdgpu_ctx_put(ctx);
 852				if (r)
 853					return r;
 854			}
 855		}
 856	}
 857
 858	return 0;
 859}
 860
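/**
 * amdgpu_cs_submit - hand the prepared job over to the GPU scheduler
 * @p: parser context
 * @cs: CS ioctl data; out.handle receives the new fence sequence number
 *
 * Creates the scheduler fence, publishes it through the context so userspace
 * can wait on it via the returned handle and pushes the job to the scheduler
 * entity of the target ring.
 */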
 861static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 862			    union drm_amdgpu_cs *cs)
 863{
 864	struct amdgpu_ring *ring = p->job->ring;
 865	struct amd_sched_fence *fence;
 866	struct amdgpu_job *job;
 867
 868	job = p->job;
 869	p->job = NULL;
 870
 871	job->base.sched = &ring->sched;
 872	job->base.s_entity = &p->ctx->rings[ring->idx].entity;
 873	job->owner = p->filp;
 874
 875	fence = amd_sched_fence_create(job->base.s_entity, p->filp);
 876	if (!fence) {
 877		amdgpu_job_free(job);
 878		return -ENOMEM;
 879	}
 880
 881	job->base.s_fence = fence;
 882	p->fence = fence_get(&fence->base);
 883
 884	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
 885					      &fence->base);
 886	job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 887
 888	trace_amdgpu_cs_ioctl(job);
 889	amd_sched_entity_push_job(&job->base);
 890
 891	return 0;
 892}
 893
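/**
 * amdgpu_cs_ioctl - the command submission entry point
 * @dev: drm device
 * @data: CS ioctl data from userspace
 * @filp: file private
 *
 * Parses the chunks, reserves and validates the buffers, fills in the IBs,
 * resolves the dependencies and submits the resulting job. A detected lockup
 * leads to a GPU reset and -EAGAIN so userspace can resubmit.
 */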
 894int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 895{
 896	struct amdgpu_device *adev = dev->dev_private;
 897	union drm_amdgpu_cs *cs = data;
 898	struct amdgpu_cs_parser parser = {};
 899	bool reserved_buffers = false;
 900	int i, r;
 901
 902	if (!adev->accel_working)
 903		return -EBUSY;
 904
 905	parser.adev = adev;
 906	parser.filp = filp;
 907
 908	r = amdgpu_cs_parser_init(&parser, data);
 909	if (r) {
 910		DRM_ERROR("Failed to initialize parser !\n");
 911		amdgpu_cs_parser_fini(&parser, r, false);
 912		r = amdgpu_cs_handle_lockup(adev, r);
 913		return r;
 914	}
 915	r = amdgpu_cs_parser_bos(&parser, data);
 916	if (r == -ENOMEM)
 917		DRM_ERROR("Not enough memory for command submission!\n");
 918	else if (r && r != -ERESTARTSYS)
 919		DRM_ERROR("Failed to process the buffer list %d!\n", r);
 920	else if (!r) {
 921		reserved_buffers = true;
 922		r = amdgpu_cs_ib_fill(adev, &parser);
 923	}
 924
 925	if (!r) {
 926		r = amdgpu_cs_dependencies(adev, &parser);
 927		if (r)
 928			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 929	}
 930
 931	if (r)
 932		goto out;
 933
 934	for (i = 0; i < parser.job->num_ibs; i++)
 935		trace_amdgpu_cs(&parser, i);
 936
 937	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 938	if (r)
 939		goto out;
 940
 941	r = amdgpu_cs_submit(&parser, cs);
 942
 943out:
 944	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 945	r = amdgpu_cs_handle_lockup(adev, r);
 946	return r;
 947}
 948
 949/**
 950 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 951 *
 952 * @dev: drm device
 953 * @data: data from userspace
 954 * @filp: file private
 955 *
 956 * Wait for the command submission identified by handle to finish.
 957 */
 958int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 959			 struct drm_file *filp)
 960{
 961	union drm_amdgpu_wait_cs *wait = data;
 962	struct amdgpu_device *adev = dev->dev_private;
 963	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 964	struct amdgpu_ring *ring = NULL;
 965	struct amdgpu_ctx *ctx;
 966	struct fence *fence;
 967	long r;
 968
 969	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
 970			       wait->in.ring, &ring);
 971	if (r)
 972		return r;
 973
 974	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
 975	if (ctx == NULL)
 976		return -EINVAL;
 977
 978	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
 979	if (IS_ERR(fence))
 980		r = PTR_ERR(fence);
 981	else if (fence) {
 982		r = fence_wait_timeout(fence, true, timeout);
 983		fence_put(fence);
 984	} else
 985		r = 1;
 986
 987	amdgpu_ctx_put(ctx);
 988	if (r < 0)
 989		return r;
 990
 991	memset(wait, 0, sizeof(*wait));
 992	wait->out.status = (r == 0);
 993
 994	return 0;
 995}
 996
 997/**
 998 * amdgpu_cs_find_mapping - find the BO VA mapping for a VM address
 999 *
1000 * @parser: command submission parser context
1001 * @addr: VM address
1002 * @bo: resulting BO of the mapping found
1003 *
1004 * Search the buffer objects in the command submission context for a certain
1005 * virtual memory address. Returns the mapping structure when found, NULL
1006 * otherwise.
1007 */
1008struct amdgpu_bo_va_mapping *
1009amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1010		       uint64_t addr, struct amdgpu_bo **bo)
1011{
1012	struct amdgpu_bo_va_mapping *mapping;
1013	unsigned i;
1014
1015	if (!parser->bo_list)
1016		return NULL;
1017
1018	addr /= AMDGPU_GPU_PAGE_SIZE;
1019
1020	for (i = 0; i < parser->bo_list->num_entries; i++) {
1021		struct amdgpu_bo_list_entry *lobj;
1022
1023		lobj = &parser->bo_list->array[i];
1024		if (!lobj->bo_va)
1025			continue;
1026
1027		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
1028			if (mapping->it.start > addr ||
1029			    addr > mapping->it.last)
1030				continue;
1031
1032			*bo = lobj->bo_va->bo;
1033			return mapping;
1034		}
1035
1036		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
1037			if (mapping->it.start > addr ||
1038			    addr > mapping->it.last)
1039				continue;
1040
1041			*bo = lobj->bo_va->bo;
1042			return mapping;
1043		}
1044	}
1045
1046	return NULL;
1047}