v5.14.15
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
  35#include "amdgpu.h"
  36#include "amdgpu_trace.h"
  37#include "amdgpu_gmc.h"
  38#include "amdgpu_gem.h"
  39#include "amdgpu_ras.h"
  40
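     /*
      * Set up the user fence: look up the GEM object named in the fence
      * chunk, take a reference on its BO in p->uf_entry, and check that the
      * BO is exactly one page, is not a userptr BO, and can hold the 8-byte
      * fence value at the requested offset.
      */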
  41static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  42				      struct drm_amdgpu_cs_chunk_fence *data,
  43				      uint32_t *offset)
  44{
  45	struct drm_gem_object *gobj;
  46	struct amdgpu_bo *bo;
  47	unsigned long size;
  48	int r;
  49
  50	gobj = drm_gem_object_lookup(p->filp, data->handle);
  51	if (gobj == NULL)
  52		return -EINVAL;
  53
  54	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
  55	p->uf_entry.priority = 0;
  56	p->uf_entry.tv.bo = &bo->tbo;
  57	/* One for TTM and one for the CS job */
  58	p->uf_entry.tv.num_shared = 2;
  59
  60	drm_gem_object_put(gobj);
  61
  62	size = amdgpu_bo_size(bo);
  63	if (size != PAGE_SIZE || (data->offset + 8) > size) {
  64		r = -EINVAL;
  65		goto error_unref;
  66	}
  67
  68	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
  69		r = -EINVAL;
  70		goto error_unref;
  71	}
  72
  73	*offset = data->offset;
  74
  75	return 0;
  76
  77error_unref:
  78	amdgpu_bo_unref(&bo);
  79	return r;
  80}
  81
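     /*
      * Build the parser's BO list directly from the array of entries passed
      * in an AMDGPU_CHUNK_ID_BO_HANDLES chunk.
      */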
  82static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
  83				      struct drm_amdgpu_bo_list_in *data)
  84{
  85	int r;
  86	struct drm_amdgpu_bo_list_entry *info = NULL;
  87
  88	r = amdgpu_bo_create_list_entry_array(data, &info);
  89	if (r)
  90		return r;
  91
  92	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
  93				  &p->bo_list);
  94	if (r)
  95		goto error_free;
  96
  97	kvfree(info);
  98	return 0;
  99
 100error_free:
 101	kvfree(info);
 102
 103	return r;
 104}
 105
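     /*
      * First stage of command submission: copy the chunk array from
      * userspace, validate each chunk, count the IBs, handle the fence and
      * BO-handles chunks and allocate the amdgpu_job. The context lock taken
      * here is released in amdgpu_cs_parser_fini().
      */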
 106static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 107{
 108	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 109	struct amdgpu_vm *vm = &fpriv->vm;
 110	uint64_t *chunk_array_user;
 111	uint64_t *chunk_array;
 112	unsigned size, num_ibs = 0;
 113	uint32_t uf_offset = 0;
 114	int i;
 115	int ret;
 116
 117	if (cs->in.num_chunks == 0)
 118		return 0;
 119
 120	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 121	if (!chunk_array)
 122		return -ENOMEM;
 123
 124	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 125	if (!p->ctx) {
 126		ret = -EINVAL;
 127		goto free_chunk;
 128	}
 129
 130	mutex_lock(&p->ctx->lock);
 131
 132	/* skip guilty context job */
 133	if (atomic_read(&p->ctx->guilty) == 1) {
 134		ret = -ECANCELED;
 135		goto free_chunk;
 136	}
 137
 138	/* get chunks */
 139	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 140	if (copy_from_user(chunk_array, chunk_array_user,
 141			   sizeof(uint64_t)*cs->in.num_chunks)) {
 142		ret = -EFAULT;
 143		goto free_chunk;
 144	}
 145
 146	p->nchunks = cs->in.num_chunks;
 147	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 148			    GFP_KERNEL);
 149	if (!p->chunks) {
 150		ret = -ENOMEM;
 151		goto free_chunk;
 152	}
 153
 154	for (i = 0; i < p->nchunks; i++) {
 155		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 156		struct drm_amdgpu_cs_chunk user_chunk;
 157		uint32_t __user *cdata;
 158
 159		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 160		if (copy_from_user(&user_chunk, chunk_ptr,
 161				       sizeof(struct drm_amdgpu_cs_chunk))) {
 162			ret = -EFAULT;
 163			i--;
 164			goto free_partial_kdata;
 165		}
 166		p->chunks[i].chunk_id = user_chunk.chunk_id;
 167		p->chunks[i].length_dw = user_chunk.length_dw;
 168
 169		size = p->chunks[i].length_dw;
 170		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 171
 172		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 173		if (p->chunks[i].kdata == NULL) {
 174			ret = -ENOMEM;
 175			i--;
 176			goto free_partial_kdata;
 177		}
 178		size *= sizeof(uint32_t);
 179		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 180			ret = -EFAULT;
 181			goto free_partial_kdata;
 182		}
 183
 184		switch (p->chunks[i].chunk_id) {
 185		case AMDGPU_CHUNK_ID_IB:
 186			++num_ibs;
 187			break;
 188
 189		case AMDGPU_CHUNK_ID_FENCE:
 190			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 191			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 192				ret = -EINVAL;
 193				goto free_partial_kdata;
 194			}
 195
 196			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 197							 &uf_offset);
 198			if (ret)
 199				goto free_partial_kdata;
 200
 201			break;
 202
 203		case AMDGPU_CHUNK_ID_BO_HANDLES:
 204			size = sizeof(struct drm_amdgpu_bo_list_in);
 205			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 206				ret = -EINVAL;
 207				goto free_partial_kdata;
 208			}
 209
 210			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
 211			if (ret)
 212				goto free_partial_kdata;
 213
 214			break;
 215
 216		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 217		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 218		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 219		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 220		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 221		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 222			break;
 223
 224		default:
 225			ret = -EINVAL;
 226			goto free_partial_kdata;
 227		}
 228	}
 229
 230	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 231	if (ret)
 232		goto free_all_kdata;
 233
 234	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
 235		ret = -ECANCELED;
 236		goto free_all_kdata;
 237	}
 238
 239	if (p->uf_entry.tv.bo)
 240		p->job->uf_addr = uf_offset;
 241	kvfree(chunk_array);
 242
 243	/* Use this opportunity to fill in task info for the vm */
 244	amdgpu_vm_set_task_info(vm);
 245
 246	return 0;
 247
 248free_all_kdata:
 249	i = p->nchunks - 1;
 250free_partial_kdata:
 251	for (; i >= 0; i--)
 252		kvfree(p->chunks[i].kdata);
 253	kvfree(p->chunks);
 254	p->chunks = NULL;
 255	p->nchunks = 0;
 256free_chunk:
 257	kvfree(chunk_array);
 258
 259	return ret;
 260}
 261
 262/* Convert microseconds to bytes. */
 263static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 264{
 265	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 266		return 0;
 267
 268	/* Since accum_us is incremented by a million per second, just
 269	 * multiply it by the number of MB/s to get the number of bytes.
 270	 */
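     	/* Worked example (hypothetical numbers): with log2_max_MBps == 6,
     	 * i.e. a 64 MB/s budget, an accumulated 1,000,000 us (one second)
     	 * converts to 1,000,000 << 6 = 64,000,000 bytes of allowed moves.
     	 */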
 271	return us << adev->mm_stats.log2_max_MBps;
 272}
 273
 274static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 275{
 276	if (!adev->mm_stats.log2_max_MBps)
 277		return 0;
 278
 279	return bytes >> adev->mm_stats.log2_max_MBps;
 280}
 281
 282/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 283 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 284 * which means it can go over the threshold once. If that happens, the driver
 285 * will be in debt and no other buffer migrations can be done until that debt
 286 * is repaid.
 287 *
 288 * This approach allows moving a buffer of any size (it's important to allow
 289 * that).
 290 *
 291 * The currency is simply time in microseconds and it increases as the clock
 292 * ticks. The accumulated microseconds (us) are converted to bytes and
 293 * returned.
 294 */
 295static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 296					      u64 *max_bytes,
 297					      u64 *max_vis_bytes)
 298{
 299	s64 time_us, increment_us;
 300	u64 free_vram, total_vram, used_vram;
 301	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 302	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 303	 * throttling.
 304	 *
 305	 * It means that in order to get full max MBps, at least 5 IBs per
 306	 * second must be submitted and not more than 200ms apart from each
 307	 * other.
 308	 */
 309	const s64 us_upper_bound = 200000;
 310
 311	if (!adev->mm_stats.log2_max_MBps) {
 312		*max_bytes = 0;
 313		*max_vis_bytes = 0;
 314		return;
 315	}
 316
 317	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 318	used_vram = amdgpu_vram_mgr_usage(vram_man);
 319	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 320
 321	spin_lock(&adev->mm_stats.lock);
 322
 323	/* Increase the amount of accumulated us. */
 324	time_us = ktime_to_us(ktime_get());
 325	increment_us = time_us - adev->mm_stats.last_update_us;
 326	adev->mm_stats.last_update_us = time_us;
 327	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 328				      us_upper_bound);
 329
 330	/* This prevents the short period of low performance when the VRAM
 331	 * usage is low and the driver is in debt or doesn't have enough
 332	 * accumulated us to fill VRAM quickly.
 333	 *
 334	 * The situation can occur in these cases:
 335	 * - a lot of VRAM is freed by userspace
 336	 * - the presence of a big buffer causes a lot of evictions
 337	 *   (solution: split buffers into smaller ones)
 338	 *
 339	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 340	 * accum_us to a positive number.
 341	 */
 342	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 343		s64 min_us;
 344
  345		/* Be more aggressive on dGPUs. Try to fill a portion of free
 346		 * VRAM now.
 347		 */
 348		if (!(adev->flags & AMD_IS_APU))
 349			min_us = bytes_to_us(adev, free_vram / 4);
 350		else
 351			min_us = 0; /* Reset accum_us on APUs. */
 352
 353		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 354	}
 355
 356	/* This is set to 0 if the driver is in debt to disallow (optional)
 357	 * buffer moves.
 358	 */
 359	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 360
 361	/* Do the same for visible VRAM if half of it is free */
 362	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 363		u64 total_vis_vram = adev->gmc.visible_vram_size;
 364		u64 used_vis_vram =
 365		  amdgpu_vram_mgr_vis_usage(vram_man);
 366
 367		if (used_vis_vram < total_vis_vram) {
 368			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 369			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 370							  increment_us, us_upper_bound);
 371
 372			if (free_vis_vram >= total_vis_vram / 2)
 373				adev->mm_stats.accum_us_vis =
 374					max(bytes_to_us(adev, free_vis_vram / 2),
 375					    adev->mm_stats.accum_us_vis);
 376		}
 377
 378		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 379	} else {
 380		*max_vis_bytes = 0;
 381	}
 382
 383	spin_unlock(&adev->mm_stats.lock);
 384}
 385
 386/* Report how many bytes have really been moved for the last command
 387 * submission. This can result in a debt that can stop buffer migrations
 388 * temporarily.
 389 */
 390void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 391				  u64 num_vis_bytes)
 392{
 393	spin_lock(&adev->mm_stats.lock);
 394	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 395	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 396	spin_unlock(&adev->mm_stats.lock);
 397}
 398
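     /*
      * Per-BO validation callback, also used for the page table BOs through
      * amdgpu_vm_validate_pt_bos(): skip pinned BOs, pick a placement based
      * on how much of the move budget is left, validate the BO, fall back to
      * the allowed domains on -ENOMEM and charge the bytes actually moved
      * against the budgets.
      */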
 399static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 400{
 401	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 402	struct amdgpu_cs_parser *p = param;
 403	struct ttm_operation_ctx ctx = {
 404		.interruptible = true,
 405		.no_wait_gpu = false,
 406		.resv = bo->tbo.base.resv
 407	};
 408	uint32_t domain;
 409	int r;
 410
 411	if (bo->tbo.pin_count)
 412		return 0;
 413
 414	/* Don't move this buffer if we have depleted our allowance
 415	 * to move it. Don't move anything if the threshold is zero.
 416	 */
 417	if (p->bytes_moved < p->bytes_moved_threshold &&
 418	    (!bo->tbo.base.dma_buf ||
 419	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 420		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 421		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 422			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 423			 * visible VRAM if we've depleted our allowance to do
 424			 * that.
 425			 */
 426			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 427				domain = bo->preferred_domains;
 428			else
 429				domain = bo->allowed_domains;
 430		} else {
 431			domain = bo->preferred_domains;
 432		}
 433	} else {
 434		domain = bo->allowed_domains;
 435	}
 436
 437retry:
 438	amdgpu_bo_placement_from_domain(bo, domain);
 439	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 440
 441	p->bytes_moved += ctx.bytes_moved;
 442	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 443	    amdgpu_bo_in_cpu_visible_vram(bo))
 444		p->bytes_moved_vis += ctx.bytes_moved;
 445
 446	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 447		domain = bo->allowed_domains;
 448		goto retry;
 449	}
 450
 451	return r;
 452}
 453
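     /*
      * Validate every BO on the given list: reject userptr BOs that belong to
      * a different process, rebind user pages that were invalidated since
      * they were grabbed, and run amdgpu_cs_bo_validate() on each entry.
      */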
 454static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 455			    struct list_head *validated)
 456{
 457	struct ttm_operation_ctx ctx = { true, false };
 458	struct amdgpu_bo_list_entry *lobj;
 459	int r;
 460
 461	list_for_each_entry(lobj, validated, tv.head) {
 462		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
 463		struct mm_struct *usermm;
 464
 465		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 466		if (usermm && usermm != current->mm)
 467			return -EPERM;
 468
 469		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
 470		    lobj->user_invalidated && lobj->user_pages) {
 471			amdgpu_bo_placement_from_domain(bo,
 472							AMDGPU_GEM_DOMAIN_CPU);
 473			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 474			if (r)
 475				return r;
 476
 477			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 478						     lobj->user_pages);
 479		}
 480
 481		r = amdgpu_cs_bo_validate(p, bo);
 482		if (r)
 483			return r;
 484
 485		kvfree(lobj->user_pages);
 486		lobj->user_pages = NULL;
 487	}
 488	return 0;
 489}
 490
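     /*
      * Second stage: gather all BOs used by the submission (from the BO list
      * handle or the BO-handles chunk), grab the userptr pages, reserve
      * everything, validate it within the move budget and record the
      * GDS/GWS/OA objects and the user fence address in the job.
      */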
 491static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 492				union drm_amdgpu_cs *cs)
 493{
 494	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 495	struct amdgpu_vm *vm = &fpriv->vm;
 496	struct amdgpu_bo_list_entry *e;
 497	struct list_head duplicates;
 498	struct amdgpu_bo *gds;
 499	struct amdgpu_bo *gws;
 500	struct amdgpu_bo *oa;
 501	int r;
 502
 503	INIT_LIST_HEAD(&p->validated);
 504
 505	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 506	if (cs->in.bo_list_handle) {
 507		if (p->bo_list)
 508			return -EINVAL;
 509
 510		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 511				       &p->bo_list);
 512		if (r)
 513			return r;
 514	} else if (!p->bo_list) {
  515		/* Create an empty bo_list when no handle is provided */
 516		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 517					  &p->bo_list);
 518		if (r)
 519			return r;
 520	}
 521
 522	/* One for TTM and one for the CS job */
 523	amdgpu_bo_list_for_each_entry(e, p->bo_list)
 524		e->tv.num_shared = 2;
 525
 526	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 527
 528	INIT_LIST_HEAD(&duplicates);
 529	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 530
 531	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 532		list_add(&p->uf_entry.tv.head, &p->validated);
 533
  534	/* Get userptr backing pages. If pages are updated after being
  535	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
  536	 * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
 537	 */
 538	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 539		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 540		bool userpage_invalidated = false;
 541		int i;
 542
 543		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 544					sizeof(struct page *),
 545					GFP_KERNEL | __GFP_ZERO);
 546		if (!e->user_pages) {
 547			DRM_ERROR("kvmalloc_array failure\n");
 548			return -ENOMEM;
 549		}
 550
 551		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
 552		if (r) {
 553			kvfree(e->user_pages);
 554			e->user_pages = NULL;
 555			return r;
 556		}
 557
 558		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 559			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 560				userpage_invalidated = true;
 561				break;
 562			}
 563		}
 564		e->user_invalidated = userpage_invalidated;
 565	}
 566
 567	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 568				   &duplicates);
 569	if (unlikely(r != 0)) {
 570		if (r != -ERESTARTSYS)
 571			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 572		goto out;
 573	}
 574
 575	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 576					  &p->bytes_moved_vis_threshold);
 577	p->bytes_moved = 0;
 578	p->bytes_moved_vis = 0;
 579
 580	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 581				      amdgpu_cs_bo_validate, p);
 582	if (r) {
 583		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 584		goto error_validate;
 585	}
 586
 587	r = amdgpu_cs_list_validate(p, &duplicates);
 588	if (r)
 589		goto error_validate;
 590
 591	r = amdgpu_cs_list_validate(p, &p->validated);
 592	if (r)
 593		goto error_validate;
 594
 595	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 596				     p->bytes_moved_vis);
 597
 598	gds = p->bo_list->gds_obj;
 599	gws = p->bo_list->gws_obj;
 600	oa = p->bo_list->oa_obj;
 601
 602	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 603		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 604
 605		/* Make sure we use the exclusive slot for shared BOs */
 606		if (bo->prime_shared_count)
 607			e->tv.num_shared = 0;
 608		e->bo_va = amdgpu_vm_bo_find(vm, bo);
 609	}
 610
 611	if (gds) {
 612		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
 613		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
 614	}
 615	if (gws) {
 616		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
 617		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
 618	}
 619	if (oa) {
 620		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
 621		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
 622	}
 623
 624	if (!r && p->uf_entry.tv.bo) {
 625		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 626
 627		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 628		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 629	}
 630
 631error_validate:
 632	if (r)
 633		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 634out:
 635	return r;
 636}
 637
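     /*
      * Make the job wait for the existing fences of every reserved BO, using
      * explicit or implicit synchronization depending on each BO's
      * explicit-sync flag.
      */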
 638static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 639{
 640	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 641	struct amdgpu_bo_list_entry *e;
 642	int r;
 643
 644	list_for_each_entry(e, &p->validated, tv.head) {
 645		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 646		struct dma_resv *resv = bo->tbo.base.resv;
 647		enum amdgpu_sync_mode sync_mode;
 648
 649		sync_mode = amdgpu_bo_explicit_sync(bo) ?
 650			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
 651		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
 652				     &fpriv->vm);
 653		if (r)
 654			return r;
 655	}
 656	return 0;
 657}
 658
 659/**
 660 * amdgpu_cs_parser_fini() - clean parser states
 661 * @parser:	parser structure holding parsing context.
 662 * @error:	error number
 663 * @backoff:	indicator to backoff the reservation
 664 *
  665 * If error and backoff are set, the buffer reservations are backed off;
  666 * otherwise just free the memory used by the parsing context.
 667 **/
 668static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 669				  bool backoff)
 670{
 671	unsigned i;
 672
 673	if (error && backoff)
 674		ttm_eu_backoff_reservation(&parser->ticket,
 675					   &parser->validated);
 676
 677	for (i = 0; i < parser->num_post_deps; i++) {
 678		drm_syncobj_put(parser->post_deps[i].syncobj);
 679		kfree(parser->post_deps[i].chain);
 680	}
 681	kfree(parser->post_deps);
 682
 683	dma_fence_put(parser->fence);
 684
 685	if (parser->ctx) {
 686		mutex_unlock(&parser->ctx->lock);
 687		amdgpu_ctx_put(parser->ctx);
 688	}
 689	if (parser->bo_list)
 690		amdgpu_bo_list_put(parser->bo_list);
 691
 692	for (i = 0; i < parser->nchunks; i++)
 693		kvfree(parser->chunks[i].kdata);
 694	kvfree(parser->chunks);
 695	if (parser->job)
 696		amdgpu_job_free(parser->job);
 697	if (parser->uf_entry.tv.bo) {
 698		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
 699
 700		amdgpu_bo_unref(&uf);
 701	}
 702}
 703
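     /*
      * VM handling: for rings that parse or patch IBs (UVD/VCE VM emulation)
      * the IB contents are mapped and processed in place; for everything else
      * the page tables are brought up to date (freed mappings cleared, moved
      * BOs revalidated, PDEs updated) and the resulting fences are added as
      * dependencies before the rings are synchronized.
      */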
 704static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 705{
 706	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 707	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 708	struct amdgpu_device *adev = p->adev;
 709	struct amdgpu_vm *vm = &fpriv->vm;
 710	struct amdgpu_bo_list_entry *e;
 711	struct amdgpu_bo_va *bo_va;
 712	struct amdgpu_bo *bo;
 713	int r;
 714
 715	/* Only for UVD/VCE VM emulation */
 716	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
 717		unsigned i, j;
 718
 719		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
 720			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 721			struct amdgpu_bo_va_mapping *m;
 722			struct amdgpu_bo *aobj = NULL;
 723			struct amdgpu_cs_chunk *chunk;
 724			uint64_t offset, va_start;
 725			struct amdgpu_ib *ib;
 726			uint8_t *kptr;
 727
 728			chunk = &p->chunks[i];
 729			ib = &p->job->ibs[j];
 730			chunk_ib = chunk->kdata;
 731
 732			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 733				continue;
 734
 735			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
 736			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
 737			if (r) {
 738				DRM_ERROR("IB va_start is invalid\n");
 739				return r;
 740			}
 741
 742			if ((va_start + chunk_ib->ib_bytes) >
 743			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 744				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 745				return -EINVAL;
 746			}
 747
 748			/* the IB should be reserved at this point */
 749			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 750			if (r) {
 751				return r;
 752			}
 753
 754			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 755			kptr += va_start - offset;
 756
 757			if (ring->funcs->parse_cs) {
 758				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 759				amdgpu_bo_kunmap(aobj);
 760
 761				r = amdgpu_ring_parse_cs(ring, p, j);
 762				if (r)
 763					return r;
 764			} else {
 765				ib->ptr = (uint32_t *)kptr;
 766				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
 767				amdgpu_bo_kunmap(aobj);
 768				if (r)
 769					return r;
 770			}
 771
 772			j++;
 773		}
 774	}
 775
 776	if (!p->job->vm)
 777		return amdgpu_cs_sync_rings(p);
 778
 779
 780	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 781	if (r)
 782		return r;
 783
 784	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 785	if (r)
 786		return r;
 787
 788	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
 789	if (r)
 790		return r;
 791
 792	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 793		bo_va = fpriv->csa_va;
 794		BUG_ON(!bo_va);
 795		r = amdgpu_vm_bo_update(adev, bo_va, false);
 796		if (r)
 797			return r;
 798
 799		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 800		if (r)
 801			return r;
 802	}
 803
 804	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 805		/* ignore duplicates */
 806		bo = ttm_to_amdgpu_bo(e->tv.bo);
 807		if (!bo)
 808			continue;
 809
 810		bo_va = e->bo_va;
 811		if (bo_va == NULL)
 812			continue;
 813
 814		r = amdgpu_vm_bo_update(adev, bo_va, false);
 815		if (r)
 816			return r;
 817
 818		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 819		if (r)
 820			return r;
 821	}
 822
 823	r = amdgpu_vm_handle_moved(adev, vm);
 824	if (r)
 825		return r;
 826
 827	r = amdgpu_vm_update_pdes(adev, vm, false);
 828	if (r)
 829		return r;
 830
 831	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
 832	if (r)
 833		return r;
 834
 835	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 836
 837	if (amdgpu_vm_debug) {
 838		/* Invalidate all BOs to test for userspace bugs */
 839		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 840			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 841
 842			/* ignore duplicates */
 843			if (!bo)
 844				continue;
 845
 846			amdgpu_vm_bo_invalidate(adev, bo, false);
 847		}
 848	}
 849
 850	return amdgpu_cs_sync_rings(p);
 851}
 852
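     /*
      * Walk the IB chunks: enforce the CE/DE preemption limit on GFX, resolve
      * the scheduler entity (all IBs of one submission must target the same
      * entity), set up the amdgpu_ib structures and reject user fences on
      * rings that do not support them.
      */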
 853static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 854			     struct amdgpu_cs_parser *parser)
 855{
 856	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 857	struct amdgpu_vm *vm = &fpriv->vm;
 858	int r, ce_preempt = 0, de_preempt = 0;
 859	struct amdgpu_ring *ring;
 860	int i, j;
 861
 862	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 863		struct amdgpu_cs_chunk *chunk;
 864		struct amdgpu_ib *ib;
 865		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 866		struct drm_sched_entity *entity;
 867
 868		chunk = &parser->chunks[i];
 869		ib = &parser->job->ibs[j];
 870		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 871
 872		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 873			continue;
 874
 875		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 876		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
 877			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 878				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 879					ce_preempt++;
 880				else
 881					de_preempt++;
 882			}
 883
 884			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
 885			if (ce_preempt > 1 || de_preempt > 1)
 886				return -EINVAL;
 887		}
 888
 889		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
 890					  chunk_ib->ip_instance, chunk_ib->ring,
 891					  &entity);
 892		if (r)
 893			return r;
 894
 895		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 896			parser->job->preamble_status |=
 897				AMDGPU_PREAMBLE_IB_PRESENT;
 898
 899		if (parser->entity && parser->entity != entity)
 900			return -EINVAL;
 901
 902		/* Return if there is no run queue associated with this entity.
  903		 * Possibly because of a disabled HW IP. */
 904		if (entity->rq == NULL)
 905			return -EINVAL;
 906
 907		parser->entity = entity;
 908
 909		ring = to_amdgpu_ring(entity->rq->sched);
  910		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
 911				   chunk_ib->ib_bytes : 0,
 912				   AMDGPU_IB_POOL_DELAYED, ib);
 913		if (r) {
 914			DRM_ERROR("Failed to get ib !\n");
 915			return r;
 916		}
 917
 918		ib->gpu_addr = chunk_ib->va_start;
 919		ib->length_dw = chunk_ib->ib_bytes / 4;
 920		ib->flags = chunk_ib->flags;
 921
 922		j++;
 923	}
 924
 925	/* MM engine doesn't support user fences */
 926	ring = to_amdgpu_ring(parser->entity->rq->sched);
 927	if (parser->job->uf_addr && ring->funcs->no_user_fence)
 928		return -EINVAL;
 929
 930	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
 931}
 932
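     /*
      * Add the fences from a DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk to
      * the job's sync object; for scheduled dependencies only the scheduler's
      * "scheduled" fence is waited on, not the finished fence.
      */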
 933static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 934				       struct amdgpu_cs_chunk *chunk)
 935{
 936	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 937	unsigned num_deps;
 938	int i, r;
 939	struct drm_amdgpu_cs_chunk_dep *deps;
 940
 941	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 942	num_deps = chunk->length_dw * 4 /
 943		sizeof(struct drm_amdgpu_cs_chunk_dep);
 944
 945	for (i = 0; i < num_deps; ++i) {
 946		struct amdgpu_ctx *ctx;
 947		struct drm_sched_entity *entity;
 948		struct dma_fence *fence;
 949
 950		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 951		if (ctx == NULL)
 952			return -EINVAL;
 953
 954		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 955					  deps[i].ip_instance,
 956					  deps[i].ring, &entity);
 957		if (r) {
 958			amdgpu_ctx_put(ctx);
 959			return r;
 960		}
 961
 962		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 963		amdgpu_ctx_put(ctx);
 964
 965		if (IS_ERR(fence))
 966			return PTR_ERR(fence);
 967		else if (!fence)
 968			continue;
 969
 970		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 971			struct drm_sched_fence *s_fence;
 972			struct dma_fence *old = fence;
 973
 974			s_fence = to_drm_sched_fence(fence);
 975			fence = dma_fence_get(&s_fence->scheduled);
 976			dma_fence_put(old);
 977		}
 978
 979		r = amdgpu_sync_fence(&p->job->sync, fence);
 980		dma_fence_put(fence);
 981		if (r)
 982			return r;
 983	}
 984	return 0;
 985}
 986
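     /* Resolve a syncobj, optionally at a timeline point, and add its fence
      * to the job's dependencies.
      */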
 987static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 988						 uint32_t handle, u64 point,
 989						 u64 flags)
 990{
 991	struct dma_fence *fence;
 992	int r;
 993
 994	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
 995	if (r) {
 996		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
 997			  handle, point, r);
 998		return r;
 999	}
1000
1001	r = amdgpu_sync_fence(&p->job->sync, fence);
1002	dma_fence_put(fence);
1003
1004	return r;
1005}
1006
1007static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1008					    struct amdgpu_cs_chunk *chunk)
1009{
1010	struct drm_amdgpu_cs_chunk_sem *deps;
1011	unsigned num_deps;
1012	int i, r;
1013
1014	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1015	num_deps = chunk->length_dw * 4 /
1016		sizeof(struct drm_amdgpu_cs_chunk_sem);
1017	for (i = 0; i < num_deps; ++i) {
1018		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
1019							  0, 0);
1020		if (r)
1021			return r;
1022	}
1023
1024	return 0;
1025}
1026
1027
1028static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
1029						     struct amdgpu_cs_chunk *chunk)
1030{
1031	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1032	unsigned num_deps;
1033	int i, r;
1034
1035	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1036	num_deps = chunk->length_dw * 4 /
1037		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1038	for (i = 0; i < num_deps; ++i) {
1039		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
1040							  syncobj_deps[i].handle,
1041							  syncobj_deps[i].point,
1042							  syncobj_deps[i].flags);
1043		if (r)
1044			return r;
1045	}
1046
1047	return 0;
1048}
1049
1050static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1051					     struct amdgpu_cs_chunk *chunk)
1052{
1053	struct drm_amdgpu_cs_chunk_sem *deps;
1054	unsigned num_deps;
1055	int i;
1056
1057	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1058	num_deps = chunk->length_dw * 4 /
1059		sizeof(struct drm_amdgpu_cs_chunk_sem);
1060
1061	if (p->post_deps)
1062		return -EINVAL;
1063
1064	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1065				     GFP_KERNEL);
1066	p->num_post_deps = 0;
1067
1068	if (!p->post_deps)
1069		return -ENOMEM;
1070
1071
1072	for (i = 0; i < num_deps; ++i) {
1073		p->post_deps[i].syncobj =
1074			drm_syncobj_find(p->filp, deps[i].handle);
1075		if (!p->post_deps[i].syncobj)
1076			return -EINVAL;
1077		p->post_deps[i].chain = NULL;
1078		p->post_deps[i].point = 0;
1079		p->num_post_deps++;
1080	}
1081
1082	return 0;
1083}
1084
1085
1086static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1087						      struct amdgpu_cs_chunk *chunk)
1088{
1089	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1090	unsigned num_deps;
1091	int i;
1092
1093	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1094	num_deps = chunk->length_dw * 4 /
1095		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1096
1097	if (p->post_deps)
1098		return -EINVAL;
1099
1100	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1101				     GFP_KERNEL);
1102	p->num_post_deps = 0;
1103
1104	if (!p->post_deps)
1105		return -ENOMEM;
1106
1107	for (i = 0; i < num_deps; ++i) {
1108		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
1109
1110		dep->chain = NULL;
1111		if (syncobj_deps[i].point) {
1112			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
1113			if (!dep->chain)
1114				return -ENOMEM;
1115		}
1116
1117		dep->syncobj = drm_syncobj_find(p->filp,
1118						syncobj_deps[i].handle);
1119		if (!dep->syncobj) {
1120			kfree(dep->chain);
1121			return -EINVAL;
1122		}
1123		dep->point = syncobj_deps[i].point;
1124		p->num_post_deps++;
1125	}
1126
1127	return 0;
1128}
1129
1130static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1131				  struct amdgpu_cs_parser *p)
1132{
1133	int i, r;
1134
1135	for (i = 0; i < p->nchunks; ++i) {
1136		struct amdgpu_cs_chunk *chunk;
1137
1138		chunk = &p->chunks[i];
1139
1140		switch (chunk->chunk_id) {
1141		case AMDGPU_CHUNK_ID_DEPENDENCIES:
1142		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
1143			r = amdgpu_cs_process_fence_dep(p, chunk);
1144			if (r)
1145				return r;
1146			break;
1147		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
1148			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1149			if (r)
1150				return r;
1151			break;
1152		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
1153			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1154			if (r)
1155				return r;
1156			break;
1157		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
1158			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
1159			if (r)
1160				return r;
1161			break;
1162		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
1163			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
1164			if (r)
1165				return r;
1166			break;
1167		}
1168	}
1169
1170	return 0;
1171}
1172
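     /*
      * Signal the syncobjs collected from the SYNCOBJ_OUT and
      * SYNCOBJ_TIMELINE_SIGNAL chunks with the submission's fence.
      */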
1173static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1174{
1175	int i;
1176
1177	for (i = 0; i < p->num_post_deps; ++i) {
1178		if (p->post_deps[i].chain && p->post_deps[i].point) {
1179			drm_syncobj_add_point(p->post_deps[i].syncobj,
1180					      p->post_deps[i].chain,
1181					      p->fence, p->post_deps[i].point);
1182			p->post_deps[i].chain = NULL;
1183		} else {
1184			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1185						  p->fence);
1186		}
1187	}
1188}
1189
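     /*
      * Final stage: turn the parsed job into a scheduler job, re-check the
      * userptr pages under the notifier lock, publish the finished fence to
      * the context, the post-dependency syncobjs and the reserved BOs, and
      * push the job to the scheduler.
      */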
1190static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1191			    union drm_amdgpu_cs *cs)
1192{
1193	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1194	struct drm_sched_entity *entity = p->entity;
1195	struct amdgpu_bo_list_entry *e;
1196	struct amdgpu_job *job;
1197	uint64_t seq;
1198	int r;
1199
1200	job = p->job;
1201	p->job = NULL;
1202
1203	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
1204	if (r)
1205		goto error_unlock;
1206
1207	/* No memory allocation is allowed while holding the notifier lock.
 1208	 * The lock is held until amdgpu_cs_submit is finished and the fence is
 1209	 * added to the BOs.
1210	 */
1211	mutex_lock(&p->adev->notifier_lock);
1212
 1213	/* If the userptrs are invalidated after amdgpu_cs_parser_bos(), return
 1214	 * -EAGAIN; drmIoctl in libdrm will then restart the amdgpu_cs_ioctl.
1215	 */
1216	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1217		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1218
1219		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1220	}
1221	if (r) {
1222		r = -EAGAIN;
1223		goto error_abort;
1224	}
1225
1226	p->fence = dma_fence_get(&job->base.s_fence->finished);
1227
1228	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
1229	amdgpu_cs_post_dependencies(p);
1230
1231	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1232	    !p->ctx->preamble_presented) {
1233		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1234		p->ctx->preamble_presented = true;
1235	}
1236
1237	cs->out.handle = seq;
1238	job->uf_sequence = seq;
1239
1240	amdgpu_job_free_resources(job);
1241
1242	trace_amdgpu_cs_ioctl(job);
1243	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1244	drm_sched_entity_push_job(&job->base, entity);
1245
1246	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1247
1248	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1249	mutex_unlock(&p->adev->notifier_lock);
1250
1251	return 0;
1252
1253error_abort:
1254	drm_sched_job_cleanup(&job->base);
1255	mutex_unlock(&p->adev->notifier_lock);
1256
1257error_unlock:
1258	amdgpu_job_free(job);
1259	return r;
1260}
1261
1262static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
1263{
1264	int i;
1265
1266	if (!trace_amdgpu_cs_enabled())
1267		return;
1268
1269	for (i = 0; i < parser->job->num_ibs; i++)
1270		trace_amdgpu_cs(parser, i);
1271}
1272
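     /*
      * The CS ioctl entry point: parser init, IB fill, dependency handling,
      * BO and VM handling, then job submission. The parser state is cleaned
      * up (and the reservation backed off on error) in amdgpu_cs_parser_fini().
      */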
1273int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1274{
1275	struct amdgpu_device *adev = drm_to_adev(dev);
1276	union drm_amdgpu_cs *cs = data;
1277	struct amdgpu_cs_parser parser = {};
1278	bool reserved_buffers = false;
1279	int r;
1280
1281	if (amdgpu_ras_intr_triggered())
1282		return -EHWPOISON;
1283
1284	if (!adev->accel_working)
1285		return -EBUSY;
1286
1287	parser.adev = adev;
1288	parser.filp = filp;
1289
1290	r = amdgpu_cs_parser_init(&parser, data);
1291	if (r) {
1292		if (printk_ratelimit())
1293			DRM_ERROR("Failed to initialize parser %d!\n", r);
1294		goto out;
1295	}
1296
1297	r = amdgpu_cs_ib_fill(adev, &parser);
1298	if (r)
1299		goto out;
1300
1301	r = amdgpu_cs_dependencies(adev, &parser);
1302	if (r) {
1303		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1304		goto out;
1305	}
1306
1307	r = amdgpu_cs_parser_bos(&parser, data);
1308	if (r) {
1309		if (r == -ENOMEM)
1310			DRM_ERROR("Not enough memory for command submission!\n");
1311		else if (r != -ERESTARTSYS && r != -EAGAIN)
1312			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1313		goto out;
1314	}
1315
1316	reserved_buffers = true;
1317
1318	trace_amdgpu_cs_ibs(&parser);
1319
1320	r = amdgpu_cs_vm_handling(&parser);
1321	if (r)
1322		goto out;
1323
1324	r = amdgpu_cs_submit(&parser, cs);
1325
1326out:
1327	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1328
1329	return r;
1330}
1331
1332/**
1333 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1334 *
1335 * @dev: drm device
1336 * @data: data from userspace
1337 * @filp: file private
1338 *
1339 * Wait for the command submission identified by handle to finish.
1340 */
1341int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1342			 struct drm_file *filp)
1343{
1344	union drm_amdgpu_wait_cs *wait = data;
1345	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1346	struct drm_sched_entity *entity;
1347	struct amdgpu_ctx *ctx;
1348	struct dma_fence *fence;
1349	long r;
1350
1351	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1352	if (ctx == NULL)
1353		return -EINVAL;
1354
1355	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1356				  wait->in.ring, &entity);
1357	if (r) {
1358		amdgpu_ctx_put(ctx);
1359		return r;
1360	}
1361
1362	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1363	if (IS_ERR(fence))
1364		r = PTR_ERR(fence);
1365	else if (fence) {
1366		r = dma_fence_wait_timeout(fence, true, timeout);
1367		if (r > 0 && fence->error)
1368			r = fence->error;
1369		dma_fence_put(fence);
1370	} else
1371		r = 1;
1372
1373	amdgpu_ctx_put(ctx);
1374	if (r < 0)
1375		return r;
1376
1377	memset(wait, 0, sizeof(*wait));
1378	wait->out.status = (r == 0);
1379
1380	return 0;
1381}
1382
1383/**
1384 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1385 *
1386 * @adev: amdgpu device
1387 * @filp: file private
1388 * @user: drm_amdgpu_fence copied from user space
1389 */
1390static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1391					     struct drm_file *filp,
1392					     struct drm_amdgpu_fence *user)
1393{
1394	struct drm_sched_entity *entity;
1395	struct amdgpu_ctx *ctx;
1396	struct dma_fence *fence;
1397	int r;
1398
1399	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1400	if (ctx == NULL)
1401		return ERR_PTR(-EINVAL);
1402
1403	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1404				  user->ring, &entity);
1405	if (r) {
1406		amdgpu_ctx_put(ctx);
1407		return ERR_PTR(r);
1408	}
1409
1410	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1411	amdgpu_ctx_put(ctx);
1412
1413	return fence;
1414}
1415
1416int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1417				    struct drm_file *filp)
1418{
1419	struct amdgpu_device *adev = drm_to_adev(dev);
1420	union drm_amdgpu_fence_to_handle *info = data;
1421	struct dma_fence *fence;
1422	struct drm_syncobj *syncobj;
1423	struct sync_file *sync_file;
1424	int fd, r;
1425
1426	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1427	if (IS_ERR(fence))
1428		return PTR_ERR(fence);
1429
1430	if (!fence)
1431		fence = dma_fence_get_stub();
1432
1433	switch (info->in.what) {
1434	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1435		r = drm_syncobj_create(&syncobj, 0, fence);
1436		dma_fence_put(fence);
1437		if (r)
1438			return r;
1439		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1440		drm_syncobj_put(syncobj);
1441		return r;
1442
1443	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1444		r = drm_syncobj_create(&syncobj, 0, fence);
1445		dma_fence_put(fence);
1446		if (r)
1447			return r;
1448		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1449		drm_syncobj_put(syncobj);
1450		return r;
1451
1452	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1453		fd = get_unused_fd_flags(O_CLOEXEC);
1454		if (fd < 0) {
1455			dma_fence_put(fence);
1456			return fd;
1457		}
1458
1459		sync_file = sync_file_create(fence);
1460		dma_fence_put(fence);
1461		if (!sync_file) {
1462			put_unused_fd(fd);
1463			return -ENOMEM;
1464		}
1465
1466		fd_install(fd, sync_file->file);
1467		info->out.handle = fd;
1468		return 0;
1469
1470	default:
1471		return -EINVAL;
1472	}
1473}
1474
1475/**
1476 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1477 *
1478 * @adev: amdgpu device
1479 * @filp: file private
1480 * @wait: wait parameters
1481 * @fences: array of drm_amdgpu_fence
1482 */
1483static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1484				     struct drm_file *filp,
1485				     union drm_amdgpu_wait_fences *wait,
1486				     struct drm_amdgpu_fence *fences)
1487{
1488	uint32_t fence_count = wait->in.fence_count;
1489	unsigned int i;
1490	long r = 1;
1491
1492	for (i = 0; i < fence_count; i++) {
1493		struct dma_fence *fence;
1494		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1495
1496		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1497		if (IS_ERR(fence))
1498			return PTR_ERR(fence);
1499		else if (!fence)
1500			continue;
1501
 1502		r = dma_fence_wait_timeout(fence, true, timeout);
 1503		if (r > 0 && fence->error)
 1504			r = fence->error;
 1505		dma_fence_put(fence);
 1506		if (r < 0)
 1507			return r;
 1508
 1509		if (r == 0)
 1510			break;
 1511
1512	}
1513
1514	memset(wait, 0, sizeof(*wait));
1515	wait->out.status = (r > 0);
1516
1517	return 0;
1518}
1519
1520/**
1521 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1522 *
1523 * @adev: amdgpu device
1524 * @filp: file private
1525 * @wait: wait parameters
1526 * @fences: array of drm_amdgpu_fence
1527 */
1528static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1529				    struct drm_file *filp,
1530				    union drm_amdgpu_wait_fences *wait,
1531				    struct drm_amdgpu_fence *fences)
1532{
1533	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1534	uint32_t fence_count = wait->in.fence_count;
1535	uint32_t first = ~0;
1536	struct dma_fence **array;
1537	unsigned int i;
1538	long r;
1539
1540	/* Prepare the fence array */
1541	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1542
1543	if (array == NULL)
1544		return -ENOMEM;
1545
1546	for (i = 0; i < fence_count; i++) {
1547		struct dma_fence *fence;
1548
1549		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1550		if (IS_ERR(fence)) {
1551			r = PTR_ERR(fence);
1552			goto err_free_fence_array;
1553		} else if (fence) {
1554			array[i] = fence;
 1555		} else { /* NULL, the fence has already been signaled */
1556			r = 1;
1557			first = i;
1558			goto out;
1559		}
1560	}
1561
1562	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1563				       &first);
1564	if (r < 0)
1565		goto err_free_fence_array;
1566
1567out:
1568	memset(wait, 0, sizeof(*wait));
1569	wait->out.status = (r > 0);
1570	wait->out.first_signaled = first;
1571
1572	if (first < fence_count && array[first])
1573		r = array[first]->error;
1574	else
1575		r = 0;
1576
1577err_free_fence_array:
1578	for (i = 0; i < fence_count; i++)
1579		dma_fence_put(array[i]);
1580	kfree(array);
1581
1582	return r;
1583}
1584
1585/**
1586 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1587 *
1588 * @dev: drm device
1589 * @data: data from userspace
1590 * @filp: file private
1591 */
1592int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1593				struct drm_file *filp)
1594{
1595	struct amdgpu_device *adev = drm_to_adev(dev);
1596	union drm_amdgpu_wait_fences *wait = data;
1597	uint32_t fence_count = wait->in.fence_count;
1598	struct drm_amdgpu_fence *fences_user;
1599	struct drm_amdgpu_fence *fences;
1600	int r;
1601
1602	/* Get the fences from userspace */
1603	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1604			GFP_KERNEL);
1605	if (fences == NULL)
1606		return -ENOMEM;
1607
1608	fences_user = u64_to_user_ptr(wait->in.fences);
1609	if (copy_from_user(fences, fences_user,
1610		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1611		r = -EFAULT;
1612		goto err_free_fences;
1613	}
1614
1615	if (wait->in.wait_all)
1616		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1617	else
1618		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1619
1620err_free_fences:
1621	kfree(fences);
1622
1623	return r;
1624}
1625
1626/**
1627 * amdgpu_cs_find_mapping - find bo_va for VM address
1628 *
1629 * @parser: command submission parser context
1630 * @addr: VM address
1631 * @bo: resulting BO of the mapping found
1632 * @map: Placeholder to return found BO mapping
1633 *
1634 * Search the buffer objects in the command submission context for a certain
 1635 * virtual memory address. Returns 0 and sets @bo and @map when the
 1636 * mapping is found, a negative error code otherwise.
1637 */
1638int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1639			   uint64_t addr, struct amdgpu_bo **bo,
1640			   struct amdgpu_bo_va_mapping **map)
1641{
1642	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1643	struct ttm_operation_ctx ctx = { false, false };
1644	struct amdgpu_vm *vm = &fpriv->vm;
1645	struct amdgpu_bo_va_mapping *mapping;
1646	int r;
1647
1648	addr /= AMDGPU_GPU_PAGE_SIZE;
1649
1650	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1651	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1652		return -EINVAL;
1653
1654	*bo = mapping->bo_va->base.bo;
1655	*map = mapping;
1656
1657	/* Double check that the BO is reserved by this CS */
1658	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1659		return -EINVAL;
1660
1661	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1662		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1663		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1664		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1665		if (r)
1666			return r;
1667	}
1668
1669	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1670}
v4.10.11
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
 
 
  27#include <linux/pagemap.h>
  28#include <drm/drmP.h>
 
 
  29#include <drm/amdgpu_drm.h>
 
  30#include "amdgpu.h"
  31#include "amdgpu_trace.h"
  32
  33int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
  34		       u32 ip_instance, u32 ring,
  35		       struct amdgpu_ring **out_ring)
  36{
  37	/* Right now all IPs have only one instance - multiple rings. */
  38	if (ip_instance != 0) {
  39		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
  40		return -EINVAL;
  41	}
  42
  43	switch (ip_type) {
  44	default:
  45		DRM_ERROR("unknown ip type: %d\n", ip_type);
  46		return -EINVAL;
  47	case AMDGPU_HW_IP_GFX:
  48		if (ring < adev->gfx.num_gfx_rings) {
  49			*out_ring = &adev->gfx.gfx_ring[ring];
  50		} else {
  51			DRM_ERROR("only %d gfx rings are supported now\n",
  52				  adev->gfx.num_gfx_rings);
  53			return -EINVAL;
  54		}
  55		break;
  56	case AMDGPU_HW_IP_COMPUTE:
  57		if (ring < adev->gfx.num_compute_rings) {
  58			*out_ring = &adev->gfx.compute_ring[ring];
  59		} else {
  60			DRM_ERROR("only %d compute rings are supported now\n",
  61				  adev->gfx.num_compute_rings);
  62			return -EINVAL;
  63		}
  64		break;
  65	case AMDGPU_HW_IP_DMA:
  66		if (ring < adev->sdma.num_instances) {
  67			*out_ring = &adev->sdma.instance[ring].ring;
  68		} else {
  69			DRM_ERROR("only %d SDMA rings are supported\n",
  70				  adev->sdma.num_instances);
  71			return -EINVAL;
  72		}
  73		break;
  74	case AMDGPU_HW_IP_UVD:
  75		*out_ring = &adev->uvd.ring;
  76		break;
  77	case AMDGPU_HW_IP_VCE:
  78		if (ring < 2){
  79			*out_ring = &adev->vce.ring[ring];
  80		} else {
  81			DRM_ERROR("only two VCE rings are supported\n");
  82			return -EINVAL;
  83		}
  84		break;
  85	}
  86
  87	if (!(*out_ring && (*out_ring)->adev)) {
  88		DRM_ERROR("Ring %d is not initialized on IP %d\n",
  89			  ring, ip_type);
  90		return -EINVAL;
  91	}
  92
  93	return 0;
  94}
  95
  96static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  97				      struct drm_amdgpu_cs_chunk_fence *data,
  98				      uint32_t *offset)
  99{
 100	struct drm_gem_object *gobj;
 
 101	unsigned long size;
 
 102
 103	gobj = drm_gem_object_lookup(p->filp, data->handle);
 104	if (gobj == NULL)
 105		return -EINVAL;
 106
 107	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 108	p->uf_entry.priority = 0;
 109	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 110	p->uf_entry.tv.shared = true;
 111	p->uf_entry.user_pages = NULL;
 
 
 
 
 
 
 
 
 112
 113	size = amdgpu_bo_size(p->uf_entry.robj);
 114	if (size != PAGE_SIZE || (data->offset + 8) > size)
 115		return -EINVAL;
 
 116
 117	*offset = data->offset;
 118
 119	drm_gem_object_unreference_unlocked(gobj);
 
 
 
 
 
 
 
 
 
 
 
 120
 121	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
 122		amdgpu_bo_unref(&p->uf_entry.robj);
 123		return -EINVAL;
 124	}
 
 
 
 
 125
 
 126	return 0;
 
 
 
 
 
 127}
 128
 129int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 130{
 131	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 132	struct amdgpu_vm *vm = &fpriv->vm;
 133	union drm_amdgpu_cs *cs = data;
 134	uint64_t *chunk_array_user;
 135	uint64_t *chunk_array;
 136	unsigned size, num_ibs = 0;
 137	uint32_t uf_offset = 0;
 138	int i;
 139	int ret;
 140
 141	if (cs->in.num_chunks == 0)
 142		return 0;
 143
 144	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 145	if (!chunk_array)
 146		return -ENOMEM;
 147
 148	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 149	if (!p->ctx) {
 150		ret = -EINVAL;
 151		goto free_chunk;
 152	}
 153
 
 
 
 
 
 
 
 
 154	/* get chunks */
 155	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 156	if (copy_from_user(chunk_array, chunk_array_user,
 157			   sizeof(uint64_t)*cs->in.num_chunks)) {
 158		ret = -EFAULT;
 159		goto put_ctx;
 160	}
 161
 162	p->nchunks = cs->in.num_chunks;
 163	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 164			    GFP_KERNEL);
 165	if (!p->chunks) {
 166		ret = -ENOMEM;
 167		goto put_ctx;
 168	}
 169
 170	for (i = 0; i < p->nchunks; i++) {
 171		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 172		struct drm_amdgpu_cs_chunk user_chunk;
 173		uint32_t __user *cdata;
 174
 175		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 176		if (copy_from_user(&user_chunk, chunk_ptr,
 177				       sizeof(struct drm_amdgpu_cs_chunk))) {
 178			ret = -EFAULT;
 179			i--;
 180			goto free_partial_kdata;
 181		}
 182		p->chunks[i].chunk_id = user_chunk.chunk_id;
 183		p->chunks[i].length_dw = user_chunk.length_dw;
 184
 185		size = p->chunks[i].length_dw;
 186		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 187
 188		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 189		if (p->chunks[i].kdata == NULL) {
 190			ret = -ENOMEM;
 191			i--;
 192			goto free_partial_kdata;
 193		}
 194		size *= sizeof(uint32_t);
 195		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 196			ret = -EFAULT;
 197			goto free_partial_kdata;
 198		}
 199
 200		switch (p->chunks[i].chunk_id) {
 201		case AMDGPU_CHUNK_ID_IB:
 202			++num_ibs;
 203			break;
 204
 205		case AMDGPU_CHUNK_ID_FENCE:
 206			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 207			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 208				ret = -EINVAL;
 209				goto free_partial_kdata;
 210			}
 211
 212			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 213							 &uf_offset);
 214			if (ret)
 215				goto free_partial_kdata;
 216
 217			break;
 218
 
 
 
 
 
 
 
 
 
 
 
 
 
 219		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 
 
 
 
 
 220			break;
 221
 222		default:
 223			ret = -EINVAL;
 224			goto free_partial_kdata;
 225		}
 226	}
 227
 228	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 229	if (ret)
 230		goto free_all_kdata;
 231
 232	if (p->uf_entry.robj)
 
 
 
 
 
 233		p->job->uf_addr = uf_offset;
 234	kfree(chunk_array);
 
 
 
 
 235	return 0;
 236
 237free_all_kdata:
 238	i = p->nchunks - 1;
 239free_partial_kdata:
 240	for (; i >= 0; i--)
 241		drm_free_large(p->chunks[i].kdata);
 242	kfree(p->chunks);
 243put_ctx:
 244	amdgpu_ctx_put(p->ctx);
 245free_chunk:
 246	kfree(chunk_array);
 247
 248	return ret;
 249}
 250
 251/* Convert microseconds to bytes. */
 252static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 253{
 254	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 255		return 0;
 256
 257	/* Since accum_us is incremented by a million per second, just
 258	 * multiply it by the number of MB/s to get the number of bytes.
 259	 */
 260	return us << adev->mm_stats.log2_max_MBps;
 261}
 262
 263static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 264{
 265	if (!adev->mm_stats.log2_max_MBps)
 266		return 0;
 267
 268	return bytes >> adev->mm_stats.log2_max_MBps;
 269}
 270
 271/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 272 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 273 * which means it can go over the threshold once. If that happens, the driver
 274 * will be in debt and no other buffer migrations can be done until that debt
 275 * is repaid.
 276 *
 277 * This approach allows moving a buffer of any size (it's important to allow
 278 * that).
 279 *
 280 * The currency is simply time in microseconds and it increases as the clock
 281 * ticks. The accumulated microseconds (us) are converted to bytes and
 282 * returned.
 283 */
 284static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 285{
 286	s64 time_us, increment_us;
 287	u64 max_bytes;
 288	u64 free_vram, total_vram, used_vram;
 289
 290	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 291	 * throttling.
 292	 *
 293	 * It means that in order to get full max MBps, at least 5 IBs per
 294	 * second must be submitted and not more than 200ms apart from each
 295	 * other.
 296	 */
 297	const s64 us_upper_bound = 200000;
 298
 299	if (!adev->mm_stats.log2_max_MBps)
 300		return 0;
 301
 302	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
 303	used_vram = atomic64_read(&adev->vram_usage);
 304	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 305
 306	spin_lock(&adev->mm_stats.lock);
 307
 308	/* Increase the amount of accumulated us. */
 309	time_us = ktime_to_us(ktime_get());
 310	increment_us = time_us - adev->mm_stats.last_update_us;
 311	adev->mm_stats.last_update_us = time_us;
 312	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 313                                      us_upper_bound);
 314
 315	/* This prevents the short period of low performance when the VRAM
 316	 * usage is low and the driver is in debt or doesn't have enough
 317	 * accumulated us to fill VRAM quickly.
 318	 *
 319	 * The situation can occur in these cases:
 320	 * - a lot of VRAM is freed by userspace
 321	 * - the presence of a big buffer causes a lot of evictions
 322	 *   (solution: split buffers into smaller ones)
 323	 *
 324	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 325	 * accum_us to a positive number.
 326	 */
 327	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 328		s64 min_us;
 329
 330		/* Be more aggressive on dGPUs. Try to fill a portion of free
 331		 * VRAM now.
 332		 */
 333		if (!(adev->flags & AMD_IS_APU))
 334			min_us = bytes_to_us(adev, free_vram / 4);
 335		else
 336			min_us = 0; /* Reset accum_us on APUs. */
 337
 338		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 339	}
 340
 341	/* If the driver is in debt, this returns 0 to disallow (optional)
 342	 * buffer moves.
 343	 */
 344	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 345
 346	spin_unlock(&adev->mm_stats.lock);
 347	return max_bytes;
 348}
 349
 350/* Report how many bytes have really been moved for the last command
 351 * submission. This can result in a debt that can stop buffer migrations
 352 * temporarily.
 353 */
 354static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 355					 u64 num_bytes)
 356{
 357	spin_lock(&adev->mm_stats.lock);
 358	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 359	spin_unlock(&adev->mm_stats.lock);
 360}
 361
 362static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 363				 struct amdgpu_bo *bo)
 364{
 365	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 366	u64 initial_bytes_moved;
 367	uint32_t domain;
 368	int r;
 369
 370	if (bo->pin_count)
 371		return 0;
 372
 373	/* Don't move this buffer if we have depleted our allowance
 374	 * to move it. Don't move anything if the threshold is zero.
 375	 */
 376	if (p->bytes_moved < p->bytes_moved_threshold)
 377		domain = bo->prefered_domains;
 378	else
 379		domain = bo->allowed_domains;
 380
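	/* Validate in the chosen domain first; if that fails with -ENOMEM,
	 * retry below with the full set of allowed domains.
	 */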
 381retry:
 382	amdgpu_ttm_placement_from_domain(bo, domain);
 383	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 384	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 385	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 386		initial_bytes_moved;
 387
 388	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 389		domain = bo->allowed_domains;
 390		goto retry;
 391	}
 392
 393	return r;
 394}
 395
 396/* Last resort, try to evict something from the current working set */
 397static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 398				struct amdgpu_bo *validated)
 399{
 400	uint32_t domain = validated->allowed_domains;
 401	int r;
 402
 403	if (!p->evictable)
 404		return false;
 405
 406	for (; &p->evictable->tv.head != &p->validated;
 407	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
 408
 409		struct amdgpu_bo_list_entry *candidate = p->evictable;
 410		struct amdgpu_bo *bo = candidate->robj;
 411		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 412		u64 initial_bytes_moved;
 413		uint32_t other;
 414
 415		/* If we reached our current BO we can forget it */
 416		if (candidate->robj == validated)
 417			break;
 418
 419		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 420
 421		/* Check if this BO is in one of the domains we need space for */
 422		if (!(other & domain))
 423			continue;
 424
 425		/* Check if we can move this BO somewhere else */
 426		other = bo->allowed_domains & ~domain;
 427		if (!other)
 428			continue;
 429
 430		/* Good we can try to move this BO somewhere else */
 431		amdgpu_ttm_placement_from_domain(bo, other);
 432		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 433		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 434		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 435			initial_bytes_moved;
 436
 437		if (unlikely(r))
 438			break;
 439
 440		p->evictable = list_prev_entry(p->evictable, tv.head);
 441		list_move(&candidate->tv.head, &p->validated);
 442
 443		return true;
 444	}
 445
 446	return false;
 447}
 448
 449static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 450{
 451	struct amdgpu_cs_parser *p = param;
 452	int r;
 453
 454	do {
 455		r = amdgpu_cs_bo_validate(p, bo);
 456	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
 457	if (r)
 458		return r;
 459
 460	if (bo->shadow)
 461		r = amdgpu_cs_bo_validate(p, bo->shadow);
 462
 463	return r;
 464}
 465
 466static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 467			    struct list_head *validated)
 468{
 469	struct amdgpu_bo_list_entry *lobj;
 470	int r;
 471
 472	list_for_each_entry(lobj, validated, tv.head) {
 473		struct amdgpu_bo *bo = lobj->robj;
 474		bool binding_userptr = false;
 475		struct mm_struct *usermm;
 476
 477		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 478		if (usermm && usermm != current->mm)
 479			return -EPERM;
 480
 481		/* Check if we have user pages and nobody bound the BO already */
 482		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
 483			size_t size = sizeof(struct page *);
 484
 485			size *= bo->tbo.ttm->num_pages;
 486			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
 487			binding_userptr = true;
 488		}
 489
 490		if (p->evictable == lobj)
 491			p->evictable = NULL;
 492
 493		r = amdgpu_cs_validate(p, bo);
 494		if (r)
 495			return r;
 496
 497		if (binding_userptr) {
 498			drm_free_large(lobj->user_pages);
 499			lobj->user_pages = NULL;
 500		}
 501	}
 502	return 0;
 503}
 504
 505static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 506				union drm_amdgpu_cs *cs)
 507{
 508	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 509	struct amdgpu_bo_list_entry *e;
 510	struct list_head duplicates;
 511	bool need_mmap_lock = false;
 512	unsigned i, tries = 10;
 513	int r;
 514
 515	INIT_LIST_HEAD(&p->validated);
 516
 517	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 518	if (p->bo_list) {
 519		need_mmap_lock = p->bo_list->first_userptr !=
 520			p->bo_list->num_entries;
 521		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 522	}
 523
 524	INIT_LIST_HEAD(&duplicates);
 525	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 526
 527	if (p->uf_entry.robj)
 528		list_add(&p->uf_entry.tv.head, &p->validated);
 529
 530	if (need_mmap_lock)
 531		down_read(&current->mm->mmap_sem);
 532
 533	while (1) {
 534		struct list_head need_pages;
 535		unsigned i;
 536
 537		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 538					   &duplicates);
 539		if (unlikely(r != 0)) {
 540			if (r != -ERESTARTSYS)
 541				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 542			goto error_free_pages;
 543		}
 544
 545		/* Without a BO list we don't have userptr BOs */
 546		if (!p->bo_list)
 547			break;
 548
 549		INIT_LIST_HEAD(&need_pages);
 550		for (i = p->bo_list->first_userptr;
 551		     i < p->bo_list->num_entries; ++i) {
 552
 553			e = &p->bo_list->array[i];
 554
 555			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
 556				 &e->user_invalidated) && e->user_pages) {
 557
 558				/* We acquired a page array, but somebody
 559				 * invalidated it. Free it and try again.
 560				 */
 561				release_pages(e->user_pages,
 562					      e->robj->tbo.ttm->num_pages,
 563					      false);
 564				drm_free_large(e->user_pages);
 565				e->user_pages = NULL;
 566			}
 567
 568			if (e->robj->tbo.ttm->state != tt_bound &&
 569			    !e->user_pages) {
 570				list_del(&e->tv.head);
 571				list_add(&e->tv.head, &need_pages);
 572
 573				amdgpu_bo_unreserve(e->robj);
 574			}
 575		}
 576
 577		if (list_empty(&need_pages))
 578			break;
 579
 580		/* Unreserve everything again. */
 581		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 582
 583		/* We tried too many times, just abort */
 584		if (!--tries) {
 585			r = -EDEADLK;
 586			DRM_ERROR("deadlock in %s\n", __func__);
 587			goto error_free_pages;
 588		}
 589
 590		/* Fill the page arrays for all userptrs. */
 591		list_for_each_entry(e, &need_pages, tv.head) {
 592			struct ttm_tt *ttm = e->robj->tbo.ttm;
 593
 594			e->user_pages = drm_calloc_large(ttm->num_pages,
 595							 sizeof(struct page*));
 596			if (!e->user_pages) {
 597				r = -ENOMEM;
 598				DRM_ERROR("calloc failure in %s\n", __func__);
 599				goto error_free_pages;
 600			}
 601
 602			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 603			if (r) {
 604				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 605				drm_free_large(e->user_pages);
 606				e->user_pages = NULL;
 607				goto error_free_pages;
 608			}
 609		}
 610
 611		/* And try again. */
 612		list_splice(&need_pages, &p->validated);
 613	}
 614
 615	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 616	p->bytes_moved = 0;
 617	p->evictable = list_last_entry(&p->validated,
 618				       struct amdgpu_bo_list_entry,
 619				       tv.head);
 620
 621	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 622				      amdgpu_cs_validate, p);
 623	if (r) {
 624		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 625		goto error_validate;
 626	}
 627
 628	r = amdgpu_cs_list_validate(p, &duplicates);
 629	if (r) {
 630		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 631		goto error_validate;
 632	}
 633
 634	r = amdgpu_cs_list_validate(p, &p->validated);
 635	if (r) {
 636		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 637		goto error_validate;
 638	}
 639
 640	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 641
 642	fpriv->vm.last_eviction_counter =
 643		atomic64_read(&p->adev->num_evictions);
 644
 645	if (p->bo_list) {
 646		struct amdgpu_bo *gds = p->bo_list->gds_obj;
 647		struct amdgpu_bo *gws = p->bo_list->gws_obj;
 648		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 649		struct amdgpu_vm *vm = &fpriv->vm;
 650		unsigned i;
 651
 652		for (i = 0; i < p->bo_list->num_entries; i++) {
 653			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 654
 655			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 656		}
 657
 658		if (gds) {
 659			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
 660			p->job->gds_size = amdgpu_bo_size(gds);
 661		}
 662		if (gws) {
 663			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
 664			p->job->gws_size = amdgpu_bo_size(gws);
 665		}
 666		if (oa) {
 667			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
 668			p->job->oa_size = amdgpu_bo_size(oa);
 669		}
 670	}
 671
 672	if (!r && p->uf_entry.robj) {
 673		struct amdgpu_bo *uf = p->uf_entry.robj;
 674
 675		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
 676		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 677	}
 678
 679error_validate:
 680	if (r) {
 681		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
 682		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 683	}
 684
 685error_free_pages:
 686
 687	if (need_mmap_lock)
 688		up_read(&current->mm->mmap_sem);
 689
 690	if (p->bo_list) {
 691		for (i = p->bo_list->first_userptr;
 692		     i < p->bo_list->num_entries; ++i) {
 693			e = &p->bo_list->array[i];
 694
 695			if (!e->user_pages)
 696				continue;
 697
 698			release_pages(e->user_pages,
 699				      e->robj->tbo.ttm->num_pages,
 700				      false);
 701			drm_free_large(e->user_pages);
 702		}
 703	}
 704
 705	return r;
 706}
 707
 708static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 709{
 710	struct amdgpu_bo_list_entry *e;
 711	int r;
 712
 713	list_for_each_entry(e, &p->validated, tv.head) {
 714		struct reservation_object *resv = e->robj->tbo.resv;
 715		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 716
 717		if (r)
 718			return r;
 719	}
 720	return 0;
 721}
 722
 723/**
 724 * amdgpu_cs_parser_fini() - clean parser states
 725 * @parser:	parser structure holding parsing context.
 726 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 727 *
 728 * If error is set then unvalidate buffer, otherwise just free memory
 729 * used by parsing context.
 730 **/
 731static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 732{
 733	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 734	unsigned i;
 735
 736	if (!error) {
 737		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
 738
 739		ttm_eu_fence_buffer_objects(&parser->ticket,
 740					    &parser->validated,
 741					    parser->fence);
 742	} else if (backoff) {
 743		ttm_eu_backoff_reservation(&parser->ticket,
 744					   &parser->validated);
 745	}
 746	dma_fence_put(parser->fence);
 747
 748	if (parser->ctx)
 749		amdgpu_ctx_put(parser->ctx);
 750	if (parser->bo_list)
 751		amdgpu_bo_list_put(parser->bo_list);
 752
 753	for (i = 0; i < parser->nchunks; i++)
 754		drm_free_large(parser->chunks[i].kdata);
 755	kfree(parser->chunks);
 756	if (parser->job)
 757		amdgpu_job_free(parser->job);
 758	amdgpu_bo_unref(&parser->uf_entry.robj);
 759}
 760
 761static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 762				   struct amdgpu_vm *vm)
 763{
 764	struct amdgpu_device *adev = p->adev;
 765	struct amdgpu_bo_va *bo_va;
 766	struct amdgpu_bo *bo;
 767	int i, r;
 768
 769	r = amdgpu_vm_update_page_directory(adev, vm);
 770	if (r)
 771		return r;
 772
 773	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
 774	if (r)
 775		return r;
 776
 777	r = amdgpu_vm_clear_freed(adev, vm);
 778	if (r)
 779		return r;
 780
 781	if (p->bo_list) {
 782		for (i = 0; i < p->bo_list->num_entries; i++) {
 783			struct dma_fence *f;
 784
 785			/* ignore duplicates */
 786			bo = p->bo_list->array[i].robj;
 787			if (!bo)
 788				continue;
 789
 790			bo_va = p->bo_list->array[i].bo_va;
 791			if (bo_va == NULL)
 792				continue;
 793
 794			r = amdgpu_vm_bo_update(adev, bo_va, false);
 795			if (r)
 796				return r;
 797
 798			f = bo_va->last_pt_update;
 799			r = amdgpu_sync_fence(adev, &p->job->sync, f);
 800			if (r)
 801				return r;
 802		}
 803
 804	}
 805
 806	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 807
 808	if (amdgpu_vm_debug && p->bo_list) {
 809		/* Invalidate all BOs to test for userspace bugs */
 810		for (i = 0; i < p->bo_list->num_entries; i++) {
 811			/* ignore duplicates */
 812			bo = p->bo_list->array[i].robj;
 813			if (!bo)
 814				continue;
 815
 816			amdgpu_vm_bo_invalidate(adev, bo);
 817		}
 818	}
 819
 820	return r;
 821}
 822
 823static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 824				 struct amdgpu_cs_parser *p)
 825{
 826	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 827	struct amdgpu_vm *vm = &fpriv->vm;
 828	struct amdgpu_ring *ring = p->job->ring;
 829	int i, r;
 830
 831	/* Only for UVD/VCE VM emulation */
 832	if (ring->funcs->parse_cs) {
 833		for (i = 0; i < p->job->num_ibs; i++) {
 834			r = amdgpu_ring_parse_cs(ring, p, i);
 835			if (r)
 836				return r;
 837		}
 838	}
 839
 840	if (p->job->vm) {
 841		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 842
 843		r = amdgpu_bo_vm_update_pte(p, vm);
 844		if (r)
 845			return r;
 846	}
 847
 848	return amdgpu_cs_sync_rings(p);
 849}
 850
 851static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 852			     struct amdgpu_cs_parser *parser)
 853{
 854	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 855	struct amdgpu_vm *vm = &fpriv->vm;
 856	int i, j;
 857	int r;
 858
 859	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 860		struct amdgpu_cs_chunk *chunk;
 861		struct amdgpu_ib *ib;
 862		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 863		struct amdgpu_ring *ring;
 864
 865		chunk = &parser->chunks[i];
 866		ib = &parser->job->ibs[j];
 867		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 868
 869		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 870			continue;
 871
 872		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
 873				       chunk_ib->ip_instance, chunk_ib->ring,
 874				       &ring);
 875		if (r)
 876			return r;
 877
 878		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
 879			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 880			if (!parser->ctx->preamble_presented) {
 881				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
 882				parser->ctx->preamble_presented = true;
 883			}
 884		}
 885
 886		if (parser->job->ring && parser->job->ring != ring)
 887			return -EINVAL;
 888
 889		parser->job->ring = ring;
 890
 891		if (ring->funcs->parse_cs) {
 892			struct amdgpu_bo_va_mapping *m;
 893			struct amdgpu_bo *aobj = NULL;
 894			uint64_t offset;
 895			uint8_t *kptr;
 896
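			/* The IB contents are copied into a kernel IB below so
			 * that the ring's parse_cs callback can inspect them
			 * before submission.
			 */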
 897			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
 898						   &aobj);
 899			if (!aobj) {
 900				DRM_ERROR("IB va_start is invalid\n");
 901				return -EINVAL;
 902			}
 903
 904			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
 905			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 906				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 907				return -EINVAL;
 908			}
 909
 910			/* the IB should be reserved at this point */
 911			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 912			if (r) {
 913				return r;
 914			}
 915
 916			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 917			kptr += chunk_ib->va_start - offset;
 918
 919			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
 920			if (r) {
 921				DRM_ERROR("Failed to get ib!\n");
 922				return r;
 923			}
 924
 925			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 926			amdgpu_bo_kunmap(aobj);
 927		} else {
 928			r = amdgpu_ib_get(adev, vm, 0, ib);
 929			if (r) {
 930				DRM_ERROR("Failed to get ib!\n");
 931				return r;
 932			}
 933
 934		}
 935
 936		ib->gpu_addr = chunk_ib->va_start;
 937		ib->length_dw = chunk_ib->ib_bytes / 4;
 938		ib->flags = chunk_ib->flags;
 939		j++;
 940	}
 941
 942	/* UVD & VCE fw doesn't support user fences */
 943	if (parser->job->uf_addr && (
 944	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
 945	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 946		return -EINVAL;
 947
 948	return 0;
 949}
 950
 951static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 952				  struct amdgpu_cs_parser *p)
 953{
 954	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 955	int i, j, r;
 956
 957	for (i = 0; i < p->nchunks; ++i) {
 958		struct drm_amdgpu_cs_chunk_dep *deps;
 959		struct amdgpu_cs_chunk *chunk;
 960		unsigned num_deps;
 961
 962		chunk = &p->chunks[i];
 963
 964		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
 965			continue;
 966
 967		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 968		num_deps = chunk->length_dw * 4 /
 969			sizeof(struct drm_amdgpu_cs_chunk_dep);
 970
 971		for (j = 0; j < num_deps; ++j) {
 972			struct amdgpu_ring *ring;
 973			struct amdgpu_ctx *ctx;
 974			struct dma_fence *fence;
 975
 976			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 977					       deps[j].ip_instance,
 978					       deps[j].ring, &ring);
 979			if (r)
 980				return r;
 981
 982			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
 983			if (ctx == NULL)
 984				return -EINVAL;
 985
 986			fence = amdgpu_ctx_get_fence(ctx, ring,
 987						     deps[j].handle);
 988			if (IS_ERR(fence)) {
 989				r = PTR_ERR(fence);
 990				amdgpu_ctx_put(ctx);
 991				return r;
 992
 993			} else if (fence) {
 994				r = amdgpu_sync_fence(adev, &p->job->sync,
 995						      fence);
 996				dma_fence_put(fence);
 997				amdgpu_ctx_put(ctx);
 998				if (r)
 999					return r;
1000			}
1001		}
1002	}
1003
1004	return 0;
1005}
1006
1007static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1008			    union drm_amdgpu_cs *cs)
1009{
1010	struct amdgpu_ring *ring = p->job->ring;
1011	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
1012	struct amdgpu_job *job;
1013	int r;
1014
1015	job = p->job;
1016	p->job = NULL;
1017
1018	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
1019	if (r) {
1020		amdgpu_job_free(job);
1021		return r;
1022	}
1023
1024	job->owner = p->filp;
1025	job->fence_ctx = entity->fence_context;
1026	p->fence = dma_fence_get(&job->base.s_fence->finished);
1027	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
1028	job->uf_sequence = cs->out.handle;
1029	amdgpu_job_free_resources(job);
1030
1031	trace_amdgpu_cs_ioctl(job);
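	/* Once pushed, the job is owned by the scheduler; it must not be
	 * freed here anymore (p->job was already cleared above).
	 */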
1032	amd_sched_entity_push_job(&job->base);
1033
1034	return 0;
1035}
1036
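/*
 * Rough sketch of the userspace side of this ioctl (illustrative only, not
 * taken from libdrm; context and BO-list creation plus error handling are
 * omitted). Field names follow include/uapi/drm/amdgpu_drm.h.
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = {
 *		.ip_type   = AMDGPU_HW_IP_GFX,
 *		.va_start  = ib_gpu_va,		// GPU VA of the command buffer
 *		.ib_bytes  = ib_size_in_bytes,
 *	};
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id   = AMDGPU_CHUNK_ID_IB,
 *		.length_dw  = sizeof(ib_info) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	uint64_t chunk_ptr = (uintptr_t)&chunk;
 *	union drm_amdgpu_cs cs = {};
 *
 *	cs.in.ctx_id         = ctx_id;
 *	cs.in.bo_list_handle = bo_list_handle;
 *	cs.in.num_chunks     = 1;
 *	cs.in.chunks         = (uintptr_t)&chunk_ptr;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
 *	// on success, cs.out.handle is the fence sequence number
 */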
1037int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1038{
1039	struct amdgpu_device *adev = dev->dev_private;
1040	union drm_amdgpu_cs *cs = data;
1041	struct amdgpu_cs_parser parser = {};
1042	bool reserved_buffers = false;
1043	int i, r;
1044
1045	if (!adev->accel_working)
1046		return -EBUSY;
1047
1048	parser.adev = adev;
1049	parser.filp = filp;
1050
1051	r = amdgpu_cs_parser_init(&parser, data);
1052	if (r) {
1053		DRM_ERROR("Failed to initialize parser!\n");
1054		goto out;
1055	}
1056
1057	r = amdgpu_cs_parser_bos(&parser, data);
1058	if (r) {
1059		if (r == -ENOMEM)
1060			DRM_ERROR("Not enough memory for command submission!\n");
1061		else if (r != -ERESTARTSYS)
1062			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1063		goto out;
1064	}
1065
1066	reserved_buffers = true;
1067	r = amdgpu_cs_ib_fill(adev, &parser);
1068	if (r)
1069		goto out;
1070
1071	r = amdgpu_cs_dependencies(adev, &parser);
1072	if (r) {
1073		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1074		goto out;
1075	}
1076
1077	for (i = 0; i < parser.job->num_ibs; i++)
1078		trace_amdgpu_cs(&parser, i);
1079
1080	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
1081	if (r)
1082		goto out;
1083
1084	r = amdgpu_cs_submit(&parser, cs);
1085
1086out:
1087	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1088	return r;
1089}
1090
1091/**
1092 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1093 *
1094 * @dev: drm device
1095 * @data: data from userspace
1096 * @filp: file private
1097 *
1098 * Wait for the command submission identified by handle to finish.
1099 */
1100int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1101			 struct drm_file *filp)
1102{
1103	union drm_amdgpu_wait_cs *wait = data;
1104	struct amdgpu_device *adev = dev->dev_private;
1105	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1106	struct amdgpu_ring *ring = NULL;
1107	struct amdgpu_ctx *ctx;
1108	struct dma_fence *fence;
1109	long r;
1110
1111	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
1112			       wait->in.ring, &ring);
1113	if (r)
1114		return r;
1115
1116	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1117	if (ctx == NULL)
1118		return -EINVAL;
1119
1120	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
1121	if (IS_ERR(fence))
1122		r = PTR_ERR(fence);
1123	else if (fence) {
1124		r = dma_fence_wait_timeout(fence, true, timeout);
1125		dma_fence_put(fence);
1126	} else
1127		r = 1;
1128
1129	amdgpu_ctx_put(ctx);
1130	if (r < 0)
1131		return r;
1132
1133	memset(wait, 0, sizeof(*wait));
1134	wait->out.status = (r == 0);
1135
1136	return 0;
1137}
1138
1139/**
1140 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1141 *
1142 * @adev: amdgpu device
1143 * @filp: file private
1144 * @user: drm_amdgpu_fence copied from user space
1145 */
1146static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1147					     struct drm_file *filp,
1148					     struct drm_amdgpu_fence *user)
1149{
1150	struct amdgpu_ring *ring;
1151	struct amdgpu_ctx *ctx;
1152	struct dma_fence *fence;
1153	int r;
1154
1155	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
1156			       user->ring, &ring);
1157	if (r)
1158		return ERR_PTR(r);
1159
1160	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1161	if (ctx == NULL)
1162		return ERR_PTR(-EINVAL);
1163
1164	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
1165	amdgpu_ctx_put(ctx);
1166
1167	return fence;
1168}
1169
1170/**
1171 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1172 *
1173 * @adev: amdgpu device
1174 * @filp: file private
1175 * @wait: wait parameters
1176 * @fences: array of drm_amdgpu_fence
1177 */
1178static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1179				     struct drm_file *filp,
1180				     union drm_amdgpu_wait_fences *wait,
1181				     struct drm_amdgpu_fence *fences)
1182{
1183	uint32_t fence_count = wait->in.fence_count;
1184	unsigned int i;
1185	long r = 1;
1186
1187	for (i = 0; i < fence_count; i++) {
1188		struct dma_fence *fence;
1189		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1190
1191		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1192		if (IS_ERR(fence))
1193			return PTR_ERR(fence);
1194		else if (!fence)
1195			continue;
1196
1197		r = dma_fence_wait_timeout(fence, true, timeout);
1198		if (r < 0)
1199			return r;
1200
1201		if (r == 0)
1202			break;
1203	}
1204
1205	memset(wait, 0, sizeof(*wait));
1206	wait->out.status = (r > 0);
1207
1208	return 0;
1209}
1210
1211/**
1212 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1213 *
1214 * @adev: amdgpu device
1215 * @filp: file private
1216 * @wait: wait parameters
1217 * @fences: array of drm_amdgpu_fence
1218 */
1219static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1220				    struct drm_file *filp,
1221				    union drm_amdgpu_wait_fences *wait,
1222				    struct drm_amdgpu_fence *fences)
1223{
1224	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1225	uint32_t fence_count = wait->in.fence_count;
1226	uint32_t first = ~0;
1227	struct dma_fence **array;
1228	unsigned int i;
1229	long r;
1230
1231	/* Prepare the fence array */
1232	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1233
1234	if (array == NULL)
1235		return -ENOMEM;
1236
1237	for (i = 0; i < fence_count; i++) {
1238		struct dma_fence *fence;
1239
1240		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1241		if (IS_ERR(fence)) {
1242			r = PTR_ERR(fence);
1243			goto err_free_fence_array;
1244		} else if (fence) {
1245			array[i] = fence;
1246		} else { /* NULL, the fence has been already signaled */
1247			r = 1;
1248			goto out;
1249		}
1250	}
1251
1252	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1253				       &first);
1254	if (r < 0)
1255		goto err_free_fence_array;
1256
1257out:
1258	memset(wait, 0, sizeof(*wait));
1259	wait->out.status = (r > 0);
1260	wait->out.first_signaled = first;
1261	/* set return value 0 to indicate success */
1262	r = 0;
1263
1264err_free_fence_array:
1265	for (i = 0; i < fence_count; i++)
1266		dma_fence_put(array[i]);
1267	kfree(array);
1268
1269	return r;
1270}
1271
1272/**
1273 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1274 *
1275 * @dev: drm device
1276 * @data: data from userspace
1277 * @filp: file private
1278 */
1279int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1280				struct drm_file *filp)
1281{
1282	struct amdgpu_device *adev = dev->dev_private;
1283	union drm_amdgpu_wait_fences *wait = data;
1284	uint32_t fence_count = wait->in.fence_count;
1285	struct drm_amdgpu_fence *fences_user;
1286	struct drm_amdgpu_fence *fences;
1287	int r;
1288
1289	/* Get the fences from userspace */
1290	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1291			GFP_KERNEL);
1292	if (fences == NULL)
1293		return -ENOMEM;
1294
1295	fences_user = (void __user *)(unsigned long)(wait->in.fences);
1296	if (copy_from_user(fences, fences_user,
1297		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1298		r = -EFAULT;
1299		goto err_free_fences;
1300	}
1301
1302	if (wait->in.wait_all)
1303		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1304	else
1305		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1306
1307err_free_fences:
1308	kfree(fences);
1309
1310	return r;
1311}
1312
1313/**
1314 * amdgpu_cs_find_mapping - find a BO mapping for a VM address
1315 *
1316 * @parser: command submission parser context
1317 * @addr: VM address
1318 * @bo: resulting BO of the mapping found
1319 *
1320 * Search the buffer objects in the command submission context for a certain
1321 * virtual memory address. Returns the mapping structure when found, NULL
1322 * otherwise.
1323 */
1324struct amdgpu_bo_va_mapping *
1325amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1326		       uint64_t addr, struct amdgpu_bo **bo)
1327{
1328	struct amdgpu_bo_va_mapping *mapping;
1329	unsigned i;
1330
1331	if (!parser->bo_list)
1332		return NULL;
1333
1334	addr /= AMDGPU_GPU_PAGE_SIZE;
1335
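	/* Mappings store their interval in GPU page units, so compare the
	 * page number computed above rather than the byte address.
	 */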
1336	for (i = 0; i < parser->bo_list->num_entries; i++) {
1337		struct amdgpu_bo_list_entry *lobj;
1338
1339		lobj = &parser->bo_list->array[i];
1340		if (!lobj->bo_va)
1341			continue;
1342
1343		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
1344			if (mapping->it.start > addr ||
1345			    addr > mapping->it.last)
1346				continue;
1347
1348			*bo = lobj->bo_va->bo;
1349			return mapping;
1350		}
1351
1352		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
1353			if (mapping->it.start > addr ||
1354			    addr > mapping->it.last)
1355				continue;
1356
1357			*bo = lobj->bo_va->bo;
1358			return mapping;
1359		}
1360	}
1361
1362	return NULL;
1363}
1364
1365/**
1366 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
1367 *
1368 * @parser: command submission parser context
1369 *
1370 * Helper for UVD/VCE VM emulation; makes sure BOs are accessible by the system VM.
1371 */
1372int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
1373{
1374	unsigned i;
1375	int r;
1376
1377	if (!parser->bo_list)
1378		return 0;
1379
1380	for (i = 0; i < parser->bo_list->num_entries; i++) {
1381		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
1382
1383		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
1384		if (unlikely(r))
1385			return r;
1386
1387		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
1388			continue;
1389
1390		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1391		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
1392		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
1393		if (unlikely(r))
1394			return r;
1395	}
1396
1397	return 0;
1398}