   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
  35#include <drm/ttm/ttm_tt.h>
  36
  37#include "amdgpu_cs.h"
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_gmc.h"
  41#include "amdgpu_gem.h"
  42#include "amdgpu_ras.h"
  43
  44static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
  45				 struct amdgpu_device *adev,
  46				 struct drm_file *filp,
  47				 union drm_amdgpu_cs *cs)
  48{
  49	struct amdgpu_fpriv *fpriv = filp->driver_priv;
  50
  51	if (cs->in.num_chunks == 0)
  52		return -EINVAL;
  53
  54	memset(p, 0, sizeof(*p));
  55	p->adev = adev;
  56	p->filp = filp;
  57
  58	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
  59	if (!p->ctx)
  60		return -EINVAL;
  61
  62	if (atomic_read(&p->ctx->guilty)) {
  63		amdgpu_ctx_put(p->ctx);
  64		return -ECANCELED;
  65	}
  66
  67	amdgpu_sync_create(&p->sync);
  68	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
  69		      DRM_EXEC_IGNORE_DUPLICATES, 0);
  70	return 0;
  71}
  72
  73static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
  74			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
  75{
  76	struct drm_sched_entity *entity;
  77	unsigned int i;
  78	int r;
  79
  80	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
  81				  chunk_ib->ip_instance,
  82				  chunk_ib->ring, &entity);
  83	if (r)
  84		return r;
  85
  86	/*
  87	 * Abort if there is no run queue associated with this entity.
  88	 * Possibly because of disabled HW IP.
  89	 */
  90	if (entity->rq == NULL)
  91		return -EINVAL;
  92
  93	/* Check if we can add this IB to some existing job */
  94	for (i = 0; i < p->gang_size; ++i)
  95		if (p->entities[i] == entity)
  96			return i;
  97
   98	/* If not, increase the gang size if possible */
  99	if (i == AMDGPU_CS_GANG_SIZE)
 100		return -EINVAL;
 101
 102	p->entities[i] = entity;
 103	p->gang_size = i + 1;
 104	return i;
 105}
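/*
 * Illustrative example of the mapping amdgpu_cs_job_idx() builds: a CS whose
 * IB chunks target {GFX ring 0, COMPUTE ring 1, GFX ring 0} ends up with
 * entities[0] = gfx0, entities[1] = compute1, gang_size = 2, and the three
 * chunks resolve to job indices 0, 1 and 0 respectively.
 */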
 106
 107static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 108			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
 109			   unsigned int *num_ibs)
 110{
 111	int r;
 112
 113	r = amdgpu_cs_job_idx(p, chunk_ib);
 114	if (r < 0)
 115		return r;
 116
 117	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
 118		return -EINVAL;
 119
 120	++(num_ibs[r]);
 121	p->gang_leader_idx = r;
 122	return 0;
 123}
 124
 125static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 126				   struct drm_amdgpu_cs_chunk_fence *data,
 127				   uint32_t *offset)
 128{
  129	struct drm_gem_object *gobj;
  130	unsigned long size;
  131
  132	gobj = drm_gem_object_lookup(p->filp, data->handle);
  133	if (gobj == NULL)
  134		return -EINVAL;
  135
  136	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
  137	drm_gem_object_put(gobj);
  138
  139	size = amdgpu_bo_size(p->uf_bo);
  140	if (size != PAGE_SIZE || data->offset > (size - 8))
  141		return -EINVAL;
  142
  143	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
  144		return -EINVAL;
  145
  146	*offset = data->offset;
  147	return 0;
  148}
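/*
 * Background note on the checks above: the user fence is a single-page BO
 * into which the hardware writes a 64-bit sequence value when the job
 * completes, so the BO must be exactly PAGE_SIZE bytes and the offset must
 * leave room for the 8-byte value. With a 4096-byte page, for example, any
 * data->offset above 4096 - 8 = 4088 is rejected.
 */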
 149
 150static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
 151				   struct drm_amdgpu_bo_list_in *data)
 152{
 153	struct drm_amdgpu_bo_list_entry *info;
 154	int r;
 155
 156	r = amdgpu_bo_create_list_entry_array(data, &info);
 157	if (r)
 158		return r;
 159
 160	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
 161				  &p->bo_list);
 162	if (r)
 163		goto error_free;
 164
 165	kvfree(info);
 166	return 0;
 167
 168error_free:
 169	kvfree(info);
 170
 171	return r;
 172}
 173
 174/* Copy the data from userspace and go over it the first time */
 175static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 176			   union drm_amdgpu_cs *cs)
 177{
 178	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 179	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
 180	struct amdgpu_vm *vm = &fpriv->vm;
 181	uint64_t *chunk_array_user;
 182	uint64_t *chunk_array;
 183	uint32_t uf_offset = 0;
 184	size_t size;
 185	int ret;
 186	int i;
 187
 188	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
 189				     GFP_KERNEL);
 190	if (!chunk_array)
 191		return -ENOMEM;
 192
 193	/* get chunks */
 194	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 195	if (copy_from_user(chunk_array, chunk_array_user,
 196			   sizeof(uint64_t)*cs->in.num_chunks)) {
 197		ret = -EFAULT;
 198		goto free_chunk;
 199	}
 200
 201	p->nchunks = cs->in.num_chunks;
 202	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 203			    GFP_KERNEL);
 204	if (!p->chunks) {
 205		ret = -ENOMEM;
 206		goto free_chunk;
 207	}
 208
 209	for (i = 0; i < p->nchunks; i++) {
 210		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
 211		struct drm_amdgpu_cs_chunk user_chunk;
 212		uint32_t __user *cdata;
 213
 214		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 215		if (copy_from_user(&user_chunk, chunk_ptr,
 216				       sizeof(struct drm_amdgpu_cs_chunk))) {
 217			ret = -EFAULT;
 218			i--;
 219			goto free_partial_kdata;
 220		}
 221		p->chunks[i].chunk_id = user_chunk.chunk_id;
 222		p->chunks[i].length_dw = user_chunk.length_dw;
 223
 224		size = p->chunks[i].length_dw;
 225		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 226
 227		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
 228						    GFP_KERNEL);
 229		if (p->chunks[i].kdata == NULL) {
 230			ret = -ENOMEM;
 231			i--;
 232			goto free_partial_kdata;
 233		}
 234		size *= sizeof(uint32_t);
 235		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 236			ret = -EFAULT;
 237			goto free_partial_kdata;
 238		}
 239
 240		/* Assume the worst on the following checks */
 241		ret = -EINVAL;
 242		switch (p->chunks[i].chunk_id) {
 243		case AMDGPU_CHUNK_ID_IB:
 244			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
 245				goto free_partial_kdata;
 246
 247			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
 248			if (ret)
 249				goto free_partial_kdata;
 250			break;
 251
 252		case AMDGPU_CHUNK_ID_FENCE:
 253			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
 254				goto free_partial_kdata;
 255
 256			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
 257						      &uf_offset);
 258			if (ret)
 259				goto free_partial_kdata;
 260			break;
 261
 262		case AMDGPU_CHUNK_ID_BO_HANDLES:
 263			if (size < sizeof(struct drm_amdgpu_bo_list_in))
 264				goto free_partial_kdata;
 265
 266			/* Only a single BO list is allowed to simplify handling. */
 267			if (p->bo_list)
 268				goto free_partial_kdata;
 269
 270			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 271			if (ret)
 272				goto free_partial_kdata;
 273			break;
 274
 275		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 276		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 277		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 278		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 279		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 280		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 281		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 282			break;
 283
 284		default:
 285			goto free_partial_kdata;
 286		}
 287	}
 288
 289	if (!p->gang_size) {
 290		ret = -EINVAL;
 291		goto free_all_kdata;
 292	}
 293
 294	for (i = 0; i < p->gang_size; ++i) {
 295		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
 296				       num_ibs[i], &p->jobs[i]);
 297		if (ret)
 298			goto free_all_kdata;
 299		p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
 300	}
 301	p->gang_leader = p->jobs[p->gang_leader_idx];
 302
 303	if (p->ctx->generation != p->gang_leader->generation) {
 304		ret = -ECANCELED;
 305		goto free_all_kdata;
 306	}
 307
 308	if (p->uf_bo)
 309		p->gang_leader->uf_addr = uf_offset;
 310	kvfree(chunk_array);
 311
 312	/* Use this opportunity to fill in task info for the vm */
 313	amdgpu_vm_set_task_info(vm);
 314
 315	return 0;
 316
 317free_all_kdata:
 318	i = p->nchunks - 1;
 319free_partial_kdata:
 320	for (; i >= 0; i--)
 321		kvfree(p->chunks[i].kdata);
 322	kvfree(p->chunks);
 323	p->chunks = NULL;
 324	p->nchunks = 0;
 325free_chunk:
 326	kvfree(chunk_array);
 327
 328	return ret;
 329}
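/*
 * For reference, a minimal userspace-side sketch of the layout that
 * amdgpu_cs_pass1() parses: cs.in.chunks points to an array of u64 values,
 * each the address of a struct drm_amdgpu_cs_chunk whose chunk_data in turn
 * points to the typed payload. Guarded out and illustrative only;
 * submit_one_ib() is a hypothetical helper and error handling is omitted.
 */
#if 0
static int submit_one_ib(int fd, uint32_t ctx_id, uint64_t va, uint32_t bytes)
{
	struct drm_amdgpu_cs_chunk_ib ib_data = {
		.ip_type = AMDGPU_HW_IP_GFX,
		.va_start = va,
		.ib_bytes = bytes,
	};
	struct drm_amdgpu_cs_chunk chunk = {
		.chunk_id = AMDGPU_CHUNK_ID_IB,
		.length_dw = sizeof(ib_data) / 4,
		.chunk_data = (uintptr_t)&ib_data,
	};
	uint64_t chunk_ptr = (uintptr_t)&chunk;
	union drm_amdgpu_cs cs = {};

	cs.in.ctx_id = ctx_id;
	cs.in.num_chunks = 1;
	cs.in.chunks = (uintptr_t)&chunk_ptr;

	return drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
}
#endif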
 330
 331static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 332			   struct amdgpu_cs_chunk *chunk,
 333			   unsigned int *ce_preempt,
 334			   unsigned int *de_preempt)
 335{
 336	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
 337	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 338	struct amdgpu_vm *vm = &fpriv->vm;
 339	struct amdgpu_ring *ring;
 340	struct amdgpu_job *job;
 341	struct amdgpu_ib *ib;
 342	int r;
 343
 344	r = amdgpu_cs_job_idx(p, chunk_ib);
 345	if (r < 0)
 346		return r;
 347
 348	job = p->jobs[r];
 349	ring = amdgpu_job_ring(job);
 350	ib = &job->ibs[job->num_ibs++];
 351
 352	/* MM engine doesn't support user fences */
 353	if (p->uf_bo && ring->funcs->no_user_fence)
 354		return -EINVAL;
 355
 356	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 357	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 358		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 359			(*ce_preempt)++;
 360		else
 361			(*de_preempt)++;
 362
 363		/* Each GFX command submit allows only 1 IB max
 364		 * preemptible for CE & DE */
 365		if (*ce_preempt > 1 || *de_preempt > 1)
 366			return -EINVAL;
 367	}
 368
 369	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 370		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 371
 372	r =  amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
 373			   chunk_ib->ib_bytes : 0,
 374			   AMDGPU_IB_POOL_DELAYED, ib);
 375	if (r) {
 376		DRM_ERROR("Failed to get ib !\n");
 377		return r;
 378	}
 379
 380	ib->gpu_addr = chunk_ib->va_start;
 381	ib->length_dw = chunk_ib->ib_bytes / 4;
 382	ib->flags = chunk_ib->flags;
 383	return 0;
 384}
 385
 386static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 387				     struct amdgpu_cs_chunk *chunk)
 388{
 389	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 390	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 391	unsigned int num_deps;
 392	int i, r;
 393
 394	num_deps = chunk->length_dw * 4 /
 395		sizeof(struct drm_amdgpu_cs_chunk_dep);
 396
 397	for (i = 0; i < num_deps; ++i) {
 398		struct amdgpu_ctx *ctx;
 399		struct drm_sched_entity *entity;
 400		struct dma_fence *fence;
 401
 402		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 403		if (ctx == NULL)
 404			return -EINVAL;
 405
 406		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 407					  deps[i].ip_instance,
 408					  deps[i].ring, &entity);
 409		if (r) {
 410			amdgpu_ctx_put(ctx);
 411			return r;
 412		}
 413
 414		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 415		amdgpu_ctx_put(ctx);
 416
 417		if (IS_ERR(fence))
 418			return PTR_ERR(fence);
 419		else if (!fence)
 420			continue;
 421
 422		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 423			struct drm_sched_fence *s_fence;
 424			struct dma_fence *old = fence;
 425
 426			s_fence = to_drm_sched_fence(fence);
 427			fence = dma_fence_get(&s_fence->scheduled);
 428			dma_fence_put(old);
 429		}
 430
 431		r = amdgpu_sync_fence(&p->sync, fence);
 432		dma_fence_put(fence);
 433		if (r)
 434			return r;
 435	}
 436	return 0;
 437}
 438
 439static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 440					 uint32_t handle, u64 point,
 441					 u64 flags)
 442{
 443	struct dma_fence *fence;
 444	int r;
 445
 446	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
 447	if (r) {
 448		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
 449			  handle, point, r);
 450		return r;
 451	}
 452
 453	r = amdgpu_sync_fence(&p->sync, fence);
 454	dma_fence_put(fence);
 455	return r;
 456}
 457
 458static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 459				   struct amdgpu_cs_chunk *chunk)
 460{
 461	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 462	unsigned int num_deps;
 463	int i, r;
 464
 465	num_deps = chunk->length_dw * 4 /
 466		sizeof(struct drm_amdgpu_cs_chunk_sem);
 467	for (i = 0; i < num_deps; ++i) {
 468		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
 469		if (r)
 470			return r;
 471	}
 472
 473	return 0;
 474}
 475
 476static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 477					      struct amdgpu_cs_chunk *chunk)
 478{
 479	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 480	unsigned int num_deps;
 481	int i, r;
 482
 483	num_deps = chunk->length_dw * 4 /
 484		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 485	for (i = 0; i < num_deps; ++i) {
 486		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
 487						  syncobj_deps[i].point,
 488						  syncobj_deps[i].flags);
 489		if (r)
 490			return r;
 491	}
 492
 493	return 0;
 494}
 495
 496static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 497				    struct amdgpu_cs_chunk *chunk)
 498{
 499	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 500	unsigned int num_deps;
 501	int i;
 502
 503	num_deps = chunk->length_dw * 4 /
 504		sizeof(struct drm_amdgpu_cs_chunk_sem);
 505
 506	if (p->post_deps)
 507		return -EINVAL;
 508
 509	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 510				     GFP_KERNEL);
 511	p->num_post_deps = 0;
 512
 513	if (!p->post_deps)
 514		return -ENOMEM;
 515
 516
 517	for (i = 0; i < num_deps; ++i) {
 518		p->post_deps[i].syncobj =
 519			drm_syncobj_find(p->filp, deps[i].handle);
 520		if (!p->post_deps[i].syncobj)
 521			return -EINVAL;
 522		p->post_deps[i].chain = NULL;
 523		p->post_deps[i].point = 0;
 524		p->num_post_deps++;
 525	}
 526
 527	return 0;
 528}
 529
 530static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 531						struct amdgpu_cs_chunk *chunk)
 532{
 533	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 534	unsigned int num_deps;
 535	int i;
 536
 537	num_deps = chunk->length_dw * 4 /
 538		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 539
 540	if (p->post_deps)
 541		return -EINVAL;
 542
 543	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 544				     GFP_KERNEL);
 545	p->num_post_deps = 0;
 546
 547	if (!p->post_deps)
 548		return -ENOMEM;
 549
 550	for (i = 0; i < num_deps; ++i) {
 551		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
 552
 553		dep->chain = NULL;
 554		if (syncobj_deps[i].point) {
 555			dep->chain = dma_fence_chain_alloc();
 556			if (!dep->chain)
 557				return -ENOMEM;
 558		}
 559
 560		dep->syncobj = drm_syncobj_find(p->filp,
 561						syncobj_deps[i].handle);
 562		if (!dep->syncobj) {
 563			dma_fence_chain_free(dep->chain);
 564			return -EINVAL;
 565		}
 566		dep->point = syncobj_deps[i].point;
 567		p->num_post_deps++;
 568	}
 569
 570	return 0;
 571}
 572
 573static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
 574			       struct amdgpu_cs_chunk *chunk)
 575{
 576	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
 577	int i;
 578
 579	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
 580		return -EINVAL;
 581
 582	for (i = 0; i < p->gang_size; ++i) {
 583		p->jobs[i]->shadow_va = shadow->shadow_va;
 584		p->jobs[i]->csa_va = shadow->csa_va;
 585		p->jobs[i]->gds_va = shadow->gds_va;
 586		p->jobs[i]->init_shadow =
 587			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
 588	}
 589
 590	return 0;
 591}
 592
 593static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 594{
 595	unsigned int ce_preempt = 0, de_preempt = 0;
 596	int i, r;
 597
 598	for (i = 0; i < p->nchunks; ++i) {
 599		struct amdgpu_cs_chunk *chunk;
 600
 601		chunk = &p->chunks[i];
 602
 603		switch (chunk->chunk_id) {
 604		case AMDGPU_CHUNK_ID_IB:
 605			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
 606			if (r)
 607				return r;
 608			break;
 609		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 610		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 611			r = amdgpu_cs_p2_dependencies(p, chunk);
 612			if (r)
 613				return r;
 614			break;
 615		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 616			r = amdgpu_cs_p2_syncobj_in(p, chunk);
 617			if (r)
 618				return r;
 619			break;
 620		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 621			r = amdgpu_cs_p2_syncobj_out(p, chunk);
 622			if (r)
 623				return r;
 624			break;
 625		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 626			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
 627			if (r)
 628				return r;
 629			break;
 630		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 631			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
 632			if (r)
 633				return r;
 634			break;
 635		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 636			r = amdgpu_cs_p2_shadow(p, chunk);
 637			if (r)
 638				return r;
 639			break;
 640		}
 641	}
 642
 643	return 0;
 644}
 645
 646/* Convert microseconds to bytes. */
 647static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 648{
 649	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 650		return 0;
 651
 652	/* Since accum_us is incremented by a million per second, just
 653	 * multiply it by the number of MB/s to get the number of bytes.
 654	 */
 655	return us << adev->mm_stats.log2_max_MBps;
 656}
 657
 658static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 659{
 660	if (!adev->mm_stats.log2_max_MBps)
 661		return 0;
 662
 663	return bytes >> adev->mm_stats.log2_max_MBps;
 664}
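/*
 * Worked example for the two conversions above (illustrative numbers): with
 * log2_max_MBps = 6, i.e. a ~64 MB/s budget, an accumulated 2000 us allows
 * 2000 << 6 = 128000 bytes of buffer movement, and charging a 1 MiB move
 * back costs 1048576 >> 6 = 16384 us. The shift is a good approximation
 * because accum_us grows by ~10^6 per second while a MiB is 2^20 bytes,
 * so us << log2(MBps) ~= us * MBps ~= bytes.
 */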
 665
 666/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 667 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 668 * which means it can go over the threshold once. If that happens, the driver
 669 * will be in debt and no other buffer migrations can be done until that debt
 670 * is repaid.
 671 *
 672 * This approach allows moving a buffer of any size (it's important to allow
 673 * that).
 674 *
 675 * The currency is simply time in microseconds and it increases as the clock
 676 * ticks. The accumulated microseconds (us) are converted to bytes and
 677 * returned.
 678 */
 679static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 680					      u64 *max_bytes,
 681					      u64 *max_vis_bytes)
 682{
 683	s64 time_us, increment_us;
 684	u64 free_vram, total_vram, used_vram;
 685	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 686	 * throttling.
 687	 *
 688	 * It means that in order to get full max MBps, at least 5 IBs per
 689	 * second must be submitted and not more than 200ms apart from each
 690	 * other.
 691	 */
 692	const s64 us_upper_bound = 200000;
 693
 694	if (!adev->mm_stats.log2_max_MBps) {
 695		*max_bytes = 0;
 696		*max_vis_bytes = 0;
 697		return;
 698	}
 699
 700	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 701	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 702	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 703
 704	spin_lock(&adev->mm_stats.lock);
 705
 706	/* Increase the amount of accumulated us. */
 707	time_us = ktime_to_us(ktime_get());
 708	increment_us = time_us - adev->mm_stats.last_update_us;
 709	adev->mm_stats.last_update_us = time_us;
 710	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 711				      us_upper_bound);
 712
 713	/* This prevents the short period of low performance when the VRAM
 714	 * usage is low and the driver is in debt or doesn't have enough
 715	 * accumulated us to fill VRAM quickly.
 716	 *
 717	 * The situation can occur in these cases:
 718	 * - a lot of VRAM is freed by userspace
 719	 * - the presence of a big buffer causes a lot of evictions
 720	 *   (solution: split buffers into smaller ones)
 721	 *
 722	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 723	 * accum_us to a positive number.
 724	 */
 725	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 726		s64 min_us;
 727
 728		/* Be more aggressive on dGPUs. Try to fill a portion of free
 729		 * VRAM now.
 730		 */
 731		if (!(adev->flags & AMD_IS_APU))
 732			min_us = bytes_to_us(adev, free_vram / 4);
 733		else
 734			min_us = 0; /* Reset accum_us on APUs. */
 735
 736		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 737	}
 738
 739	/* This is set to 0 if the driver is in debt to disallow (optional)
 740	 * buffer moves.
 741	 */
 742	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 743
 744	/* Do the same for visible VRAM if half of it is free */
 745	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 746		u64 total_vis_vram = adev->gmc.visible_vram_size;
 747		u64 used_vis_vram =
 748		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
 749
 750		if (used_vis_vram < total_vis_vram) {
 751			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 752
 753			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 754							  increment_us, us_upper_bound);
 755
 756			if (free_vis_vram >= total_vis_vram / 2)
 757				adev->mm_stats.accum_us_vis =
 758					max(bytes_to_us(adev, free_vis_vram / 2),
 759					    adev->mm_stats.accum_us_vis);
 760		}
 761
 762		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 763	} else {
 764		*max_vis_bytes = 0;
 765	}
 766
 767	spin_unlock(&adev->mm_stats.lock);
 768}
 769
 770/* Report how many bytes have really been moved for the last command
 771 * submission. This can result in a debt that can stop buffer migrations
 772 * temporarily.
 773 */
 774void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 775				  u64 num_vis_bytes)
 776{
 777	spin_lock(&adev->mm_stats.lock);
 778	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 779	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 780	spin_unlock(&adev->mm_stats.lock);
 781}
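/*
 * Taken together, amdgpu_cs_get_threshold_for_moves() and
 * amdgpu_cs_report_moved_bytes() implement a token-bucket style throttle:
 * the former turns elapsed wall-clock time into a byte budget (capped at
 * 200 ms worth), the latter subtracts what was actually moved and may drive
 * accum_us negative ("debt"), so later submissions see a zero threshold
 * until enough time has passed. A minimal standalone sketch of the idea,
 * with hypothetical names and none of the VRAM-pressure special cases:
 */
#if 0
struct move_budget {
	s64 accum_us;			/* accumulated time, may go negative */
	unsigned int log2_max_MBps;	/* throughput expressed as a shift */
};

static u64 budget_take(struct move_budget *b, s64 elapsed_us)
{
	b->accum_us = min_t(s64, b->accum_us + elapsed_us, 200000);
	return b->accum_us > 0 ? (u64)b->accum_us << b->log2_max_MBps : 0;
}

static void budget_charge(struct move_budget *b, u64 moved_bytes)
{
	b->accum_us -= moved_bytes >> b->log2_max_MBps; /* debt if negative */
}
#endif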
 782
 783static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 784{
 785	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 786	struct amdgpu_cs_parser *p = param;
 787	struct ttm_operation_ctx ctx = {
 788		.interruptible = true,
 789		.no_wait_gpu = false,
 790		.resv = bo->tbo.base.resv
 791	};
 792	uint32_t domain;
 793	int r;
 794
 795	if (bo->tbo.pin_count)
 796		return 0;
 797
 798	/* Don't move this buffer if we have depleted our allowance
 799	 * to move it. Don't move anything if the threshold is zero.
 800	 */
 801	if (p->bytes_moved < p->bytes_moved_threshold &&
 802	    (!bo->tbo.base.dma_buf ||
 803	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 804		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 805		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 806			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 807			 * visible VRAM if we've depleted our allowance to do
 808			 * that.
 809			 */
 810			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 811				domain = bo->preferred_domains;
 812			else
 813				domain = bo->allowed_domains;
 814		} else {
 815			domain = bo->preferred_domains;
 816		}
 817	} else {
 818		domain = bo->allowed_domains;
 819	}
 820
 821retry:
 822	amdgpu_bo_placement_from_domain(bo, domain);
 823	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 824
 825	p->bytes_moved += ctx.bytes_moved;
 826	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 827	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 828		p->bytes_moved_vis += ctx.bytes_moved;
 829
 830	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 831		domain = bo->allowed_domains;
 832		goto retry;
 833	}
 834
 835	return r;
 836}
  837
 838static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 839				union drm_amdgpu_cs *cs)
 840{
 841	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 842	struct ttm_operation_ctx ctx = { true, false };
 843	struct amdgpu_vm *vm = &fpriv->vm;
 844	struct amdgpu_bo_list_entry *e;
 845	struct drm_gem_object *obj;
 846	unsigned long index;
 847	unsigned int i;
 848	int r;
  849
 850	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 851	if (cs->in.bo_list_handle) {
 852		if (p->bo_list)
 853			return -EINVAL;
 854
 855		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 856				       &p->bo_list);
 857		if (r)
 858			return r;
 859	} else if (!p->bo_list) {
  860		/* Create an empty bo_list when no handle is provided */
 861		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 862					  &p->bo_list);
 863		if (r)
 864			return r;
 865	}
 866
 867	mutex_lock(&p->bo_list->bo_list_mutex);
  868
 869	/* Get userptr backing pages. If pages are updated after registered
 870	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
 871	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
 872	 */
  873	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 874		bool userpage_invalidated = false;
 875		struct amdgpu_bo *bo = e->bo;
 876		int i;
 877
 878		e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
 879					 sizeof(struct page *),
 880					 GFP_KERNEL);
 881		if (!e->user_pages) {
 882			DRM_ERROR("kvmalloc_array failure\n");
 883			r = -ENOMEM;
 884			goto out_free_user_pages;
 885		}
 886
 887		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 888		if (r) {
 889			kvfree(e->user_pages);
 890			e->user_pages = NULL;
 891			goto out_free_user_pages;
 892		}
 893
 894		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 895			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 896				userpage_invalidated = true;
 897				break;
 898			}
 899		}
 900		e->user_invalidated = userpage_invalidated;
 901	}
 902
 903	drm_exec_until_all_locked(&p->exec) {
 904		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
 905		drm_exec_retry_on_contention(&p->exec);
 906		if (unlikely(r))
 907			goto out_free_user_pages;
 908
 909		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 910			/* One fence for TTM and one for each CS job */
 911			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
 912						 1 + p->gang_size);
 913			drm_exec_retry_on_contention(&p->exec);
 914			if (unlikely(r))
 915				goto out_free_user_pages;
 916
 917			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
 918		}
 919
 920		if (p->uf_bo) {
 921			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
 922						 1 + p->gang_size);
 923			drm_exec_retry_on_contention(&p->exec);
 924			if (unlikely(r))
 925				goto out_free_user_pages;
 926		}
 927	}
 928
 929	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 930		struct mm_struct *usermm;
 931
 932		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
 933		if (usermm && usermm != current->mm) {
 934			r = -EPERM;
 935			goto out_free_user_pages;
 936		}
 937
 938		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
 939		    e->user_invalidated && e->user_pages) {
 940			amdgpu_bo_placement_from_domain(e->bo,
 941							AMDGPU_GEM_DOMAIN_CPU);
 942			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
 943					    &ctx);
 944			if (r)
 945				goto out_free_user_pages;
 946
 947			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
 948						     e->user_pages);
 949		}
 950
 951		kvfree(e->user_pages);
 952		e->user_pages = NULL;
 953	}
 954
 955	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 956					  &p->bytes_moved_vis_threshold);
 957	p->bytes_moved = 0;
 958	p->bytes_moved_vis = 0;
 959
 960	r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
 961			       amdgpu_cs_bo_validate, p);
 962	if (r) {
 963		DRM_ERROR("amdgpu_vm_validate() failed.\n");
 964		goto out_free_user_pages;
 965	}
 966
 967	drm_exec_for_each_locked_object(&p->exec, index, obj) {
 968		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
 969		if (unlikely(r))
 970			goto out_free_user_pages;
 971	}
 972
 973	if (p->uf_bo) {
 974		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
 975		if (unlikely(r))
 976			goto out_free_user_pages;
 977
  978		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
 979	}
 980
 981	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 982				     p->bytes_moved_vis);
 983
 984	for (i = 0; i < p->gang_size; ++i)
 985		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
 986					 p->bo_list->gws_obj,
 987					 p->bo_list->oa_obj);
 988	return 0;
  989
 990out_free_user_pages:
 991	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 992		struct amdgpu_bo *bo = e->bo;
 993
 994		if (!e->user_pages)
 995			continue;
 996		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 997		kvfree(e->user_pages);
 998		e->user_pages = NULL;
 999		e->range = NULL;
1000	}
1001	mutex_unlock(&p->bo_list->bo_list_mutex);
1002	return r;
1003}
1004
1005static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1006{
1007	int i, j;
1008
1009	if (!trace_amdgpu_cs_enabled())
1010		return;
1011
1012	for (i = 0; i < p->gang_size; ++i) {
1013		struct amdgpu_job *job = p->jobs[i];
1014
1015		for (j = 0; j < job->num_ibs; ++j)
1016			trace_amdgpu_cs(p, job, &job->ibs[j]);
1017	}
1018}
1019
1020static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1021			       struct amdgpu_job *job)
1022{
1023	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1024	unsigned int i;
1025	int r;
1026
1027	/* Only for UVD/VCE VM emulation */
1028	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1029		return 0;
1030
1031	for (i = 0; i < job->num_ibs; ++i) {
1032		struct amdgpu_ib *ib = &job->ibs[i];
1033		struct amdgpu_bo_va_mapping *m;
1034		struct amdgpu_bo *aobj;
1035		uint64_t va_start;
1036		uint8_t *kptr;
1037
1038		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1039		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1040		if (r) {
1041			DRM_ERROR("IB va_start is invalid\n");
1042			return r;
1043		}
1044
1045		if ((va_start + ib->length_dw * 4) >
1046		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1047			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1048			return -EINVAL;
1049		}
1050
1051		/* the IB should be reserved at this point */
1052		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1053		if (r)
 1054			return r;
1055
1056		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1057
1058		if (ring->funcs->parse_cs) {
1059			memcpy(ib->ptr, kptr, ib->length_dw * 4);
1060			amdgpu_bo_kunmap(aobj);
1061
1062			r = amdgpu_ring_parse_cs(ring, p, job, ib);
1063			if (r)
1064				return r;
1065
1066			if (ib->sa_bo)
1067				ib->gpu_addr =  amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1068		} else {
1069			ib->ptr = (uint32_t *)kptr;
1070			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1071			amdgpu_bo_kunmap(aobj);
1072			if (r)
1073				return r;
1074		}
1075	}
1076
1077	return 0;
1078}
1079
1080static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1081{
1082	unsigned int i;
1083	int r;
1084
1085	for (i = 0; i < p->gang_size; ++i) {
1086		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1087		if (r)
1088			return r;
1089	}
1090	return 0;
1091}
1092
1093static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1094{
1095	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1096	struct amdgpu_job *job = p->gang_leader;
1097	struct amdgpu_device *adev = p->adev;
1098	struct amdgpu_vm *vm = &fpriv->vm;
1099	struct amdgpu_bo_list_entry *e;
 1100	struct amdgpu_bo_va *bo_va;
1101	unsigned int i;
1102	int r;
1103
1104	/*
 1105	 * We can't use gang submit with reserved VMIDs when the VM changes
1106	 * can't be invalidated by more than one engine at the same time.
1107	 */
1108	if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
1109		for (i = 0; i < p->gang_size; ++i) {
1110			struct drm_sched_entity *entity = p->entities[i];
1111			struct drm_gpu_scheduler *sched = entity->rq->sched;
1112			struct amdgpu_ring *ring = to_amdgpu_ring(sched);
1113
1114			if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
1115				return -EINVAL;
1116		}
1117	}
1118
1119	r = amdgpu_vm_clear_freed(adev, vm, NULL);
1120	if (r)
1121		return r;
1122
1123	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1124	if (r)
1125		return r;
1126
1127	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1128	if (r)
1129		return r;
1130
1131	if (fpriv->csa_va) {
1132		bo_va = fpriv->csa_va;
1133		BUG_ON(!bo_va);
1134		r = amdgpu_vm_bo_update(adev, bo_va, false);
1135		if (r)
1136			return r;
1137
1138		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1139		if (r)
1140			return r;
1141	}
1142
1143	/* FIXME: In theory this loop shouldn't be needed any more when
1144	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
1145	 * with p->ticket. But removing it caused test regressions, so I'm
1146	 * leaving it here for now.
1147	 */
 1148	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1149		bo_va = e->bo_va;
1150		if (bo_va == NULL)
1151			continue;
1152
1153		r = amdgpu_vm_bo_update(adev, bo_va, false);
1154		if (r)
1155			return r;
1156
1157		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1158		if (r)
1159			return r;
1160	}
1161
1162	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
1163	if (r)
1164		return r;
1165
1166	r = amdgpu_vm_update_pdes(adev, vm, false);
1167	if (r)
1168		return r;
1169
1170	r = amdgpu_sync_fence(&p->sync, vm->last_update);
1171	if (r)
1172		return r;
1173
1174	for (i = 0; i < p->gang_size; ++i) {
1175		job = p->jobs[i];
1176
1177		if (!job->vm)
1178			continue;
1179
1180		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1181	}
1182
1183	if (adev->debug_vm) {
1184		/* Invalidate all BOs to test for userspace bugs */
1185		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1186			struct amdgpu_bo *bo = e->bo;
1187
1188			/* ignore duplicates */
1189			if (!bo)
1190				continue;
1191
1192			amdgpu_vm_bo_invalidate(adev, bo, false);
1193		}
1194	}
1195
1196	return 0;
1197}
1198
1199static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1200{
1201	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1202	struct drm_gpu_scheduler *sched;
1203	struct drm_gem_object *obj;
1204	struct dma_fence *fence;
1205	unsigned long index;
1206	unsigned int i;
1207	int r;
1208
1209	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1210	if (r) {
1211		if (r != -ERESTARTSYS)
1212			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1213		return r;
1214	}
1215
1216	drm_exec_for_each_locked_object(&p->exec, index, obj) {
1217		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1218
1219		struct dma_resv *resv = bo->tbo.base.resv;
1220		enum amdgpu_sync_mode sync_mode;
1221
1222		sync_mode = amdgpu_bo_explicit_sync(bo) ?
1223			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1224		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1225				     &fpriv->vm);
1226		if (r)
1227			return r;
1228	}
1229
1230	for (i = 0; i < p->gang_size; ++i) {
1231		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1232		if (r)
1233			return r;
1234	}
1235
1236	sched = p->gang_leader->base.entity->rq->sched;
1237	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1238		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1239
1240		/*
 1241		 * When we have a dependency it might be necessary to insert a
1242		 * pipeline sync to make sure that all caches etc are flushed and the
1243		 * next job actually sees the results from the previous one
1244		 * before we start executing on the same scheduler ring.
1245		 */
1246		if (!s_fence || s_fence->sched != sched) {
1247			dma_fence_put(fence);
1248			continue;
1249		}
1250
1251		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1252		dma_fence_put(fence);
1253		if (r)
1254			return r;
1255	}
1256	return 0;
1257}
1258
1259static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1260{
1261	int i;
1262
1263	for (i = 0; i < p->num_post_deps; ++i) {
1264		if (p->post_deps[i].chain && p->post_deps[i].point) {
1265			drm_syncobj_add_point(p->post_deps[i].syncobj,
1266					      p->post_deps[i].chain,
1267					      p->fence, p->post_deps[i].point);
1268			p->post_deps[i].chain = NULL;
1269		} else {
1270			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1271						  p->fence);
1272		}
1273	}
1274}
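/*
 * In the loop above, a non-zero point marks the syncobj as a timeline:
 * p->fence is attached at that point through the chain node pre-allocated
 * in amdgpu_cs_p2_syncobj_timeline_signal(). Point 0 denotes a plain binary
 * syncobj whose fence is simply replaced.
 */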
1275
1276static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1277			    union drm_amdgpu_cs *cs)
1278{
1279	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1280	struct amdgpu_job *leader = p->gang_leader;
1281	struct amdgpu_bo_list_entry *e;
1282	struct drm_gem_object *gobj;
1283	unsigned long index;
1284	unsigned int i;
1285	uint64_t seq;
1286	int r;
1287
1288	for (i = 0; i < p->gang_size; ++i)
1289		drm_sched_job_arm(&p->jobs[i]->base);
1290
1291	for (i = 0; i < p->gang_size; ++i) {
1292		struct dma_fence *fence;
1293
1294		if (p->jobs[i] == leader)
1295			continue;
1296
1297		fence = &p->jobs[i]->base.s_fence->scheduled;
1298		dma_fence_get(fence);
1299		r = drm_sched_job_add_dependency(&leader->base, fence);
1300		if (r) {
1301			dma_fence_put(fence);
1302			return r;
1303		}
1304	}
1305
1306	if (p->gang_size > 1) {
1307		for (i = 0; i < p->gang_size; ++i)
1308			amdgpu_job_set_gang_leader(p->jobs[i], leader);
1309	}
1310
1311	/* No memory allocation is allowed while holding the notifier lock.
1312	 * The lock is held until amdgpu_cs_submit is finished and fence is
1313	 * added to BOs.
1314	 */
1315	mutex_lock(&p->adev->notifier_lock);
1316
1317	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
1318	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
1319	 */
1320	r = 0;
1321	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1322		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
 1323							e->range);
1324		e->range = NULL;
1325	}
1326	if (r) {
1327		r = -EAGAIN;
1328		mutex_unlock(&p->adev->notifier_lock);
1329		return r;
1330	}
1331
1332	p->fence = dma_fence_get(&leader->base.s_fence->finished);
1333	drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1334
1335		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1336
1337		/* Everybody except for the gang leader uses READ */
1338		for (i = 0; i < p->gang_size; ++i) {
1339			if (p->jobs[i] == leader)
1340				continue;
1341
1342			dma_resv_add_fence(gobj->resv,
1343					   &p->jobs[i]->base.s_fence->finished,
1344					   DMA_RESV_USAGE_READ);
1345		}
1346
 1347		/* The gang leader is remembered as writer */
1348		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1349	}
1350
1351	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1352				   p->fence);
1353	amdgpu_cs_post_dependencies(p);
1354
1355	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1356	    !p->ctx->preamble_presented) {
1357		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1358		p->ctx->preamble_presented = true;
1359	}
1360
1361	cs->out.handle = seq;
1362	leader->uf_sequence = seq;
1363
1364	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1365	for (i = 0; i < p->gang_size; ++i) {
1366		amdgpu_job_free_resources(p->jobs[i]);
1367		trace_amdgpu_cs_ioctl(p->jobs[i]);
1368		drm_sched_entity_push_job(&p->jobs[i]->base);
1369		p->jobs[i] = NULL;
1370	}
1371
 1372	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1373
1374	mutex_unlock(&p->adev->notifier_lock);
1375	mutex_unlock(&p->bo_list->bo_list_mutex);
 1376	return 0;
1377}
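/*
 * The -EAGAIN contract above (userptrs invalidated after
 * amdgpu_cs_parser_bos() abort the submit before any fence is installed)
 * relies on userspace restarting the ioctl. libdrm's drmIoctl() is
 * essentially the loop sketched below, so the whole submission is simply
 * re-run from userspace:
 */
#if 0
int drmIoctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	return ret;
}
#endif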
1378
1379/* Cleanup the parser structure */
1380static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1381{
1382	unsigned int i;
1383
1384	amdgpu_sync_free(&parser->sync);
1385	drm_exec_fini(&parser->exec);
1386
1387	for (i = 0; i < parser->num_post_deps; i++) {
1388		drm_syncobj_put(parser->post_deps[i].syncobj);
1389		kfree(parser->post_deps[i].chain);
1390	}
1391	kfree(parser->post_deps);
1392
1393	dma_fence_put(parser->fence);
1394
1395	if (parser->ctx)
1396		amdgpu_ctx_put(parser->ctx);
1397	if (parser->bo_list)
1398		amdgpu_bo_list_put(parser->bo_list);
1399
1400	for (i = 0; i < parser->nchunks; i++)
1401		kvfree(parser->chunks[i].kdata);
1402	kvfree(parser->chunks);
1403	for (i = 0; i < parser->gang_size; ++i) {
1404		if (parser->jobs[i])
1405			amdgpu_job_free(parser->jobs[i]);
1406	}
 1407	amdgpu_bo_unref(&parser->uf_bo);
1408}
1409
1410int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1411{
1412	struct amdgpu_device *adev = drm_to_adev(dev);
1413	struct amdgpu_cs_parser parser;
1414	int r;
1415
1416	if (amdgpu_ras_intr_triggered())
1417		return -EHWPOISON;
1418
1419	if (!adev->accel_working)
1420		return -EBUSY;
1421
1422	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1423	if (r) {
 1424		DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
1425		return r;
1426	}
1427
1428	r = amdgpu_cs_pass1(&parser, data);
1429	if (r)
1430		goto error_fini;
1431
1432	r = amdgpu_cs_pass2(&parser);
1433	if (r)
1434		goto error_fini;
1435
1436	r = amdgpu_cs_parser_bos(&parser, data);
1437	if (r) {
1438		if (r == -ENOMEM)
1439			DRM_ERROR("Not enough memory for command submission!\n");
1440		else if (r != -ERESTARTSYS && r != -EAGAIN)
1441			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
1442		goto error_fini;
1443	}
1444
1445	r = amdgpu_cs_patch_jobs(&parser);
1446	if (r)
1447		goto error_backoff;
1448
1449	r = amdgpu_cs_vm_handling(&parser);
1450	if (r)
1451		goto error_backoff;
1452
1453	r = amdgpu_cs_sync_rings(&parser);
1454	if (r)
1455		goto error_backoff;
1456
1457	trace_amdgpu_cs_ibs(&parser);
1458
1459	r = amdgpu_cs_submit(&parser, data);
1460	if (r)
1461		goto error_backoff;
1462
1463	amdgpu_cs_parser_fini(&parser);
1464	return 0;
1465
 1466error_backoff:
1467	mutex_unlock(&parser.bo_list->bo_list_mutex);
1468
1469error_fini:
1470	amdgpu_cs_parser_fini(&parser);
1471	return r;
1472}
1473
1474/**
1475 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1476 *
1477 * @dev: drm device
1478 * @data: data from userspace
1479 * @filp: file private
1480 *
1481 * Wait for the command submission identified by handle to finish.
1482 */
1483int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1484			 struct drm_file *filp)
1485{
1486	union drm_amdgpu_wait_cs *wait = data;
1487	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1488	struct drm_sched_entity *entity;
1489	struct amdgpu_ctx *ctx;
1490	struct dma_fence *fence;
1491	long r;
1492
1493	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1494	if (ctx == NULL)
1495		return -EINVAL;
1496
1497	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1498				  wait->in.ring, &entity);
1499	if (r) {
1500		amdgpu_ctx_put(ctx);
1501		return r;
1502	}
1503
1504	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1505	if (IS_ERR(fence))
1506		r = PTR_ERR(fence);
1507	else if (fence) {
1508		r = dma_fence_wait_timeout(fence, true, timeout);
1509		if (r > 0 && fence->error)
1510			r = fence->error;
1511		dma_fence_put(fence);
1512	} else
1513		r = 1;
1514
1515	amdgpu_ctx_put(ctx);
1516	if (r < 0)
1517		return r;
1518
1519	memset(wait, 0, sizeof(*wait));
1520	wait->out.status = (r == 0);
1521
1522	return 0;
1523}
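/*
 * Illustrative userspace fragment for the wait ioctl above (hypothetical
 * variables, error handling omitted): wait on the sequence number returned
 * in cs.out.handle. Afterwards wait.out.status is 1 if the submission is
 * still running when the timeout expires and 0 once it has finished,
 * matching the "r == 0" assignment above.
 */
#if 0
union drm_amdgpu_wait_cs wait = {};

wait.in.handle = seq;			/* from cs.out.handle */
wait.in.ctx_id = ctx_id;
wait.in.ip_type = AMDGPU_HW_IP_GFX;
wait.in.timeout = AMDGPU_TIMEOUT_INFINITE;
drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait, sizeof(wait));
busy = wait.out.status;
#endif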
1524
1525/**
1526 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1527 *
1528 * @adev: amdgpu device
1529 * @filp: file private
1530 * @user: drm_amdgpu_fence copied from user space
1531 */
1532static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1533					     struct drm_file *filp,
1534					     struct drm_amdgpu_fence *user)
1535{
1536	struct drm_sched_entity *entity;
1537	struct amdgpu_ctx *ctx;
1538	struct dma_fence *fence;
1539	int r;
1540
1541	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1542	if (ctx == NULL)
1543		return ERR_PTR(-EINVAL);
1544
1545	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1546				  user->ring, &entity);
1547	if (r) {
1548		amdgpu_ctx_put(ctx);
1549		return ERR_PTR(r);
1550	}
1551
1552	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1553	amdgpu_ctx_put(ctx);
1554
1555	return fence;
1556}
1557
1558int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1559				    struct drm_file *filp)
1560{
1561	struct amdgpu_device *adev = drm_to_adev(dev);
1562	union drm_amdgpu_fence_to_handle *info = data;
1563	struct dma_fence *fence;
1564	struct drm_syncobj *syncobj;
1565	struct sync_file *sync_file;
1566	int fd, r;
1567
1568	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1569	if (IS_ERR(fence))
1570		return PTR_ERR(fence);
1571
1572	if (!fence)
1573		fence = dma_fence_get_stub();
1574
1575	switch (info->in.what) {
1576	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1577		r = drm_syncobj_create(&syncobj, 0, fence);
1578		dma_fence_put(fence);
1579		if (r)
1580			return r;
1581		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1582		drm_syncobj_put(syncobj);
1583		return r;
1584
1585	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1586		r = drm_syncobj_create(&syncobj, 0, fence);
1587		dma_fence_put(fence);
1588		if (r)
1589			return r;
1590		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1591		drm_syncobj_put(syncobj);
1592		return r;
1593
1594	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1595		fd = get_unused_fd_flags(O_CLOEXEC);
1596		if (fd < 0) {
1597			dma_fence_put(fence);
1598			return fd;
1599		}
1600
1601		sync_file = sync_file_create(fence);
1602		dma_fence_put(fence);
1603		if (!sync_file) {
1604			put_unused_fd(fd);
1605			return -ENOMEM;
1606		}
1607
1608		fd_install(fd, sync_file->file);
1609		info->out.handle = fd;
1610		return 0;
1611
1612	default:
1613		dma_fence_put(fence);
1614		return -EINVAL;
1615	}
1616}
1617
1618/**
1619 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1620 *
1621 * @adev: amdgpu device
1622 * @filp: file private
1623 * @wait: wait parameters
1624 * @fences: array of drm_amdgpu_fence
1625 */
1626static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1627				     struct drm_file *filp,
1628				     union drm_amdgpu_wait_fences *wait,
1629				     struct drm_amdgpu_fence *fences)
1630{
1631	uint32_t fence_count = wait->in.fence_count;
1632	unsigned int i;
1633	long r = 1;
1634
1635	for (i = 0; i < fence_count; i++) {
1636		struct dma_fence *fence;
1637		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1638
1639		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1640		if (IS_ERR(fence))
1641			return PTR_ERR(fence);
1642		else if (!fence)
1643			continue;
1644
1645		r = dma_fence_wait_timeout(fence, true, timeout);
1646		if (r > 0 && fence->error)
1647			r = fence->error;
1648
1649		dma_fence_put(fence);
1650		if (r < 0)
1651			return r;
1652
1653		if (r == 0)
 1654			break;
1655	}
1656
1657	memset(wait, 0, sizeof(*wait));
1658	wait->out.status = (r > 0);
1659
1660	return 0;
1661}
1662
1663/**
1664 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1665 *
1666 * @adev: amdgpu device
1667 * @filp: file private
1668 * @wait: wait parameters
1669 * @fences: array of drm_amdgpu_fence
1670 */
1671static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1672				    struct drm_file *filp,
1673				    union drm_amdgpu_wait_fences *wait,
1674				    struct drm_amdgpu_fence *fences)
1675{
1676	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1677	uint32_t fence_count = wait->in.fence_count;
1678	uint32_t first = ~0;
1679	struct dma_fence **array;
1680	unsigned int i;
1681	long r;
1682
1683	/* Prepare the fence array */
1684	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1685
1686	if (array == NULL)
1687		return -ENOMEM;
1688
1689	for (i = 0; i < fence_count; i++) {
1690		struct dma_fence *fence;
1691
1692		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1693		if (IS_ERR(fence)) {
1694			r = PTR_ERR(fence);
1695			goto err_free_fence_array;
1696		} else if (fence) {
1697			array[i] = fence;
1698		} else { /* NULL, the fence has been already signaled */
1699			r = 1;
1700			first = i;
1701			goto out;
1702		}
1703	}
1704
1705	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1706				       &first);
1707	if (r < 0)
1708		goto err_free_fence_array;
1709
1710out:
1711	memset(wait, 0, sizeof(*wait));
1712	wait->out.status = (r > 0);
1713	wait->out.first_signaled = first;
1714
1715	if (first < fence_count && array[first])
1716		r = array[first]->error;
1717	else
1718		r = 0;
1719
1720err_free_fence_array:
1721	for (i = 0; i < fence_count; i++)
1722		dma_fence_put(array[i]);
1723	kfree(array);
1724
1725	return r;
1726}
1727
1728/**
1729 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1730 *
1731 * @dev: drm device
1732 * @data: data from userspace
1733 * @filp: file private
1734 */
1735int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1736				struct drm_file *filp)
1737{
1738	struct amdgpu_device *adev = drm_to_adev(dev);
1739	union drm_amdgpu_wait_fences *wait = data;
1740	uint32_t fence_count = wait->in.fence_count;
1741	struct drm_amdgpu_fence *fences_user;
1742	struct drm_amdgpu_fence *fences;
1743	int r;
1744
1745	/* Get the fences from userspace */
1746	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1747			GFP_KERNEL);
1748	if (fences == NULL)
1749		return -ENOMEM;
1750
1751	fences_user = u64_to_user_ptr(wait->in.fences);
1752	if (copy_from_user(fences, fences_user,
1753		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1754		r = -EFAULT;
1755		goto err_free_fences;
1756	}
1757
1758	if (wait->in.wait_all)
1759		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1760	else
1761		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1762
1763err_free_fences:
1764	kfree(fences);
1765
1766	return r;
1767}
1768
1769/**
1770 * amdgpu_cs_find_mapping - find bo_va for VM address
1771 *
1772 * @parser: command submission parser context
1773 * @addr: VM address
1774 * @bo: resulting BO of the mapping found
1775 * @map: Placeholder to return found BO mapping
1776 *
1777 * Search the buffer objects in the command submission context for a certain
 1778 * virtual memory address. Returns 0 when a matching mapping is found and
 1779 * a negative error code otherwise.
1780 */
1781int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1782			   uint64_t addr, struct amdgpu_bo **bo,
1783			   struct amdgpu_bo_va_mapping **map)
1784{
1785	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1786	struct ttm_operation_ctx ctx = { false, false };
1787	struct amdgpu_vm *vm = &fpriv->vm;
1788	struct amdgpu_bo_va_mapping *mapping;
1789	int i, r;
1790
1791	addr /= AMDGPU_GPU_PAGE_SIZE;
1792
1793	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1794	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1795		return -EINVAL;
1796
1797	*bo = mapping->bo_va->base.bo;
1798	*map = mapping;
1799
1800	/* Double check that the BO is reserved by this CS */
1801	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1802		return -EINVAL;
1803
 1804	/* Make sure VRAM is allocated contiguously */
1805	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1806	if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
1807	    !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
1808
1809		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1810		for (i = 0; i < (*bo)->placement.num_placement; i++)
1811			(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
1812		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1813		if (r)
1814			return r;
1815	}
1816
1817	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1818}
v6.2
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
 
 
  35#include "amdgpu_cs.h"
  36#include "amdgpu.h"
  37#include "amdgpu_trace.h"
  38#include "amdgpu_gmc.h"
  39#include "amdgpu_gem.h"
  40#include "amdgpu_ras.h"
  41
  42static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
  43				 struct amdgpu_device *adev,
  44				 struct drm_file *filp,
  45				 union drm_amdgpu_cs *cs)
  46{
  47	struct amdgpu_fpriv *fpriv = filp->driver_priv;
  48
  49	if (cs->in.num_chunks == 0)
  50		return -EINVAL;
  51
  52	memset(p, 0, sizeof(*p));
  53	p->adev = adev;
  54	p->filp = filp;
  55
  56	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
  57	if (!p->ctx)
  58		return -EINVAL;
  59
  60	if (atomic_read(&p->ctx->guilty)) {
  61		amdgpu_ctx_put(p->ctx);
  62		return -ECANCELED;
  63	}
  64
  65	amdgpu_sync_create(&p->sync);
 
 
  66	return 0;
  67}
  68
  69static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
  70			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
  71{
  72	struct drm_sched_entity *entity;
  73	unsigned int i;
  74	int r;
  75
  76	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
  77				  chunk_ib->ip_instance,
  78				  chunk_ib->ring, &entity);
  79	if (r)
  80		return r;
  81
  82	/*
  83	 * Abort if there is no run queue associated with this entity.
  84	 * Possibly because of disabled HW IP.
  85	 */
  86	if (entity->rq == NULL)
  87		return -EINVAL;
  88
  89	/* Check if we can add this IB to some existing job */
  90	for (i = 0; i < p->gang_size; ++i)
  91		if (p->entities[i] == entity)
  92			return i;
  93
  94	/* If not increase the gang size if possible */
  95	if (i == AMDGPU_CS_GANG_SIZE)
  96		return -EINVAL;
  97
  98	p->entities[i] = entity;
  99	p->gang_size = i + 1;
 100	return i;
 101}
 102
 103static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 104			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
 105			   unsigned int *num_ibs)
 106{
 107	int r;
 108
 109	r = amdgpu_cs_job_idx(p, chunk_ib);
 110	if (r < 0)
 111		return r;
 112
 
 
 
 113	++(num_ibs[r]);
 114	p->gang_leader_idx = r;
 115	return 0;
 116}
 117
 118static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 119				   struct drm_amdgpu_cs_chunk_fence *data,
 120				   uint32_t *offset)
 121{
 122	struct drm_gem_object *gobj;
 123	struct amdgpu_bo *bo;
 124	unsigned long size;
 125	int r;
 126
 127	gobj = drm_gem_object_lookup(p->filp, data->handle);
 128	if (gobj == NULL)
 129		return -EINVAL;
 130
 131	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 132	p->uf_entry.priority = 0;
 133	p->uf_entry.tv.bo = &bo->tbo;
 134	/* One for TTM and two for the CS job */
 135	p->uf_entry.tv.num_shared = 3;
 136
 137	drm_gem_object_put(gobj);
 138
 139	size = amdgpu_bo_size(bo);
 140	if (size != PAGE_SIZE || (data->offset + 8) > size) {
 141		r = -EINVAL;
 142		goto error_unref;
 143	}
 144
 145	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
 146		r = -EINVAL;
 147		goto error_unref;
 148	}
 149
 150	*offset = data->offset;
 151
 152	return 0;
 153
 154error_unref:
 155	amdgpu_bo_unref(&bo);
 156	return r;
 157}
 158
 159static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
 160				   struct drm_amdgpu_bo_list_in *data)
 161{
 162	struct drm_amdgpu_bo_list_entry *info;
 163	int r;
 164
 165	r = amdgpu_bo_create_list_entry_array(data, &info);
 166	if (r)
 167		return r;
 168
 169	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
 170				  &p->bo_list);
 171	if (r)
 172		goto error_free;
 173
 174	kvfree(info);
 175	return 0;
 176
 177error_free:
 178	kvfree(info);
 179
 180	return r;
 181}
 182
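/*
 * The CS payload is two levels of indirection deep: cs->in.chunks points
 * at an array of num_chunks user addresses, each naming a
 * struct drm_amdgpu_cs_chunk whose chunk_data in turn points at the
 * chunk-specific payload. A rough sketch of a single-IB submission from
 * userspace (illustrative only, error handling omitted):
 *
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	uint64_t chunks[] = { (uintptr_t)&chunk };
 *
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks = (uintptr_t)chunks;
 */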
 183/* Copy the data from userspace and go over it the first time */
 184static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 185			   union drm_amdgpu_cs *cs)
 186{
 187	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 188	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
 189	struct amdgpu_vm *vm = &fpriv->vm;
 190	uint64_t *chunk_array_user;
 191	uint64_t *chunk_array;
 192	uint32_t uf_offset = 0;
 193	unsigned int size;
 194	int ret;
 195	int i;
 196
 197	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
 198				     GFP_KERNEL);
 199	if (!chunk_array)
 200		return -ENOMEM;
 201
 202	/* get chunks */
 203	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 204	if (copy_from_user(chunk_array, chunk_array_user,
 205			   sizeof(uint64_t)*cs->in.num_chunks)) {
 206		ret = -EFAULT;
 207		goto free_chunk;
 208	}
 209
 210	p->nchunks = cs->in.num_chunks;
 211	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 212			    GFP_KERNEL);
 213	if (!p->chunks) {
 214		ret = -ENOMEM;
 215		goto free_chunk;
 216	}
 217
 218	for (i = 0; i < p->nchunks; i++) {
 219		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 220		struct drm_amdgpu_cs_chunk user_chunk;
 221		uint32_t __user *cdata;
 222
 223		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 224		if (copy_from_user(&user_chunk, chunk_ptr,
 225				       sizeof(struct drm_amdgpu_cs_chunk))) {
 226			ret = -EFAULT;
 227			i--;
 228			goto free_partial_kdata;
 229		}
 230		p->chunks[i].chunk_id = user_chunk.chunk_id;
 231		p->chunks[i].length_dw = user_chunk.length_dw;
 232
 233		size = p->chunks[i].length_dw;
 234		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 235
 236		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
 237						    GFP_KERNEL);
 238		if (p->chunks[i].kdata == NULL) {
 239			ret = -ENOMEM;
 240			i--;
 241			goto free_partial_kdata;
 242		}
 243		size *= sizeof(uint32_t);
 244		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 245			ret = -EFAULT;
 246			goto free_partial_kdata;
 247		}
 248
 249		/* Assume the worst on the following checks */
 250		ret = -EINVAL;
 251		switch (p->chunks[i].chunk_id) {
 252		case AMDGPU_CHUNK_ID_IB:
 253			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
 254				goto free_partial_kdata;
 255
 256			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
 257			if (ret)
 258				goto free_partial_kdata;
 259			break;
 260
 261		case AMDGPU_CHUNK_ID_FENCE:
 262			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
 263				goto free_partial_kdata;
 264
 265			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
 266						      &uf_offset);
 267			if (ret)
 268				goto free_partial_kdata;
 269			break;
 270
 271		case AMDGPU_CHUNK_ID_BO_HANDLES:
 272			if (size < sizeof(struct drm_amdgpu_bo_list_in))
 273				goto free_partial_kdata;
 274
 275			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 276			if (ret)
 277				goto free_partial_kdata;
 278			break;
 279
 280		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 281		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 282		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 283		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 284		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 285		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 286			break;
 287
 288		default:
 289			goto free_partial_kdata;
 290		}
 291	}
 292
 293	if (!p->gang_size) {
 294		ret = -EINVAL;
 295		goto free_all_kdata;
 296	}
 297
 298	for (i = 0; i < p->gang_size; ++i) {
 299		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
 300				       num_ibs[i], &p->jobs[i]);
 301		if (ret)
 302			goto free_all_kdata;
 303	}
 304	p->gang_leader = p->jobs[p->gang_leader_idx];
 305
 306	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
 307		ret = -ECANCELED;
 308		goto free_all_kdata;
 309	}
 310
 311	if (p->uf_entry.tv.bo)
 312		p->gang_leader->uf_addr = uf_offset;
 313	kvfree(chunk_array);
 314
 315	/* Use this opportunity to fill in task info for the vm */
 316	amdgpu_vm_set_task_info(vm);
 317
 318	return 0;
 319
 320free_all_kdata:
 321	i = p->nchunks - 1;
 322free_partial_kdata:
 323	for (; i >= 0; i--)
 324		kvfree(p->chunks[i].kdata);
 325	kvfree(p->chunks);
 326	p->chunks = NULL;
 327	p->nchunks = 0;
 328free_chunk:
 329	kvfree(chunk_array);
 330
 331	return ret;
 332}
 333
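/*
 * Second pass over an IB chunk: the jobs exist now, so take the next
 * amdgpu_ib slot of the matching job and fill it with the chunk's GPU
 * VA, size and flags. An IB buffer is only allocated here when the ring
 * needs CS parsing (ring->funcs->parse_cs).
 */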
 334static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 335			   struct amdgpu_cs_chunk *chunk,
 336			   unsigned int *ce_preempt,
 337			   unsigned int *de_preempt)
 338{
 339	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
 340	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 341	struct amdgpu_vm *vm = &fpriv->vm;
 342	struct amdgpu_ring *ring;
 343	struct amdgpu_job *job;
 344	struct amdgpu_ib *ib;
 345	int r;
 346
 347	r = amdgpu_cs_job_idx(p, chunk_ib);
 348	if (r < 0)
 349		return r;
 350
 351	job = p->jobs[r];
 352	ring = amdgpu_job_ring(job);
 353	ib = &job->ibs[job->num_ibs++];
 354
 355	/* MM engine doesn't support user fences */
 356	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
 357		return -EINVAL;
 358
 359	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 360	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 361		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 362			(*ce_preempt)++;
 363		else
 364			(*de_preempt)++;
 365
 366		/* Each GFX command submission allows at most one
 367		 * preemptible IB each for CE and DE */
 368		if (*ce_preempt > 1 || *de_preempt > 1)
 369			return -EINVAL;
 370	}
 371
 372	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 373		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 374
 375	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
 376			  chunk_ib->ib_bytes : 0,
 377			  AMDGPU_IB_POOL_DELAYED, ib);
 378	if (r) {
 379		DRM_ERROR("Failed to get ib !\n");
 380		return r;
 381	}
 382
 383	ib->gpu_addr = chunk_ib->va_start;
 384	ib->length_dw = chunk_ib->ib_bytes / 4;
 385	ib->flags = chunk_ib->flags;
 386	return 0;
 387}
 388
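/*
 * Resolve fence dependencies: each entry names a fence of another
 * submission by (ctx, ip_type, ip_instance, ring, handle). For
 * SCHEDULED_DEPENDENCIES the wait is relaxed to the point where the
 * dependency was merely scheduled instead of completed.
 */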
 389static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 390				     struct amdgpu_cs_chunk *chunk)
 391{
 392	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 393	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 394	unsigned num_deps;
 395	int i, r;
 396
 397	num_deps = chunk->length_dw * 4 /
 398		sizeof(struct drm_amdgpu_cs_chunk_dep);
 399
 400	for (i = 0; i < num_deps; ++i) {
 401		struct amdgpu_ctx *ctx;
 402		struct drm_sched_entity *entity;
 403		struct dma_fence *fence;
 404
 405		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 406		if (ctx == NULL)
 407			return -EINVAL;
 408
 409		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 410					  deps[i].ip_instance,
 411					  deps[i].ring, &entity);
 412		if (r) {
 413			amdgpu_ctx_put(ctx);
 414			return r;
 415		}
 416
 417		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 418		amdgpu_ctx_put(ctx);
 419
 420		if (IS_ERR(fence))
 421			return PTR_ERR(fence);
 422		else if (!fence)
 423			continue;
 424
 425		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 426			struct drm_sched_fence *s_fence;
 427			struct dma_fence *old = fence;
 428
 429			s_fence = to_drm_sched_fence(fence);
 430			fence = dma_fence_get(&s_fence->scheduled);
 431			dma_fence_put(old);
 432		}
 433
 434		r = amdgpu_sync_fence(&p->sync, fence);
 435		dma_fence_put(fence);
 436		if (r)
 437			return r;
 438	}
 439	return 0;
 440}
 441
 442static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 443					 uint32_t handle, u64 point,
 444					 u64 flags)
 445{
 446	struct dma_fence *fence;
 447	int r;
 448
 449	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
 450	if (r) {
 451		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
 452			  handle, point, r);
 453		return r;
 454	}
 455
 456	r = amdgpu_sync_fence(&p->sync, fence);
 457	dma_fence_put(fence);
 458	return r;
 459}
 460
 461static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 462				   struct amdgpu_cs_chunk *chunk)
 463{
 464	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 465	unsigned num_deps;
 466	int i, r;
 467
 468	num_deps = chunk->length_dw * 4 /
 469		sizeof(struct drm_amdgpu_cs_chunk_sem);
 470	for (i = 0; i < num_deps; ++i) {
 471		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
 472		if (r)
 473			return r;
 474	}
 475
 476	return 0;
 477}
 478
 479static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 480					      struct amdgpu_cs_chunk *chunk)
 481{
 482	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 483	unsigned num_deps;
 484	int i, r;
 485
 486	num_deps = chunk->length_dw * 4 /
 487		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 488	for (i = 0; i < num_deps; ++i) {
 489		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
 490						  syncobj_deps[i].point,
 491						  syncobj_deps[i].flags);
 492		if (r)
 493			return r;
 494	}
 495
 496	return 0;
 497}
 498
 499static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 500				    struct amdgpu_cs_chunk *chunk)
 501{
 502	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 503	unsigned num_deps;
 504	int i;
 505
 506	num_deps = chunk->length_dw * 4 /
 507		sizeof(struct drm_amdgpu_cs_chunk_sem);
 508
 509	if (p->post_deps)
 510		return -EINVAL;
 511
 512	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 513				     GFP_KERNEL);
 514	p->num_post_deps = 0;
 515
 516	if (!p->post_deps)
 517		return -ENOMEM;
 518
 519
 520	for (i = 0; i < num_deps; ++i) {
 521		p->post_deps[i].syncobj =
 522			drm_syncobj_find(p->filp, deps[i].handle);
 523		if (!p->post_deps[i].syncobj)
 524			return -EINVAL;
 525		p->post_deps[i].chain = NULL;
 526		p->post_deps[i].point = 0;
 527		p->num_post_deps++;
 528	}
 529
 530	return 0;
 531}
 532
 533static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 534						struct amdgpu_cs_chunk *chunk)
 535{
 536	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 537	unsigned num_deps;
 538	int i;
 539
 540	num_deps = chunk->length_dw * 4 /
 541		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 542
 543	if (p->post_deps)
 544		return -EINVAL;
 545
 546	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 547				     GFP_KERNEL);
 548	p->num_post_deps = 0;
 549
 550	if (!p->post_deps)
 551		return -ENOMEM;
 552
 553	for (i = 0; i < num_deps; ++i) {
 554		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
 555
 556		dep->chain = NULL;
 557		if (syncobj_deps[i].point) {
 558			dep->chain = dma_fence_chain_alloc();
 559			if (!dep->chain)
 560				return -ENOMEM;
 561		}
 562
 563		dep->syncobj = drm_syncobj_find(p->filp,
 564						syncobj_deps[i].handle);
 565		if (!dep->syncobj) {
 566			dma_fence_chain_free(dep->chain);
 567			return -EINVAL;
 568		}
 569		dep->point = syncobj_deps[i].point;
 570		p->num_post_deps++;
 571	}
 572
 573	return 0;
 574}
 575
 576static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 577{
 578	unsigned int ce_preempt = 0, de_preempt = 0;
 579	int i, r;
 580
 581	for (i = 0; i < p->nchunks; ++i) {
 582		struct amdgpu_cs_chunk *chunk;
 583
 584		chunk = &p->chunks[i];
 585
 586		switch (chunk->chunk_id) {
 587		case AMDGPU_CHUNK_ID_IB:
 588			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
 589			if (r)
 590				return r;
 591			break;
 592		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 593		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 594			r = amdgpu_cs_p2_dependencies(p, chunk);
 595			if (r)
 596				return r;
 597			break;
 598		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 599			r = amdgpu_cs_p2_syncobj_in(p, chunk);
 600			if (r)
 601				return r;
 602			break;
 603		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 604			r = amdgpu_cs_p2_syncobj_out(p, chunk);
 605			if (r)
 606				return r;
 607			break;
 608		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 609			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
 610			if (r)
 611				return r;
 612			break;
 613		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 614			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
 615			if (r)
 616				return r;
 617			break;
 618		}
 619	}
 620
 621	return 0;
 622}
 623
 624/* Convert microseconds to bytes. */
 625static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 626{
 627	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 628		return 0;
 629
 630	/* Since accum_us is incremented by a million per second, just
 631	 * multiply it by the number of MB/s to get the number of bytes.
 632	 */
 633	return us << adev->mm_stats.log2_max_MBps;
 634}
 635
 636static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 637{
 638	if (!adev->mm_stats.log2_max_MBps)
 639		return 0;
 640
 641	return bytes >> adev->mm_stats.log2_max_MBps;
 642}
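
/*
 * A quick worked example of the conversions above (illustrative
 * numbers): with log2_max_MBps == 7 (~128 MB/s of measured copy
 * throughput), 200000 accumulated us allow 200000 << 7 = 25600000
 * bytes (~25 MB) of buffer moves; bytes_to_us() is the inverse shift.
 */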
 643
 644/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 645 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 646 * which means it can go over the threshold once. If that happens, the driver
 647 * will be in debt and no other buffer migrations can be done until that debt
 648 * is repaid.
 649 *
 650 * This approach allows moving a buffer of any size (important, since a
 651 * single buffer can be larger than any fixed per-submission budget).
 652 *
 653 * The currency is simply time in microseconds and it increases as the clock
 654 * ticks. The accumulated microseconds (us) are converted to bytes and
 655 * returned.
 656 */
 657static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 658					      u64 *max_bytes,
 659					      u64 *max_vis_bytes)
 660{
 661	s64 time_us, increment_us;
 662	u64 free_vram, total_vram, used_vram;
 663	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 664	 * throttling.
 665	 *
 666	 * It means that in order to get full max MBps, at least 5 IBs per
 667	 * second must be submitted and not more than 200ms apart from each
 668	 * other.
 669	 */
 670	const s64 us_upper_bound = 200000;
 671
 672	if (!adev->mm_stats.log2_max_MBps) {
 673		*max_bytes = 0;
 674		*max_vis_bytes = 0;
 675		return;
 676	}
 677
 678	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 679	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 680	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 681
 682	spin_lock(&adev->mm_stats.lock);
 683
 684	/* Increase the amount of accumulated us. */
 685	time_us = ktime_to_us(ktime_get());
 686	increment_us = time_us - adev->mm_stats.last_update_us;
 687	adev->mm_stats.last_update_us = time_us;
 688	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 689				      us_upper_bound);
 690
 691	/* This prevents the short period of low performance when the VRAM
 692	 * usage is low and the driver is in debt or doesn't have enough
 693	 * accumulated us to fill VRAM quickly.
 694	 *
 695	 * The situation can occur in these cases:
 696	 * - a lot of VRAM is freed by userspace
 697	 * - the presence of a big buffer causes a lot of evictions
 698	 *   (solution: split buffers into smaller ones)
 699	 *
 700	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 701	 * accum_us to a positive number.
 702	 */
 703	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 704		s64 min_us;
 705
 706		/* Be more aggressive on dGPUs. Try to fill a portion of free
 707		 * VRAM now.
 708		 */
 709		if (!(adev->flags & AMD_IS_APU))
 710			min_us = bytes_to_us(adev, free_vram / 4);
 711		else
 712			min_us = 0; /* Reset accum_us on APUs. */
 713
 714		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 715	}
 716
 717	/* This is set to 0 if the driver is in debt to disallow (optional)
 718	 * buffer moves.
 719	 */
 720	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 721
 722	/* Do the same for visible VRAM if half of it is free */
 723	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 724		u64 total_vis_vram = adev->gmc.visible_vram_size;
 725		u64 used_vis_vram =
 726		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
 727
 728		if (used_vis_vram < total_vis_vram) {
 729			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 730			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 731							  increment_us, us_upper_bound);
 732
 733			if (free_vis_vram >= total_vis_vram / 2)
 734				adev->mm_stats.accum_us_vis =
 735					max(bytes_to_us(adev, free_vis_vram / 2),
 736					    adev->mm_stats.accum_us_vis);
 737		}
 738
 739		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 740	} else {
 741		*max_vis_bytes = 0;
 742	}
 743
 744	spin_unlock(&adev->mm_stats.lock);
 745}
 746
 747/* Report how many bytes have really been moved for the last command
 748 * submission. This can result in a debt that can stop buffer migrations
 749 * temporarily.
 750 */
 751void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 752				  u64 num_vis_bytes)
 753{
 754	spin_lock(&adev->mm_stats.lock);
 755	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 756	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 757	spin_unlock(&adev->mm_stats.lock);
 758}
 759
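/*
 * Per-BO validation callback: preferred placement is only tried while
 * the submission still has move budget (see the threshold logic above);
 * once the budget is spent, or on -ENOMEM, the BO falls back to its
 * allowed_domains so the submission can still make progress.
 */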
 760static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 761{
 762	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 763	struct amdgpu_cs_parser *p = param;
 764	struct ttm_operation_ctx ctx = {
 765		.interruptible = true,
 766		.no_wait_gpu = false,
 767		.resv = bo->tbo.base.resv
 768	};
 769	uint32_t domain;
 770	int r;
 771
 772	if (bo->tbo.pin_count)
 773		return 0;
 774
 775	/* Don't move this buffer if we have depleted our allowance
 776	 * to move it. Don't move anything if the threshold is zero.
 777	 */
 778	if (p->bytes_moved < p->bytes_moved_threshold &&
 779	    (!bo->tbo.base.dma_buf ||
 780	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 781		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 782		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 783			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 784			 * visible VRAM if we've depleted our allowance to do
 785			 * that.
 786			 */
 787			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 788				domain = bo->preferred_domains;
 789			else
 790				domain = bo->allowed_domains;
 791		} else {
 792			domain = bo->preferred_domains;
 793		}
 794	} else {
 795		domain = bo->allowed_domains;
 796	}
 797
 798retry:
 799	amdgpu_bo_placement_from_domain(bo, domain);
 800	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 801
 802	p->bytes_moved += ctx.bytes_moved;
 803	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 804	    amdgpu_bo_in_cpu_visible_vram(bo))
 805		p->bytes_moved_vis += ctx.bytes_moved;
 806
 807	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 808		domain = bo->allowed_domains;
 809		goto retry;
 810	}
 811
 812	return r;
 813}
 814
 815static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 816			    struct list_head *validated)
 817{
 818	struct ttm_operation_ctx ctx = { true, false };
 819	struct amdgpu_bo_list_entry *lobj;
 820	int r;
 821
 822	list_for_each_entry(lobj, validated, tv.head) {
 823		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
 824		struct mm_struct *usermm;
 825
 826		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 827		if (usermm && usermm != current->mm)
 828			return -EPERM;
 829
 830		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
 831		    lobj->user_invalidated && lobj->user_pages) {
 832			amdgpu_bo_placement_from_domain(bo,
 833							AMDGPU_GEM_DOMAIN_CPU);
 834			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 835			if (r)
 836				return r;
 837
 838			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 839						     lobj->user_pages);
 840		}
 841
 842		r = amdgpu_cs_bo_validate(p, bo);
 843		if (r)
 844			return r;
 845
 846		kvfree(lobj->user_pages);
 847		lobj->user_pages = NULL;
 848	}
 849	return 0;
 850}
 851
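/*
 * Collect and reserve every BO the submission touches: the bo_list
 * (from a handle or a BO_HANDLES chunk), the VM page directory and the
 * optional user fence BO. Userptr BOs additionally get their backing
 * pages looked up here so that a concurrent invalidation can be
 * detected later, at submit time.
 */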
 852static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 853				union drm_amdgpu_cs *cs)
 854{
 855	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 856	struct amdgpu_vm *vm = &fpriv->vm;
 857	struct amdgpu_bo_list_entry *e;
 858	struct list_head duplicates;
 859	unsigned int i;
 860	int r;
 861
 862	INIT_LIST_HEAD(&p->validated);
 863
 864	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 865	if (cs->in.bo_list_handle) {
 866		if (p->bo_list)
 867			return -EINVAL;
 868
 869		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 870				       &p->bo_list);
 871		if (r)
 872			return r;
 873	} else if (!p->bo_list) {
 874		/* Create an empty bo_list when no handle is provided */
 875		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 876					  &p->bo_list);
 877		if (r)
 878			return r;
 879	}
 880
 881	mutex_lock(&p->bo_list->bo_list_mutex);
 882
 883	/* One for TTM and one for the CS job */
 884	amdgpu_bo_list_for_each_entry(e, p->bo_list)
 885		e->tv.num_shared = 2;
 886
 887	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 888
 889	INIT_LIST_HEAD(&duplicates);
 890	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 891
 892	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 893		list_add(&p->uf_entry.tv.head, &p->validated);
 894
 895	/* Get userptr backing pages. If the pages were updated after being
 896	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
 897	 * will do amdgpu_ttm_backend_bind() to flush and invalidate the pages
 898	 */
 899	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 900		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 901		bool userpage_invalidated = false;
 902		int i;
 903
 904		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 905					sizeof(struct page *),
 906					GFP_KERNEL | __GFP_ZERO);
 907		if (!e->user_pages) {
 908			DRM_ERROR("kvmalloc_array failure\n");
 909			r = -ENOMEM;
 910			goto out_free_user_pages;
 911		}
 912
 913		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 914		if (r) {
 915			kvfree(e->user_pages);
 916			e->user_pages = NULL;
 917			goto out_free_user_pages;
 918		}
 919
 920		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 921			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 922				userpage_invalidated = true;
 923				break;
 924			}
 925		}
 926		e->user_invalidated = userpage_invalidated;
 927	}
 928
 929	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 930				   &duplicates);
 931	if (unlikely(r != 0)) {
 932		if (r != -ERESTARTSYS)
 933			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 934		goto out_free_user_pages;
 935	}
 936
 937	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 938		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 939
 940		e->bo_va = amdgpu_vm_bo_find(vm, bo);
 941	}
 942
 943	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 944					  &p->bytes_moved_vis_threshold);
 945	p->bytes_moved = 0;
 946	p->bytes_moved_vis = 0;
 947
 948	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 949				      amdgpu_cs_bo_validate, p);
 950	if (r) {
 951		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 952		goto error_validate;
 953	}
 954
 955	r = amdgpu_cs_list_validate(p, &duplicates);
 956	if (r)
 957		goto error_validate;
 958
 959	r = amdgpu_cs_list_validate(p, &p->validated);
 960	if (r)
 961		goto error_validate;
 962
 963	if (p->uf_entry.tv.bo) {
 964		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 965
 966		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 967		if (r)
 968			goto error_validate;
 969
 970		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
 971	}
 972
 973	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 974				     p->bytes_moved_vis);
 975
 976	for (i = 0; i < p->gang_size; ++i)
 977		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
 978					 p->bo_list->gws_obj,
 979					 p->bo_list->oa_obj);
 980	return 0;
 981
 982error_validate:
 983	ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 984
 985out_free_user_pages:
 986	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 987		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 988
 989		if (!e->user_pages)
 990			continue;
 991		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 992		kvfree(e->user_pages);
 993		e->user_pages = NULL;
 994		e->range = NULL;
 995	}
 996	mutex_unlock(&p->bo_list->bo_list_mutex);
 997	return r;
 998}
 999
1000static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1001{
1002	int i, j;
1003
1004	if (!trace_amdgpu_cs_enabled())
1005		return;
1006
1007	for (i = 0; i < p->gang_size; ++i) {
1008		struct amdgpu_job *job = p->jobs[i];
1009
1010		for (j = 0; j < job->num_ibs; ++j)
1011			trace_amdgpu_cs(p, job, &job->ibs[j]);
1012	}
1013}
1014
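/*
 * For rings that emulate VM handling (UVD/VCE), fetch each IB's backing
 * BO through its VA mapping and either copy the commands into a kernel
 * IB (parse_cs) or patch them in place (patch_cs_in_place), validating
 * that the advertised size stays inside the mapping.
 */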
1015static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1016			       struct amdgpu_job *job)
1017{
1018	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1019	unsigned int i;
1020	int r;
1021
1022	/* Only for UVD/VCE VM emulation */
1023	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1024		return 0;
1025
1026	for (i = 0; i < job->num_ibs; ++i) {
1027		struct amdgpu_ib *ib = &job->ibs[i];
1028		struct amdgpu_bo_va_mapping *m;
1029		struct amdgpu_bo *aobj;
1030		uint64_t va_start;
1031		uint8_t *kptr;
1032
1033		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1034		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1035		if (r) {
1036			DRM_ERROR("IB va_start is invalid\n");
1037			return r;
1038		}
1039
1040		if ((va_start + ib->length_dw * 4) >
1041		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1042			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1043			return -EINVAL;
1044		}
1045
1046		/* the IB should be reserved at this point */
1047		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1048		if (r)
1049			return r;
1051
1052		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1053
1054		if (ring->funcs->parse_cs) {
1055			memcpy(ib->ptr, kptr, ib->length_dw * 4);
1056			amdgpu_bo_kunmap(aobj);
1057
1058			r = amdgpu_ring_parse_cs(ring, p, job, ib);
1059			if (r)
1060				return r;
1061		} else {
1062			ib->ptr = (uint32_t *)kptr;
1063			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1064			amdgpu_bo_kunmap(aobj);
1065			if (r)
1066				return r;
1067		}
1068	}
1069
1070	return 0;
1071}
1072
1073static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1074{
1075	unsigned int i;
1076	int r;
1077
1078	for (i = 0; i < p->gang_size; ++i) {
1079		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1080		if (r)
1081			return r;
1082	}
1083	return 0;
1084}
1085
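/*
 * Bring the VM up to date for this submission: clear freed mappings,
 * update the PRT, CSA and per-BO VA mappings, handle moved BOs and
 * finally the page directories, while collecting every last_pt_update
 * fence into p->sync so the jobs run against current page tables.
 */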
1086static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1087{
1088	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1089	struct amdgpu_job *job = p->gang_leader;
1090	struct amdgpu_device *adev = p->adev;
1091	struct amdgpu_vm *vm = &fpriv->vm;
1092	struct amdgpu_bo_list_entry *e;
1093	struct amdgpu_bo_va *bo_va;
1094	struct amdgpu_bo *bo;
1095	unsigned int i;
1096	int r;
1097
1098	r = amdgpu_vm_clear_freed(adev, vm, NULL);
1099	if (r)
1100		return r;
1101
1102	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1103	if (r)
1104		return r;
1105
1106	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1107	if (r)
1108		return r;
1109
1110	if (fpriv->csa_va) {
1111		bo_va = fpriv->csa_va;
1112		BUG_ON(!bo_va);
1113		r = amdgpu_vm_bo_update(adev, bo_va, false);
1114		if (r)
1115			return r;
1116
1117		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1118		if (r)
1119			return r;
1120	}
1121
1122	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1123		/* ignore duplicates */
1124		bo = ttm_to_amdgpu_bo(e->tv.bo);
1125		if (!bo)
1126			continue;
1127
1128		bo_va = e->bo_va;
1129		if (bo_va == NULL)
1130			continue;
1131
1132		r = amdgpu_vm_bo_update(adev, bo_va, false);
1133		if (r)
1134			return r;
1135
1136		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1137		if (r)
1138			return r;
1139	}
1140
1141	r = amdgpu_vm_handle_moved(adev, vm);
1142	if (r)
1143		return r;
1144
1145	r = amdgpu_vm_update_pdes(adev, vm, false);
1146	if (r)
1147		return r;
1148
1149	r = amdgpu_sync_fence(&p->sync, vm->last_update);
1150	if (r)
1151		return r;
1152
1153	for (i = 0; i < p->gang_size; ++i) {
1154		job = p->jobs[i];
1155
1156		if (!job->vm)
1157			continue;
1158
1159		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1160	}
1161
1162	if (amdgpu_vm_debug) {
1163		/* Invalidate all BOs to test for userspace bugs */
1164		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1165			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1166
1167			/* ignore duplicates */
1168			if (!bo)
1169				continue;
1170
1171			amdgpu_vm_bo_invalidate(adev, bo, false);
1172		}
1173	}
1174
1175	return 0;
1176}
1177
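/*
 * Push the collected synchronization state into the jobs: wait for the
 * previous fence of the context entity, pick up the implicit fences of
 * every reserved BO (honoring explicit-sync BOs) and track fences from
 * the gang leader's own ring as explicit sync so that a pipeline sync
 * can be inserted, see the comment in the loop below.
 */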
1178static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1179{
1180	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1181	struct drm_gpu_scheduler *sched;
1182	struct amdgpu_bo_list_entry *e;
1183	struct dma_fence *fence;
1184	unsigned int i;
1185	int r;
1186
1187	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1188	if (r) {
1189		if (r != -ERESTARTSYS)
1190			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1191		return r;
1192	}
1193
1194	list_for_each_entry(e, &p->validated, tv.head) {
1195		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1196		struct dma_resv *resv = bo->tbo.base.resv;
1197		enum amdgpu_sync_mode sync_mode;
1198
1199		sync_mode = amdgpu_bo_explicit_sync(bo) ?
1200			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1201		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1202				     &fpriv->vm);
1203		if (r)
1204			return r;
1205	}
1206
1207	for (i = 0; i < p->gang_size; ++i) {
1208		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1209		if (r)
1210			return r;
1211	}
1212
1213	sched = p->gang_leader->base.entity->rq->sched;
1214	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1215		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1216
1217		/*
1218		 * When we have a dependency it might be necessary to insert a
1219		 * pipeline sync to make sure that all caches etc. are flushed and
1220		 * the next job actually sees the results from the previous one
1221		 * before we start executing on the same scheduler ring.
1222		 */
1223		if (!s_fence || s_fence->sched != sched) {
1224			dma_fence_put(fence);
1225			continue;
1226		}
1227
1228		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1229		dma_fence_put(fence);
1230		if (r)
1231			return r;
1232	}
1233	return 0;
1234}
1235
1236static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1237{
1238	int i;
1239
1240	for (i = 0; i < p->num_post_deps; ++i) {
1241		if (p->post_deps[i].chain && p->post_deps[i].point) {
1242			drm_syncobj_add_point(p->post_deps[i].syncobj,
1243					      p->post_deps[i].chain,
1244					      p->fence, p->post_deps[i].point);
1245			p->post_deps[i].chain = NULL;
1246		} else {
1247			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1248						  p->fence);
1249		}
1250	}
1251}
1252
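/*
 * Final submission step, which must not fail after the jobs are armed:
 * the gang leader gains a scheduled-fence dependency on every other
 * member, and the userptrs are re-checked under the notifier lock; if
 * any were invalidated, -EAGAIN makes libdrm restart the whole ioctl.
 * Only then are the finished fences attached to the BOs and the jobs
 * pushed to their entities.
 */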
1253static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1254			    union drm_amdgpu_cs *cs)
1255{
1256	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1257	struct amdgpu_job *leader = p->gang_leader;
1258	struct amdgpu_bo_list_entry *e;
1259	unsigned int i;
1260	uint64_t seq;
1261	int r;
1262
1263	for (i = 0; i < p->gang_size; ++i)
1264		drm_sched_job_arm(&p->jobs[i]->base);
1265
1266	for (i = 0; i < p->gang_size; ++i) {
1267		struct dma_fence *fence;
1268
1269		if (p->jobs[i] == leader)
1270			continue;
1271
1272		fence = &p->jobs[i]->base.s_fence->scheduled;
1273		dma_fence_get(fence);
1274		r = drm_sched_job_add_dependency(&leader->base, fence);
1275		if (r) {
1276			dma_fence_put(fence);
1277			goto error_cleanup;
1278		}
1279	}
1280
1281	if (p->gang_size > 1) {
1282		for (i = 0; i < p->gang_size; ++i)
1283			amdgpu_job_set_gang_leader(p->jobs[i], leader);
1284	}
1285
1286	/* No memory allocation is allowed while holding the notifier lock.
1287	 * The lock is held until amdgpu_cs_submit is finished and the fence
1288	 * is added to the BOs.
1289	 */
1290	mutex_lock(&p->adev->notifier_lock);
1291
1292	/* If the userptrs were invalidated after amdgpu_cs_parser_bos(), return
1293	 * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs_ioctl.
1294	 */
1295	r = 0;
1296	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1297		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1298
1299		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
1300		e->range = NULL;
1301	}
1302	if (r) {
1303		r = -EAGAIN;
1304		goto error_unlock;
1305	}
1306
1307	p->fence = dma_fence_get(&leader->base.s_fence->finished);
1308	list_for_each_entry(e, &p->validated, tv.head) {
1309
1310		/* Everybody except for the gang leader uses READ */
1311		for (i = 0; i < p->gang_size; ++i) {
1312			if (p->jobs[i] == leader)
1313				continue;
1314
1315			dma_resv_add_fence(e->tv.bo->base.resv,
1316					   &p->jobs[i]->base.s_fence->finished,
1317					   DMA_RESV_USAGE_READ);
1318		}
1319
1320		/* The gang leader is remembered as writer */
1321		e->tv.num_shared = 0;
1322	}
1323
1324	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1325				   p->fence);
1326	amdgpu_cs_post_dependencies(p);
1327
1328	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1329	    !p->ctx->preamble_presented) {
1330		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1331		p->ctx->preamble_presented = true;
1332	}
1333
1334	cs->out.handle = seq;
1335	leader->uf_sequence = seq;
1336
1337	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1338	for (i = 0; i < p->gang_size; ++i) {
1339		amdgpu_job_free_resources(p->jobs[i]);
1340		trace_amdgpu_cs_ioctl(p->jobs[i]);
1341		drm_sched_entity_push_job(&p->jobs[i]->base);
1342		p->jobs[i] = NULL;
1343	}
1344
1345	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1346	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1347
1348	mutex_unlock(&p->adev->notifier_lock);
1349	mutex_unlock(&p->bo_list->bo_list_mutex);
1350	return 0;
1351
1352error_unlock:
1353	mutex_unlock(&p->adev->notifier_lock);
1354
1355error_cleanup:
1356	for (i = 0; i < p->gang_size; ++i)
1357		drm_sched_job_cleanup(&p->jobs[i]->base);
1358	return r;
1359}
1360
1361/* Cleanup the parser structure */
1362static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1363{
1364	unsigned i;
1365
1366	amdgpu_sync_free(&parser->sync);
1367	for (i = 0; i < parser->num_post_deps; i++) {
1368		drm_syncobj_put(parser->post_deps[i].syncobj);
1369		kfree(parser->post_deps[i].chain);
1370	}
1371	kfree(parser->post_deps);
1372
1373	dma_fence_put(parser->fence);
1374
1375	if (parser->ctx)
1376		amdgpu_ctx_put(parser->ctx);
1377	if (parser->bo_list)
1378		amdgpu_bo_list_put(parser->bo_list);
1379
1380	for (i = 0; i < parser->nchunks; i++)
1381		kvfree(parser->chunks[i].kdata);
1382	kvfree(parser->chunks);
1383	for (i = 0; i < parser->gang_size; ++i) {
1384		if (parser->jobs[i])
1385			amdgpu_job_free(parser->jobs[i]);
1386	}
1387	if (parser->uf_entry.tv.bo) {
1388		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
1389
1390		amdgpu_bo_unref(&uf);
1391	}
1392}
1393
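/*
 * Top-level CS ioctl flow: parser init -> pass1 (copy chunks, count
 * IBs) -> pass2 (fill IBs and dependencies) -> BO reservation and
 * validation -> IB patching for VM emulation -> VM updates -> ring
 * synchronization -> submit; errors unwind through the backoff and
 * fini paths below.
 */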
1394int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1395{
1396	struct amdgpu_device *adev = drm_to_adev(dev);
1397	struct amdgpu_cs_parser parser;
1398	int r;
1399
1400	if (amdgpu_ras_intr_triggered())
1401		return -EHWPOISON;
1402
1403	if (!adev->accel_working)
1404		return -EBUSY;
1405
1406	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1407	if (r) {
1408		if (printk_ratelimit())
1409			DRM_ERROR("Failed to initialize parser %d!\n", r);
1410		return r;
1411	}
1412
1413	r = amdgpu_cs_pass1(&parser, data);
1414	if (r)
1415		goto error_fini;
1416
1417	r = amdgpu_cs_pass2(&parser);
1418	if (r)
1419		goto error_fini;
1420
1421	r = amdgpu_cs_parser_bos(&parser, data);
1422	if (r) {
1423		if (r == -ENOMEM)
1424			DRM_ERROR("Not enough memory for command submission!\n");
1425		else if (r != -ERESTARTSYS && r != -EAGAIN)
1426			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1427		goto error_fini;
1428	}
1429
1430	r = amdgpu_cs_patch_jobs(&parser);
1431	if (r)
1432		goto error_backoff;
1433
1434	r = amdgpu_cs_vm_handling(&parser);
1435	if (r)
1436		goto error_backoff;
1437
1438	r = amdgpu_cs_sync_rings(&parser);
1439	if (r)
1440		goto error_backoff;
1441
1442	trace_amdgpu_cs_ibs(&parser);
1443
1444	r = amdgpu_cs_submit(&parser, data);
1445	if (r)
1446		goto error_backoff;
1447
1448	amdgpu_cs_parser_fini(&parser);
1449	return 0;
1450
1451error_backoff:
1452	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
1453	mutex_unlock(&parser.bo_list->bo_list_mutex);
1454
1455error_fini:
1456	amdgpu_cs_parser_fini(&parser);
1457	return r;
1458}
1459
1460/**
1461 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1462 *
1463 * @dev: drm device
1464 * @data: data from userspace
1465 * @filp: file private
1466 *
1467 * Wait for the command submission identified by handle to finish.
1468 */
1469int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1470			 struct drm_file *filp)
1471{
1472	union drm_amdgpu_wait_cs *wait = data;
1473	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1474	struct drm_sched_entity *entity;
1475	struct amdgpu_ctx *ctx;
1476	struct dma_fence *fence;
1477	long r;
1478
1479	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1480	if (ctx == NULL)
1481		return -EINVAL;
1482
1483	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1484				  wait->in.ring, &entity);
1485	if (r) {
1486		amdgpu_ctx_put(ctx);
1487		return r;
1488	}
1489
1490	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1491	if (IS_ERR(fence))
1492		r = PTR_ERR(fence);
1493	else if (fence) {
1494		r = dma_fence_wait_timeout(fence, true, timeout);
1495		if (r > 0 && fence->error)
1496			r = fence->error;
1497		dma_fence_put(fence);
1498	} else
1499		r = 1;
1500
1501	amdgpu_ctx_put(ctx);
1502	if (r < 0)
1503		return r;
1504
1505	memset(wait, 0, sizeof(*wait));
1506	wait->out.status = (r == 0);
1507
1508	return 0;
1509}
1510
1511/**
1512 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1513 *
1514 * @adev: amdgpu device
1515 * @filp: file private
1516 * @user: drm_amdgpu_fence copied from user space
1517 */
1518static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1519					     struct drm_file *filp,
1520					     struct drm_amdgpu_fence *user)
1521{
1522	struct drm_sched_entity *entity;
1523	struct amdgpu_ctx *ctx;
1524	struct dma_fence *fence;
1525	int r;
1526
1527	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1528	if (ctx == NULL)
1529		return ERR_PTR(-EINVAL);
1530
1531	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1532				  user->ring, &entity);
1533	if (r) {
1534		amdgpu_ctx_put(ctx);
1535		return ERR_PTR(r);
1536	}
1537
1538	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1539	amdgpu_ctx_put(ctx);
1540
1541	return fence;
1542}
1543
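/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a CS fence into a handle
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Export the fence of a previous submission as a syncobj handle, a
 * syncobj fd or a sync_file fd, depending on info->in.what.
 */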
1544int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1545				    struct drm_file *filp)
1546{
1547	struct amdgpu_device *adev = drm_to_adev(dev);
1548	union drm_amdgpu_fence_to_handle *info = data;
1549	struct dma_fence *fence;
1550	struct drm_syncobj *syncobj;
1551	struct sync_file *sync_file;
1552	int fd, r;
1553
1554	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1555	if (IS_ERR(fence))
1556		return PTR_ERR(fence);
1557
1558	if (!fence)
1559		fence = dma_fence_get_stub();
1560
1561	switch (info->in.what) {
1562	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1563		r = drm_syncobj_create(&syncobj, 0, fence);
1564		dma_fence_put(fence);
1565		if (r)
1566			return r;
1567		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1568		drm_syncobj_put(syncobj);
1569		return r;
1570
1571	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1572		r = drm_syncobj_create(&syncobj, 0, fence);
1573		dma_fence_put(fence);
1574		if (r)
1575			return r;
1576		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1577		drm_syncobj_put(syncobj);
1578		return r;
1579
1580	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1581		fd = get_unused_fd_flags(O_CLOEXEC);
1582		if (fd < 0) {
1583			dma_fence_put(fence);
1584			return fd;
1585		}
1586
1587		sync_file = sync_file_create(fence);
1588		dma_fence_put(fence);
1589		if (!sync_file) {
1590			put_unused_fd(fd);
1591			return -ENOMEM;
1592		}
1593
1594		fd_install(fd, sync_file->file);
1595		info->out.handle = fd;
1596		return 0;
1597
1598	default:
1599		dma_fence_put(fence);
1600		return -EINVAL;
1601	}
1602}
1603
1604/**
1605 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1606 *
1607 * @adev: amdgpu device
1608 * @filp: file private
1609 * @wait: wait parameters
1610 * @fences: array of drm_amdgpu_fence
1611 */
1612static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1613				     struct drm_file *filp,
1614				     union drm_amdgpu_wait_fences *wait,
1615				     struct drm_amdgpu_fence *fences)
1616{
1617	uint32_t fence_count = wait->in.fence_count;
1618	unsigned int i;
1619	long r = 1;
1620
1621	for (i = 0; i < fence_count; i++) {
1622		struct dma_fence *fence;
1623		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1624
1625		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1626		if (IS_ERR(fence))
1627			return PTR_ERR(fence);
1628		else if (!fence)
1629			continue;
1630
1631		r = dma_fence_wait_timeout(fence, true, timeout);
1632		if (r > 0 && fence->error)
1633			r = fence->error;
1634
1635		dma_fence_put(fence);
1636		if (r < 0)
1637			return r;
1638
1639		if (r == 0)
1640			break;
1641	}
1642
1643	memset(wait, 0, sizeof(*wait));
1644	wait->out.status = (r > 0);
1645
1646	return 0;
1647}
1648
1649/**
1650 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1651 *
1652 * @adev: amdgpu device
1653 * @filp: file private
1654 * @wait: wait parameters
1655 * @fences: array of drm_amdgpu_fence
1656 */
1657static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1658				    struct drm_file *filp,
1659				    union drm_amdgpu_wait_fences *wait,
1660				    struct drm_amdgpu_fence *fences)
1661{
1662	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1663	uint32_t fence_count = wait->in.fence_count;
1664	uint32_t first = ~0;
1665	struct dma_fence **array;
1666	unsigned int i;
1667	long r;
1668
1669	/* Prepare the fence array */
1670	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1671
1672	if (array == NULL)
1673		return -ENOMEM;
1674
1675	for (i = 0; i < fence_count; i++) {
1676		struct dma_fence *fence;
1677
1678		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1679		if (IS_ERR(fence)) {
1680			r = PTR_ERR(fence);
1681			goto err_free_fence_array;
1682		} else if (fence) {
1683			array[i] = fence;
1684		} else { /* NULL, the fence has been already signaled */
1685			r = 1;
1686			first = i;
1687			goto out;
1688		}
1689	}
1690
1691	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1692				       &first);
1693	if (r < 0)
1694		goto err_free_fence_array;
1695
1696out:
1697	memset(wait, 0, sizeof(*wait));
1698	wait->out.status = (r > 0);
1699	wait->out.first_signaled = first;
1700
1701	if (first < fence_count && array[first])
1702		r = array[first]->error;
1703	else
1704		r = 0;
1705
1706err_free_fence_array:
1707	for (i = 0; i < fence_count; i++)
1708		dma_fence_put(array[i]);
1709	kfree(array);
1710
1711	return r;
1712}
1713
1714/**
1715 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1716 *
1717 * @dev: drm device
1718 * @data: data from userspace
1719 * @filp: file private
1720 */
1721int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1722				struct drm_file *filp)
1723{
1724	struct amdgpu_device *adev = drm_to_adev(dev);
1725	union drm_amdgpu_wait_fences *wait = data;
1726	uint32_t fence_count = wait->in.fence_count;
1727	struct drm_amdgpu_fence *fences_user;
1728	struct drm_amdgpu_fence *fences;
1729	int r;
1730
1731	/* Get the fences from userspace */
1732	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1733			GFP_KERNEL);
1734	if (fences == NULL)
1735		return -ENOMEM;
1736
1737	fences_user = u64_to_user_ptr(wait->in.fences);
1738	if (copy_from_user(fences, fences_user,
1739		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1740		r = -EFAULT;
1741		goto err_free_fences;
1742	}
1743
1744	if (wait->in.wait_all)
1745		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1746	else
1747		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1748
1749err_free_fences:
1750	kfree(fences);
1751
1752	return r;
1753}
1754
1755/**
1756 * amdgpu_cs_find_mapping - find bo_va for VM address
1757 *
1758 * @parser: command submission parser context
1759 * @addr: VM address
1760 * @bo: resulting BO of the mapping found
1761 * @map: Placeholder to return found BO mapping
1762 *
1763 * Search the buffer objects in the command submission context for a certain
1764 * virtual memory address. Returns 0 and fills in @bo and @map when a
1765 * mapping is found, -EINVAL otherwise.
1766 */
1767int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1768			   uint64_t addr, struct amdgpu_bo **bo,
1769			   struct amdgpu_bo_va_mapping **map)
1770{
1771	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1772	struct ttm_operation_ctx ctx = { false, false };
1773	struct amdgpu_vm *vm = &fpriv->vm;
1774	struct amdgpu_bo_va_mapping *mapping;
1775	int r;
1776
1777	addr /= AMDGPU_GPU_PAGE_SIZE;
1778
1779	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1780	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1781		return -EINVAL;
1782
1783	*bo = mapping->bo_va->base.bo;
1784	*map = mapping;
1785
1786	/* Double check that the BO is reserved by this CS */
1787	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1788		return -EINVAL;
1789
1790	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1791		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1792		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1793		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1794		if (r)
1795			return r;
1796	}
1797
1798	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1799}