   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
  35#include <drm/ttm/ttm_tt.h>
  36
  37#include "amdgpu_cs.h"
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_gmc.h"
  41#include "amdgpu_gem.h"
  42#include "amdgpu_ras.h"
  43
  44static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
  45				 struct amdgpu_device *adev,
  46				 struct drm_file *filp,
  47				 union drm_amdgpu_cs *cs)
  48{
  49	struct amdgpu_fpriv *fpriv = filp->driver_priv;
  50
  51	if (cs->in.num_chunks == 0)
  52		return -EINVAL;
  53
  54	memset(p, 0, sizeof(*p));
  55	p->adev = adev;
  56	p->filp = filp;
  57
  58	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
  59	if (!p->ctx)
  60		return -EINVAL;
  61
  62	if (atomic_read(&p->ctx->guilty)) {
  63		amdgpu_ctx_put(p->ctx);
  64		return -ECANCELED;
  65	}
  66
  67	amdgpu_sync_create(&p->sync);
  68	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
  69		      DRM_EXEC_IGNORE_DUPLICATES, 0);
  70	return 0;
  71}
  72
  73static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
  74			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
  75{
  76	struct drm_sched_entity *entity;
  77	unsigned int i;
  78	int r;
  79
  80	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
  81				  chunk_ib->ip_instance,
  82				  chunk_ib->ring, &entity);
  83	if (r)
  84		return r;
  85
  86	/*
  87	 * Abort if there is no run queue associated with this entity.
  88	 * Possibly because of disabled HW IP.
  89	 */
  90	if (entity->rq == NULL)
  91		return -EINVAL;
  92
  93	/* Check if we can add this IB to some existing job */
  94	for (i = 0; i < p->gang_size; ++i)
  95		if (p->entities[i] == entity)
  96			return i;
  97
  98	/* If not, increase the gang size if possible */
  99	if (i == AMDGPU_CS_GANG_SIZE)
 100		return -EINVAL;
 101
 102	p->entities[i] = entity;
 103	p->gang_size = i + 1;
 104	return i;
 105}
 106
 107static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 108			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
 109			   unsigned int *num_ibs)
 110{
 111	int r;
 112
 113	r = amdgpu_cs_job_idx(p, chunk_ib);
 114	if (r < 0)
 115		return r;
 116
 117	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
 118		return -EINVAL;
 119
 120	++(num_ibs[r]);
 121	p->gang_leader_idx = r;
 122	return 0;
 123}
 124
 125static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 126				   struct drm_amdgpu_cs_chunk_fence *data,
 127				   uint32_t *offset)
 128{
 129	struct drm_gem_object *gobj;
 130	unsigned long size;
 131
 132	gobj = drm_gem_object_lookup(p->filp, data->handle);
 133	if (gobj == NULL)
 134		return -EINVAL;
 135
 136	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 137	drm_gem_object_put(gobj);
 138
 139	size = amdgpu_bo_size(p->uf_bo);
 140	if (size != PAGE_SIZE || data->offset > (size - 8))
 141		return -EINVAL;
 142
 143	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
 144		return -EINVAL;
 145
 146	*offset = data->offset;
 147	return 0;
 148}
 149
 150static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
 151				   struct drm_amdgpu_bo_list_in *data)
 152{
 153	struct drm_amdgpu_bo_list_entry *info;
 154	int r;
 155
 156	r = amdgpu_bo_create_list_entry_array(data, &info);
 157	if (r)
 158		return r;
 159
 160	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
 161				  &p->bo_list);
 162	if (r)
 163		goto error_free;
 164
 165	kvfree(info);
 166	return 0;
 167
 168error_free:
 169	kvfree(info);
 170
 171	return r;
 172}
 173
 174/* Copy the data from userspace and go over it the first time */
 175static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 176			   union drm_amdgpu_cs *cs)
 177{
 178	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 179	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
 180	struct amdgpu_vm *vm = &fpriv->vm;
 181	uint64_t *chunk_array_user;
 182	uint64_t *chunk_array;
 183	uint32_t uf_offset = 0;
 184	size_t size;
 185	int ret;
 186	int i;
 187
 188	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
 189				     GFP_KERNEL);
 190	if (!chunk_array)
 191		return -ENOMEM;
 192
 193	/* get chunks */
 194	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 195	if (copy_from_user(chunk_array, chunk_array_user,
 196			   sizeof(uint64_t)*cs->in.num_chunks)) {
 197		ret = -EFAULT;
 198		goto free_chunk;
 199	}
 200
 201	p->nchunks = cs->in.num_chunks;
 202	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 203			    GFP_KERNEL);
 204	if (!p->chunks) {
 205		ret = -ENOMEM;
 206		goto free_chunk;
 207	}
 208
 209	for (i = 0; i < p->nchunks; i++) {
 210		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
 211		struct drm_amdgpu_cs_chunk user_chunk;
 212		uint32_t __user *cdata;
 213
 214		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 215		if (copy_from_user(&user_chunk, chunk_ptr,
 216				       sizeof(struct drm_amdgpu_cs_chunk))) {
 217			ret = -EFAULT;
 218			i--;
 219			goto free_partial_kdata;
 220		}
 221		p->chunks[i].chunk_id = user_chunk.chunk_id;
 222		p->chunks[i].length_dw = user_chunk.length_dw;
 223
 224		size = p->chunks[i].length_dw;
 225		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 226
 227		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
 228						    GFP_KERNEL);
 229		if (p->chunks[i].kdata == NULL) {
 230			ret = -ENOMEM;
 231			i--;
 232			goto free_partial_kdata;
 233		}
 234		size *= sizeof(uint32_t);
 235		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 236			ret = -EFAULT;
 237			goto free_partial_kdata;
 238		}
 239
 240		/* Assume the worst on the following checks */
 241		ret = -EINVAL;
 242		switch (p->chunks[i].chunk_id) {
 243		case AMDGPU_CHUNK_ID_IB:
 244			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
 245				goto free_partial_kdata;
 246
 247			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
 248			if (ret)
 249				goto free_partial_kdata;
 250			break;
 251
 252		case AMDGPU_CHUNK_ID_FENCE:
 253			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
 254				goto free_partial_kdata;
 255
 256			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
 257						      &uf_offset);
 258			if (ret)
 259				goto free_partial_kdata;
 260			break;
 261
 262		case AMDGPU_CHUNK_ID_BO_HANDLES:
 263			if (size < sizeof(struct drm_amdgpu_bo_list_in))
 264				goto free_partial_kdata;
 265
 266			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 267			if (ret)
 268				goto free_partial_kdata;
 269			break;
 270
 271		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 272		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 273		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 274		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 275		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 276		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 277		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 278			break;
 279
 280		default:
 281			goto free_partial_kdata;
 282		}
 283	}
 284
 285	if (!p->gang_size) {
 286		ret = -EINVAL;
 287		goto free_all_kdata;
 288	}
 289
 290	for (i = 0; i < p->gang_size; ++i) {
 291		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
 292				       num_ibs[i], &p->jobs[i]);
 293		if (ret)
 294			goto free_all_kdata;
 295	}
 296	p->gang_leader = p->jobs[p->gang_leader_idx];
 297
 298	if (p->ctx->generation != p->gang_leader->generation) {
 299		ret = -ECANCELED;
 300		goto free_all_kdata;
 301	}
 302
 303	if (p->uf_bo)
 304		p->gang_leader->uf_addr = uf_offset;
 305	kvfree(chunk_array);
 306
 307	/* Use this opportunity to fill in task info for the vm */
 308	amdgpu_vm_set_task_info(vm);
 309
 310	return 0;
 311
 312free_all_kdata:
 313	i = p->nchunks - 1;
 314free_partial_kdata:
 315	for (; i >= 0; i--)
 316		kvfree(p->chunks[i].kdata);
 317	kvfree(p->chunks);
 318	p->chunks = NULL;
 319	p->nchunks = 0;
 320free_chunk:
 321	kvfree(chunk_array);
 322
 323	return ret;
 324}
 325
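/*
 * Illustration (not part of this driver): a minimal userspace-side sketch of
 * the chunk layout that amdgpu_cs_pass1() above copies in.  cs->in.chunks
 * points to an array of __u64 user pointers, each of which points to a
 * struct drm_amdgpu_cs_chunk whose chunk_data in turn points to the payload
 * (here a single IB chunk).  Context id, BO list handle, GPU VA and size are
 * placeholder parameters; a real submission also needs the referenced BOs
 * and usually fence/syncobj chunks, and the include paths depend on how
 * libdrm is installed.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>			/* drmIoctl(), from libdrm */
#include <libdrm/amdgpu_drm.h>		/* uapi structures parsed above */

static int example_submit_one_ib(int fd, uint32_t ctx_id, uint32_t bo_list,
				 uint64_t ib_va, uint32_t ib_bytes)
{
	struct drm_amdgpu_cs_chunk_ib ib_data;
	struct drm_amdgpu_cs_chunk chunk;
	union drm_amdgpu_cs cs;
	uint64_t chunk_ptr;

	memset(&ib_data, 0, sizeof(ib_data));
	ib_data.ip_type = AMDGPU_HW_IP_GFX;	/* instance and ring left at 0 */
	ib_data.va_start = ib_va;		/* GPU VA of the command buffer */
	ib_data.ib_bytes = ib_bytes;

	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
	chunk.length_dw = sizeof(ib_data) / 4;	/* payload size in dwords */
	chunk.chunk_data = (uintptr_t)&ib_data;

	chunk_ptr = (uintptr_t)&chunk;		/* array of one chunk pointer */

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list;
	cs.in.num_chunks = 1;
	cs.in.chunks = (uintptr_t)&chunk_ptr;

	/* On success the submission's sequence number is in cs.out.handle. */
	return drmIoctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
}
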
 326static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 327			   struct amdgpu_cs_chunk *chunk,
 328			   unsigned int *ce_preempt,
 329			   unsigned int *de_preempt)
 330{
 331	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
 332	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 333	struct amdgpu_vm *vm = &fpriv->vm;
 334	struct amdgpu_ring *ring;
 335	struct amdgpu_job *job;
 336	struct amdgpu_ib *ib;
 337	int r;
 338
 339	r = amdgpu_cs_job_idx(p, chunk_ib);
 340	if (r < 0)
 341		return r;
 342
 343	job = p->jobs[r];
 344	ring = amdgpu_job_ring(job);
 345	ib = &job->ibs[job->num_ibs++];
 346
 347	/* MM engine doesn't support user fences */
 348	if (p->uf_bo && ring->funcs->no_user_fence)
 349		return -EINVAL;
 350
 351	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 352	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 353		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 354			(*ce_preempt)++;
 355		else
 356			(*de_preempt)++;
 357
 358		/* Each GFX command submission allows at most one preemptible IB
 359		 * each for CE and DE. */
 360		if (*ce_preempt > 1 || *de_preempt > 1)
 361			return -EINVAL;
 362	}
 363
 364	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 365		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 366
 367	r =  amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
 368			   chunk_ib->ib_bytes : 0,
 369			   AMDGPU_IB_POOL_DELAYED, ib);
 370	if (r) {
 371		DRM_ERROR("Failed to get ib !\n");
 372		return r;
 373	}
 374
 375	ib->gpu_addr = chunk_ib->va_start;
 376	ib->length_dw = chunk_ib->ib_bytes / 4;
 377	ib->flags = chunk_ib->flags;
 378	return 0;
 379}
 380
 381static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 382				     struct amdgpu_cs_chunk *chunk)
 383{
 384	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 385	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 386	unsigned int num_deps;
 387	int i, r;
 388
 389	num_deps = chunk->length_dw * 4 /
 390		sizeof(struct drm_amdgpu_cs_chunk_dep);
 391
 392	for (i = 0; i < num_deps; ++i) {
 393		struct amdgpu_ctx *ctx;
 394		struct drm_sched_entity *entity;
 395		struct dma_fence *fence;
 396
 397		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 398		if (ctx == NULL)
 399			return -EINVAL;
 400
 401		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 402					  deps[i].ip_instance,
 403					  deps[i].ring, &entity);
 404		if (r) {
 405			amdgpu_ctx_put(ctx);
 406			return r;
 407		}
 408
 409		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 410		amdgpu_ctx_put(ctx);
 411
 412		if (IS_ERR(fence))
 413			return PTR_ERR(fence);
 414		else if (!fence)
 415			continue;
 416
 417		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 418			struct drm_sched_fence *s_fence;
 419			struct dma_fence *old = fence;
 420
 421			s_fence = to_drm_sched_fence(fence);
 422			fence = dma_fence_get(&s_fence->scheduled);
 423			dma_fence_put(old);
 424		}
 425
 426		r = amdgpu_sync_fence(&p->sync, fence);
 427		dma_fence_put(fence);
 428		if (r)
 429			return r;
 430	}
 431	return 0;
 432}
 433
 434static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 435					 uint32_t handle, u64 point,
 436					 u64 flags)
 437{
 438	struct dma_fence *fence;
 439	int r;
 440
 441	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
 442	if (r) {
 443		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
 444			  handle, point, r);
 445		return r;
 446	}
 447
 448	r = amdgpu_sync_fence(&p->sync, fence);
 449	dma_fence_put(fence);
 450	return r;
 451}
 452
 453static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 454				   struct amdgpu_cs_chunk *chunk)
 455{
 456	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 457	unsigned int num_deps;
 458	int i, r;
 459
 460	num_deps = chunk->length_dw * 4 /
 461		sizeof(struct drm_amdgpu_cs_chunk_sem);
 462	for (i = 0; i < num_deps; ++i) {
 463		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
 464		if (r)
 465			return r;
 466	}
 467
 468	return 0;
 469}
 470
 471static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 472					      struct amdgpu_cs_chunk *chunk)
 473{
 474	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 475	unsigned int num_deps;
 476	int i, r;
 477
 478	num_deps = chunk->length_dw * 4 /
 479		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 480	for (i = 0; i < num_deps; ++i) {
 481		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
 482						  syncobj_deps[i].point,
 483						  syncobj_deps[i].flags);
 484		if (r)
 485			return r;
 486	}
 487
 488	return 0;
 489}
 490
 491static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 492				    struct amdgpu_cs_chunk *chunk)
 493{
 494	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 495	unsigned int num_deps;
 496	int i;
 497
 498	num_deps = chunk->length_dw * 4 /
 499		sizeof(struct drm_amdgpu_cs_chunk_sem);
 500
 501	if (p->post_deps)
 502		return -EINVAL;
 503
 504	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 505				     GFP_KERNEL);
 506	p->num_post_deps = 0;
 507
 508	if (!p->post_deps)
 509		return -ENOMEM;
 510
 511
 512	for (i = 0; i < num_deps; ++i) {
 513		p->post_deps[i].syncobj =
 514			drm_syncobj_find(p->filp, deps[i].handle);
 515		if (!p->post_deps[i].syncobj)
 516			return -EINVAL;
 517		p->post_deps[i].chain = NULL;
 518		p->post_deps[i].point = 0;
 519		p->num_post_deps++;
 520	}
 521
 522	return 0;
 523}
 524
 525static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 526						struct amdgpu_cs_chunk *chunk)
 527{
 528	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 529	unsigned int num_deps;
 530	int i;
 531
 532	num_deps = chunk->length_dw * 4 /
 533		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 534
 535	if (p->post_deps)
 536		return -EINVAL;
 537
 538	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 539				     GFP_KERNEL);
 540	p->num_post_deps = 0;
 541
 542	if (!p->post_deps)
 543		return -ENOMEM;
 544
 545	for (i = 0; i < num_deps; ++i) {
 546		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
 547
 548		dep->chain = NULL;
 549		if (syncobj_deps[i].point) {
 550			dep->chain = dma_fence_chain_alloc();
 551			if (!dep->chain)
 552				return -ENOMEM;
 553		}
 554
 555		dep->syncobj = drm_syncobj_find(p->filp,
 556						syncobj_deps[i].handle);
 557		if (!dep->syncobj) {
 558			dma_fence_chain_free(dep->chain);
 559			return -EINVAL;
 560		}
 561		dep->point = syncobj_deps[i].point;
 562		p->num_post_deps++;
 563	}
 564
 565	return 0;
 566}
 567
 568static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
 569			       struct amdgpu_cs_chunk *chunk)
 570{
 571	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
 572	int i;
 573
 574	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
 575		return -EINVAL;
 576
 577	for (i = 0; i < p->gang_size; ++i) {
 578		p->jobs[i]->shadow_va = shadow->shadow_va;
 579		p->jobs[i]->csa_va = shadow->csa_va;
 580		p->jobs[i]->gds_va = shadow->gds_va;
 581		p->jobs[i]->init_shadow =
 582			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
 583	}
 584
 585	return 0;
 586}
 587
 588static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 589{
 590	unsigned int ce_preempt = 0, de_preempt = 0;
 591	int i, r;
 592
 593	for (i = 0; i < p->nchunks; ++i) {
 594		struct amdgpu_cs_chunk *chunk;
 595
 596		chunk = &p->chunks[i];
 597
 598		switch (chunk->chunk_id) {
 599		case AMDGPU_CHUNK_ID_IB:
 600			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
 601			if (r)
 602				return r;
 603			break;
 604		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 605		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 606			r = amdgpu_cs_p2_dependencies(p, chunk);
 607			if (r)
 608				return r;
 609			break;
 610		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 611			r = amdgpu_cs_p2_syncobj_in(p, chunk);
 612			if (r)
 613				return r;
 614			break;
 615		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 616			r = amdgpu_cs_p2_syncobj_out(p, chunk);
 617			if (r)
 618				return r;
 619			break;
 620		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 621			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
 622			if (r)
 623				return r;
 624			break;
 625		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 626			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
 627			if (r)
 628				return r;
 629			break;
 630		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 631			r = amdgpu_cs_p2_shadow(p, chunk);
 632			if (r)
 633				return r;
 634			break;
 635		}
 636	}
 637
 638	return 0;
 639}
 640
 641/* Convert microseconds to bytes. */
 642static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 643{
 644	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 645		return 0;
 646
 647	/* Since accum_us is incremented by a million per second, just
 648	 * multiply it by the number of MB/s to get the number of bytes.
 649	 */
 650	return us << adev->mm_stats.log2_max_MBps;
 651}
 652
 653static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 654{
 655	if (!adev->mm_stats.log2_max_MBps)
 656		return 0;
 657
 658	return bytes >> adev->mm_stats.log2_max_MBps;
 659}
 660
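/*
 * Worked example (values assumed for illustration, not taken from real
 * hardware): log2_max_MBps is the log2 of the maximum buffer-migration rate
 * in MB/s, so each accumulated microsecond of budget is worth
 * 2^log2_max_MBps bytes.  With log2_max_MBps = 6 (64 MB/s), one second of
 * budget (1,000,000 us) converts to 64,000,000 bytes, and the 200 ms
 * per-submission cap used below converts to 200,000 << 6 = 12,800,000 bytes,
 * i.e. roughly 12.8 MB of optional buffer moves per submission.
 */
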
 661/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 662 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 663 * which means it can go over the threshold once. If that happens, the driver
 664 * will be in debt and no other buffer migrations can be done until that debt
 665 * is repaid.
 666 *
 667 * This approach allows moving a buffer of any size (it's important to allow
 668 * that).
 669 *
 670 * The currency is simply time in microseconds and it increases as the clock
 671 * ticks. The accumulated microseconds (us) are converted to bytes and
 672 * returned.
 673 */
 674static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 675					      u64 *max_bytes,
 676					      u64 *max_vis_bytes)
 677{
 678	s64 time_us, increment_us;
 679	u64 free_vram, total_vram, used_vram;
 680	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 681	 * throttling.
 682	 *
 683	 * It means that in order to get full max MBps, at least 5 IBs per
 684	 * second must be submitted and not more than 200ms apart from each
 685	 * other.
 686	 */
 687	const s64 us_upper_bound = 200000;
 688
 689	if (!adev->mm_stats.log2_max_MBps) {
 690		*max_bytes = 0;
 691		*max_vis_bytes = 0;
 692		return;
 693	}
 694
 695	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 696	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 697	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 698
 699	spin_lock(&adev->mm_stats.lock);
 700
 701	/* Increase the amount of accumulated us. */
 702	time_us = ktime_to_us(ktime_get());
 703	increment_us = time_us - adev->mm_stats.last_update_us;
 704	adev->mm_stats.last_update_us = time_us;
 705	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 706				      us_upper_bound);
 707
 708	/* This prevents the short period of low performance when the VRAM
 709	 * usage is low and the driver is in debt or doesn't have enough
 710	 * accumulated us to fill VRAM quickly.
 711	 *
 712	 * The situation can occur in these cases:
 713	 * - a lot of VRAM is freed by userspace
 714	 * - the presence of a big buffer causes a lot of evictions
 715	 *   (solution: split buffers into smaller ones)
 716	 *
 717	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 718	 * accum_us to a positive number.
 719	 */
 720	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 721		s64 min_us;
 722
 723		/* Be more aggressive on dGPUs. Try to fill a portion of free
 724		 * VRAM now.
 725		 */
 726		if (!(adev->flags & AMD_IS_APU))
 727			min_us = bytes_to_us(adev, free_vram / 4);
 728		else
 729			min_us = 0; /* Reset accum_us on APUs. */
 730
 731		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 732	}
 733
 734	/* This is set to 0 if the driver is in debt to disallow (optional)
 735	 * buffer moves.
 736	 */
 737	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 738
 739	/* Do the same for visible VRAM if half of it is free */
 740	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 741		u64 total_vis_vram = adev->gmc.visible_vram_size;
 742		u64 used_vis_vram =
 743		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
 744
 745		if (used_vis_vram < total_vis_vram) {
 746			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 747
 748			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 749							  increment_us, us_upper_bound);
 750
 751			if (free_vis_vram >= total_vis_vram / 2)
 752				adev->mm_stats.accum_us_vis =
 753					max(bytes_to_us(adev, free_vis_vram / 2),
 754					    adev->mm_stats.accum_us_vis);
 755		}
 756
 757		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 758	} else {
 759		*max_vis_bytes = 0;
 760	}
 761
 762	spin_unlock(&adev->mm_stats.lock);
 763}
 764
 765/* Report how many bytes have really been moved for the last command
 766 * submission. This can result in a debt that can stop buffer migrations
 767 * temporarily.
 768 */
 769void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 770				  u64 num_vis_bytes)
 771{
 772	spin_lock(&adev->mm_stats.lock);
 773	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 774	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 775	spin_unlock(&adev->mm_stats.lock);
 776}
 777
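/*
 * Illustration (not part of this driver): a self-contained simulation of the
 * budget/debt cycle implemented by amdgpu_cs_get_threshold_for_moves() and
 * amdgpu_cs_report_moved_bytes() above.  The 100 ms gap between submissions,
 * the 200 ms cap and the 64 MB/s rate are assumptions chosen to keep the
 * numbers easy to follow; the real values live in adev->mm_stats.
 */
#include <stdio.h>

int main(void)
{
	const long long us_upper_bound = 200000;	/* 200 ms cap */
	const unsigned int log2_max_MBps = 6;		/* assume 64 MB/s */
	long long accum_us = 0;
	int i;

	for (i = 0; i < 3; i++) {
		long long threshold_bytes, moved_bytes;

		/* 100 ms elapsed since the previous submission, capped. */
		accum_us += 100000;
		if (accum_us > us_upper_bound)
			accum_us = us_upper_bound;

		/* A submission in debt gets a zero threshold. */
		threshold_bytes = accum_us > 0 ? accum_us << log2_max_MBps : 0;

		/* The first submission moves a 16 MB buffer anyway (a single
		 * move may exceed the threshold once), which puts the
		 * accounting into debt; later submissions get a zero
		 * threshold until enough time has passed to repay it. */
		moved_bytes = (i == 0) ? 16 * 1000 * 1000 : 0;
		accum_us -= moved_bytes >> log2_max_MBps;

		printf("submission %d: threshold %lld bytes, moved %lld, accum_us now %lld\n",
		       i, threshold_bytes, moved_bytes, accum_us);
	}
	return 0;
}
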
 778static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 779{
 780	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 781	struct amdgpu_cs_parser *p = param;
 782	struct ttm_operation_ctx ctx = {
 783		.interruptible = true,
 784		.no_wait_gpu = false,
 785		.resv = bo->tbo.base.resv
 786	};
 787	uint32_t domain;
 788	int r;
 789
 790	if (bo->tbo.pin_count)
 791		return 0;
 792
 793	/* Don't move this buffer if we have depleted our allowance
 794	 * to move it. Don't move anything if the threshold is zero.
 795	 */
 796	if (p->bytes_moved < p->bytes_moved_threshold &&
 797	    (!bo->tbo.base.dma_buf ||
 798	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 799		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 800		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 801			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 802			 * visible VRAM if we've depleted our allowance to do
 803			 * that.
 804			 */
 805			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 806				domain = bo->preferred_domains;
 807			else
 808				domain = bo->allowed_domains;
 809		} else {
 810			domain = bo->preferred_domains;
 811		}
 812	} else {
 813		domain = bo->allowed_domains;
 814	}
 815
 816retry:
 817	amdgpu_bo_placement_from_domain(bo, domain);
 818	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 819
 820	p->bytes_moved += ctx.bytes_moved;
 821	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 822	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 823		p->bytes_moved_vis += ctx.bytes_moved;
 824
 825	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 826		domain = bo->allowed_domains;
 827		goto retry;
 828	}
 829
 830	return r;
 831}
 832
 833static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 834				union drm_amdgpu_cs *cs)
 835{
 836	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 837	struct ttm_operation_ctx ctx = { true, false };
 838	struct amdgpu_vm *vm = &fpriv->vm;
 839	struct amdgpu_bo_list_entry *e;
 840	struct drm_gem_object *obj;
 841	unsigned long index;
 842	unsigned int i;
 843	int r;
 844
 845	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 846	if (cs->in.bo_list_handle) {
 847		if (p->bo_list)
 848			return -EINVAL;
 849
 850		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 851				       &p->bo_list);
 852		if (r)
 853			return r;
 854	} else if (!p->bo_list) {
 855		/* Create an empty bo_list when no handle is provided */
 856		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 857					  &p->bo_list);
 858		if (r)
 859			return r;
 860	}
 861
 862	mutex_lock(&p->bo_list->bo_list_mutex);
 863
 864	/* Get the userptr backing pages. If the pages were updated after being
 865	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will
 866	 * do amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
 867	 */
 868	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 869		bool userpage_invalidated = false;
 870		struct amdgpu_bo *bo = e->bo;
 871		int i;
 872
 873		e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
 874					 sizeof(struct page *),
 875					 GFP_KERNEL);
 876		if (!e->user_pages) {
 877			DRM_ERROR("kvmalloc_array failure\n");
 878			r = -ENOMEM;
 879			goto out_free_user_pages;
 880		}
 881
 882		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 883		if (r) {
 884			kvfree(e->user_pages);
 885			e->user_pages = NULL;
 886			goto out_free_user_pages;
 887		}
 888
 889		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 890			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 891				userpage_invalidated = true;
 892				break;
 893			}
 894		}
 895		e->user_invalidated = userpage_invalidated;
 896	}
 897
 898	drm_exec_until_all_locked(&p->exec) {
 899		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
 900		drm_exec_retry_on_contention(&p->exec);
 901		if (unlikely(r))
 902			goto out_free_user_pages;
 903
 904		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 905			/* One fence for TTM and one for each CS job */
 906			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
 907						 1 + p->gang_size);
 908			drm_exec_retry_on_contention(&p->exec);
 909			if (unlikely(r))
 910				goto out_free_user_pages;
 911
 912			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
 913		}
 914
 915		if (p->uf_bo) {
 916			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
 917						 1 + p->gang_size);
 918			drm_exec_retry_on_contention(&p->exec);
 919			if (unlikely(r))
 920				goto out_free_user_pages;
 921		}
 922	}
 923
 924	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 925		struct mm_struct *usermm;
 926
 927		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
 928		if (usermm && usermm != current->mm) {
 929			r = -EPERM;
 930			goto out_free_user_pages;
 931		}
 932
 933		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
 934		    e->user_invalidated && e->user_pages) {
 935			amdgpu_bo_placement_from_domain(e->bo,
 936							AMDGPU_GEM_DOMAIN_CPU);
 937			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
 938					    &ctx);
 939			if (r)
 940				goto out_free_user_pages;
 941
 942			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
 943						     e->user_pages);
 944		}
 945
 946		kvfree(e->user_pages);
 947		e->user_pages = NULL;
 948	}
 949
 950	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 951					  &p->bytes_moved_vis_threshold);
 952	p->bytes_moved = 0;
 953	p->bytes_moved_vis = 0;
 954
 955	r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
 956			       amdgpu_cs_bo_validate, p);
 957	if (r) {
 958		DRM_ERROR("amdgpu_vm_validate() failed.\n");
 959		goto out_free_user_pages;
 960	}
 961
 962	drm_exec_for_each_locked_object(&p->exec, index, obj) {
 963		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
 964		if (unlikely(r))
 965			goto out_free_user_pages;
 966	}
 967
 968	if (p->uf_bo) {
 969		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
 970		if (unlikely(r))
 971			goto out_free_user_pages;
 972
 973		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
 974	}
 975
 976	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 977				     p->bytes_moved_vis);
 978
 979	for (i = 0; i < p->gang_size; ++i)
 980		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
 981					 p->bo_list->gws_obj,
 982					 p->bo_list->oa_obj);
 983	return 0;
 984
 985out_free_user_pages:
 986	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 987		struct amdgpu_bo *bo = e->bo;
 988
 989		if (!e->user_pages)
 990			continue;
 991		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 992		kvfree(e->user_pages);
 993		e->user_pages = NULL;
 994		e->range = NULL;
 995	}
 996	mutex_unlock(&p->bo_list->bo_list_mutex);
 997	return r;
 998}
 999
1000static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1001{
1002	int i, j;
1003
1004	if (!trace_amdgpu_cs_enabled())
1005		return;
1006
1007	for (i = 0; i < p->gang_size; ++i) {
1008		struct amdgpu_job *job = p->jobs[i];
1009
1010		for (j = 0; j < job->num_ibs; ++j)
1011			trace_amdgpu_cs(p, job, &job->ibs[j]);
1012	}
1013}
1014
1015static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1016			       struct amdgpu_job *job)
1017{
1018	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1019	unsigned int i;
1020	int r;
1021
1022	/* Only for UVD/VCE VM emulation */
1023	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1024		return 0;
1025
1026	for (i = 0; i < job->num_ibs; ++i) {
1027		struct amdgpu_ib *ib = &job->ibs[i];
1028		struct amdgpu_bo_va_mapping *m;
1029		struct amdgpu_bo *aobj;
1030		uint64_t va_start;
1031		uint8_t *kptr;
1032
1033		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1034		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1035		if (r) {
1036			DRM_ERROR("IB va_start is invalid\n");
1037			return r;
1038		}
1039
1040		if ((va_start + ib->length_dw * 4) >
1041		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1042			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1043			return -EINVAL;
1044		}
1045
1046		/* the IB should be reserved at this point */
1047		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1048		if (r)
1049			return r;
1050
1051		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1052
1053		if (ring->funcs->parse_cs) {
1054			memcpy(ib->ptr, kptr, ib->length_dw * 4);
1055			amdgpu_bo_kunmap(aobj);
1056
1057			r = amdgpu_ring_parse_cs(ring, p, job, ib);
1058			if (r)
1059				return r;
1060		} else {
1061			ib->ptr = (uint32_t *)kptr;
1062			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1063			amdgpu_bo_kunmap(aobj);
1064			if (r)
1065				return r;
1066		}
1067	}
1068
1069	return 0;
1070}
1071
1072static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1073{
1074	unsigned int i;
1075	int r;
1076
1077	for (i = 0; i < p->gang_size; ++i) {
1078		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1079		if (r)
1080			return r;
1081	}
1082	return 0;
1083}
1084
1085static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1086{
1087	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1088	struct amdgpu_job *job = p->gang_leader;
1089	struct amdgpu_device *adev = p->adev;
1090	struct amdgpu_vm *vm = &fpriv->vm;
1091	struct amdgpu_bo_list_entry *e;
1092	struct amdgpu_bo_va *bo_va;
1093	unsigned int i;
1094	int r;
1095
1096	r = amdgpu_vm_clear_freed(adev, vm, NULL);
1097	if (r)
1098		return r;
1099
1100	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1101	if (r)
1102		return r;
1103
1104	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1105	if (r)
1106		return r;
1107
1108	if (fpriv->csa_va) {
1109		bo_va = fpriv->csa_va;
1110		BUG_ON(!bo_va);
1111		r = amdgpu_vm_bo_update(adev, bo_va, false);
1112		if (r)
1113			return r;
1114
1115		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1116		if (r)
1117			return r;
1118	}
1119
1120	/* FIXME: In theory this loop shouldn't be needed any more when
1121	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
1122	 * with p->ticket. But removing it caused test regressions, so I'm
1123	 * leaving it here for now.
1124	 */
1125	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1126		bo_va = e->bo_va;
1127		if (bo_va == NULL)
1128			continue;
1129
1130		r = amdgpu_vm_bo_update(adev, bo_va, false);
1131		if (r)
1132			return r;
1133
1134		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1135		if (r)
1136			return r;
1137	}
1138
1139	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
1140	if (r)
1141		return r;
1142
1143	r = amdgpu_vm_update_pdes(adev, vm, false);
1144	if (r)
1145		return r;
1146
1147	r = amdgpu_sync_fence(&p->sync, vm->last_update);
1148	if (r)
1149		return r;
1150
1151	for (i = 0; i < p->gang_size; ++i) {
1152		job = p->jobs[i];
1153
1154		if (!job->vm)
1155			continue;
1156
1157		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1158	}
1159
1160	if (adev->debug_vm) {
1161		/* Invalidate all BOs to test for userspace bugs */
1162		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1163			struct amdgpu_bo *bo = e->bo;
1164
1165			/* ignore duplicates */
1166			if (!bo)
1167				continue;
1168
1169			amdgpu_vm_bo_invalidate(adev, bo, false);
1170		}
1171	}
1172
1173	return 0;
1174}
1175
1176static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1177{
1178	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1179	struct drm_gpu_scheduler *sched;
1180	struct drm_gem_object *obj;
1181	struct dma_fence *fence;
1182	unsigned long index;
1183	unsigned int i;
1184	int r;
1185
1186	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1187	if (r) {
1188		if (r != -ERESTARTSYS)
1189			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1190		return r;
1191	}
1192
1193	drm_exec_for_each_locked_object(&p->exec, index, obj) {
1194		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1195
1196		struct dma_resv *resv = bo->tbo.base.resv;
1197		enum amdgpu_sync_mode sync_mode;
1198
1199		sync_mode = amdgpu_bo_explicit_sync(bo) ?
1200			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1201		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1202				     &fpriv->vm);
1203		if (r)
1204			return r;
1205	}
1206
1207	for (i = 0; i < p->gang_size; ++i) {
1208		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1209		if (r)
1210			return r;
1211	}
1212
1213	sched = p->gang_leader->base.entity->rq->sched;
1214	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1215		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1216
1217		/*
1218		 * When we have a dependency it might be necessary to insert a
1219		 * pipeline sync to make sure that all caches etc are flushed and the
1220		 * next job actually sees the results from the previous one
1221		 * before we start executing on the same scheduler ring.
1222		 */
1223		if (!s_fence || s_fence->sched != sched) {
1224			dma_fence_put(fence);
1225			continue;
1226		}
1227
1228		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1229		dma_fence_put(fence);
1230		if (r)
1231			return r;
1232	}
1233	return 0;
1234}
1235
1236static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1237{
1238	int i;
1239
1240	for (i = 0; i < p->num_post_deps; ++i) {
1241		if (p->post_deps[i].chain && p->post_deps[i].point) {
1242			drm_syncobj_add_point(p->post_deps[i].syncobj,
1243					      p->post_deps[i].chain,
1244					      p->fence, p->post_deps[i].point);
1245			p->post_deps[i].chain = NULL;
1246		} else {
1247			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1248						  p->fence);
1249		}
1250	}
1251}
1252
1253static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1254			    union drm_amdgpu_cs *cs)
1255{
1256	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1257	struct amdgpu_job *leader = p->gang_leader;
1258	struct amdgpu_bo_list_entry *e;
1259	struct drm_gem_object *gobj;
1260	unsigned long index;
1261	unsigned int i;
1262	uint64_t seq;
1263	int r;
1264
1265	for (i = 0; i < p->gang_size; ++i)
1266		drm_sched_job_arm(&p->jobs[i]->base);
1267
1268	for (i = 0; i < p->gang_size; ++i) {
1269		struct dma_fence *fence;
1270
1271		if (p->jobs[i] == leader)
1272			continue;
1273
1274		fence = &p->jobs[i]->base.s_fence->scheduled;
1275		dma_fence_get(fence);
1276		r = drm_sched_job_add_dependency(&leader->base, fence);
1277		if (r) {
1278			dma_fence_put(fence);
1279			return r;
1280		}
1281	}
1282
1283	if (p->gang_size > 1) {
1284		for (i = 0; i < p->gang_size; ++i)
1285			amdgpu_job_set_gang_leader(p->jobs[i], leader);
1286	}
1287
1288	/* No memory allocation is allowed while holding the notifier lock.
1289	 * The lock is held until amdgpu_cs_submit() is finished and the fence is
1290	 * added to the BOs.
1291	 */
1292	mutex_lock(&p->adev->notifier_lock);
1293
1294	/* If the userptrs were invalidated after amdgpu_cs_parser_bos(), return
1295	 * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs_ioctl.
1296	 */
1297	r = 0;
1298	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1299		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
1300							e->range);
1301		e->range = NULL;
1302	}
1303	if (r) {
1304		r = -EAGAIN;
1305		mutex_unlock(&p->adev->notifier_lock);
1306		return r;
1307	}
1308
1309	p->fence = dma_fence_get(&leader->base.s_fence->finished);
1310	drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1311
1312		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1313
1314		/* Everybody except for the gang leader uses READ */
1315		for (i = 0; i < p->gang_size; ++i) {
1316			if (p->jobs[i] == leader)
1317				continue;
1318
1319			dma_resv_add_fence(gobj->resv,
1320					   &p->jobs[i]->base.s_fence->finished,
1321					   DMA_RESV_USAGE_READ);
1322		}
1323
1324		/* The gang leader is remembered as writer */
1325		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1326	}
1327
1328	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1329				   p->fence);
1330	amdgpu_cs_post_dependencies(p);
1331
1332	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1333	    !p->ctx->preamble_presented) {
1334		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1335		p->ctx->preamble_presented = true;
1336	}
1337
1338	cs->out.handle = seq;
1339	leader->uf_sequence = seq;
1340
1341	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1342	for (i = 0; i < p->gang_size; ++i) {
1343		amdgpu_job_free_resources(p->jobs[i]);
1344		trace_amdgpu_cs_ioctl(p->jobs[i]);
1345		drm_sched_entity_push_job(&p->jobs[i]->base);
1346		p->jobs[i] = NULL;
1347	}
1348
1349	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1350
1351	mutex_unlock(&p->adev->notifier_lock);
1352	mutex_unlock(&p->bo_list->bo_list_mutex);
1353	return 0;
1354}
1355
1356/* Cleanup the parser structure */
1357static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1358{
1359	unsigned int i;
1360
1361	amdgpu_sync_free(&parser->sync);
1362	drm_exec_fini(&parser->exec);
1363
1364	for (i = 0; i < parser->num_post_deps; i++) {
1365		drm_syncobj_put(parser->post_deps[i].syncobj);
1366		kfree(parser->post_deps[i].chain);
1367	}
1368	kfree(parser->post_deps);
1369
1370	dma_fence_put(parser->fence);
1371
1372	if (parser->ctx)
1373		amdgpu_ctx_put(parser->ctx);
1374	if (parser->bo_list)
1375		amdgpu_bo_list_put(parser->bo_list);
1376
1377	for (i = 0; i < parser->nchunks; i++)
1378		kvfree(parser->chunks[i].kdata);
1379	kvfree(parser->chunks);
1380	for (i = 0; i < parser->gang_size; ++i) {
1381		if (parser->jobs[i])
1382			amdgpu_job_free(parser->jobs[i]);
1383	}
1384	amdgpu_bo_unref(&parser->uf_bo);
1385}
1386
1387int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1388{
1389	struct amdgpu_device *adev = drm_to_adev(dev);
1390	struct amdgpu_cs_parser parser;
1391	int r;
1392
1393	if (amdgpu_ras_intr_triggered())
1394		return -EHWPOISON;
1395
1396	if (!adev->accel_working)
1397		return -EBUSY;
1398
1399	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1400	if (r) {
1401		DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
1402		return r;
1403	}
1404
1405	r = amdgpu_cs_pass1(&parser, data);
1406	if (r)
1407		goto error_fini;
1408
1409	r = amdgpu_cs_pass2(&parser);
1410	if (r)
1411		goto error_fini;
1412
1413	r = amdgpu_cs_parser_bos(&parser, data);
1414	if (r) {
1415		if (r == -ENOMEM)
1416			DRM_ERROR("Not enough memory for command submission!\n");
1417		else if (r != -ERESTARTSYS && r != -EAGAIN)
1418			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
1419		goto error_fini;
1420	}
1421
1422	r = amdgpu_cs_patch_jobs(&parser);
1423	if (r)
1424		goto error_backoff;
1425
1426	r = amdgpu_cs_vm_handling(&parser);
1427	if (r)
1428		goto error_backoff;
1429
1430	r = amdgpu_cs_sync_rings(&parser);
1431	if (r)
1432		goto error_backoff;
1433
1434	trace_amdgpu_cs_ibs(&parser);
1435
1436	r = amdgpu_cs_submit(&parser, data);
1437	if (r)
1438		goto error_backoff;
1439
1440	amdgpu_cs_parser_fini(&parser);
1441	return 0;
1442
1443error_backoff:
1444	mutex_unlock(&parser.bo_list->bo_list_mutex);
1445
1446error_fini:
1447	amdgpu_cs_parser_fini(&parser);
1448	return r;
1449}
1450
1451/**
1452 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1453 *
1454 * @dev: drm device
1455 * @data: data from userspace
1456 * @filp: file private
1457 *
1458 * Wait for the command submission identified by handle to finish.
1459 */
1460int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1461			 struct drm_file *filp)
1462{
1463	union drm_amdgpu_wait_cs *wait = data;
1464	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1465	struct drm_sched_entity *entity;
1466	struct amdgpu_ctx *ctx;
1467	struct dma_fence *fence;
1468	long r;
1469
1470	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1471	if (ctx == NULL)
1472		return -EINVAL;
1473
1474	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1475				  wait->in.ring, &entity);
1476	if (r) {
1477		amdgpu_ctx_put(ctx);
1478		return r;
1479	}
1480
1481	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1482	if (IS_ERR(fence))
1483		r = PTR_ERR(fence);
1484	else if (fence) {
1485		r = dma_fence_wait_timeout(fence, true, timeout);
1486		if (r > 0 && fence->error)
1487			r = fence->error;
1488		dma_fence_put(fence);
1489	} else
1490		r = 1;
1491
1492	amdgpu_ctx_put(ctx);
1493	if (r < 0)
1494		return r;
1495
1496	memset(wait, 0, sizeof(*wait));
1497	wait->out.status = (r == 0);
1498
1499	return 0;
1500}
1501
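/*
 * Illustration (not part of this driver): a minimal userspace-side sketch of
 * the wait ioctl handled above.  The sequence number is the cs.out.handle
 * value returned by DRM_IOCTL_AMDGPU_CS; fd, ctx_id and the timeout are
 * placeholder parameters (the timeout is interpreted by amdgpu_gem_timeout()),
 * and the include paths depend on how libdrm is installed.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>			/* drmIoctl(), from libdrm */
#include <libdrm/amdgpu_drm.h>

/* Returns 1 when the submission has finished, 0 on timeout, -1 on error. */
static int example_wait_cs(int fd, uint32_t ctx_id, uint64_t seq_no,
			   uint64_t timeout_ns)
{
	union drm_amdgpu_wait_cs wait;

	memset(&wait, 0, sizeof(wait));
	wait.in.handle = seq_no;		/* from drm_amdgpu_cs_out.handle */
	wait.in.ctx_id = ctx_id;
	wait.in.ip_type = AMDGPU_HW_IP_GFX;	/* instance and ring left at 0 */
	wait.in.timeout = timeout_ns;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait))
		return -1;

	/* out.status is non-zero while the fence has not signaled yet. */
	return wait.out.status ? 0 : 1;
}
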
1502/**
1503 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1504 *
1505 * @adev: amdgpu device
1506 * @filp: file private
1507 * @user: drm_amdgpu_fence copied from user space
1508 */
1509static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1510					     struct drm_file *filp,
1511					     struct drm_amdgpu_fence *user)
1512{
1513	struct drm_sched_entity *entity;
1514	struct amdgpu_ctx *ctx;
1515	struct dma_fence *fence;
1516	int r;
1517
1518	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1519	if (ctx == NULL)
1520		return ERR_PTR(-EINVAL);
1521
1522	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1523				  user->ring, &entity);
1524	if (r) {
1525		amdgpu_ctx_put(ctx);
1526		return ERR_PTR(r);
1527	}
1528
1529	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1530	amdgpu_ctx_put(ctx);
1531
1532	return fence;
1533}
1534
1535int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1536				    struct drm_file *filp)
1537{
1538	struct amdgpu_device *adev = drm_to_adev(dev);
1539	union drm_amdgpu_fence_to_handle *info = data;
1540	struct dma_fence *fence;
1541	struct drm_syncobj *syncobj;
1542	struct sync_file *sync_file;
1543	int fd, r;
1544
1545	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1546	if (IS_ERR(fence))
1547		return PTR_ERR(fence);
1548
1549	if (!fence)
1550		fence = dma_fence_get_stub();
1551
1552	switch (info->in.what) {
1553	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1554		r = drm_syncobj_create(&syncobj, 0, fence);
1555		dma_fence_put(fence);
1556		if (r)
1557			return r;
1558		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1559		drm_syncobj_put(syncobj);
1560		return r;
1561
1562	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1563		r = drm_syncobj_create(&syncobj, 0, fence);
1564		dma_fence_put(fence);
1565		if (r)
1566			return r;
1567		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1568		drm_syncobj_put(syncobj);
1569		return r;
1570
1571	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1572		fd = get_unused_fd_flags(O_CLOEXEC);
1573		if (fd < 0) {
1574			dma_fence_put(fence);
1575			return fd;
1576		}
1577
1578		sync_file = sync_file_create(fence);
1579		dma_fence_put(fence);
1580		if (!sync_file) {
1581			put_unused_fd(fd);
1582			return -ENOMEM;
1583		}
1584
1585		fd_install(fd, sync_file->file);
1586		info->out.handle = fd;
1587		return 0;
1588
1589	default:
1590		dma_fence_put(fence);
1591		return -EINVAL;
1592	}
1593}
1594
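/*
 * Illustration (not part of this driver): a userspace-side sketch of
 * exporting a submission's fence as a sync_file fd through the ioctl above,
 * using the same headers as the wait sketch earlier (<string.h>, <xf86drm.h>
 * and <libdrm/amdgpu_drm.h>).  fd, ctx_id and seq_no are placeholders; the
 * same call with AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ or _GET_SYNCOBJ_FD
 * returns a syncobj handle or fd instead.
 */
static int example_fence_to_sync_file(int fd, uint32_t ctx_id, uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle info;

	memset(&info, 0, sizeof(info));
	info.in.fence.ctx_id = ctx_id;
	info.in.fence.ip_type = AMDGPU_HW_IP_GFX;	/* instance and ring 0 */
	info.in.fence.seq_no = seq_no;
	info.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &info))
		return -1;

	return (int)info.out.handle;	/* a pollable sync_file fd */
}
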
1595/**
1596 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1597 *
1598 * @adev: amdgpu device
1599 * @filp: file private
1600 * @wait: wait parameters
1601 * @fences: array of drm_amdgpu_fence
1602 */
1603static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1604				     struct drm_file *filp,
1605				     union drm_amdgpu_wait_fences *wait,
1606				     struct drm_amdgpu_fence *fences)
1607{
1608	uint32_t fence_count = wait->in.fence_count;
1609	unsigned int i;
1610	long r = 1;
1611
1612	for (i = 0; i < fence_count; i++) {
1613		struct dma_fence *fence;
1614		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1615
1616		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1617		if (IS_ERR(fence))
1618			return PTR_ERR(fence);
1619		else if (!fence)
1620			continue;
1621
1622		r = dma_fence_wait_timeout(fence, true, timeout);
1623		if (r > 0 && fence->error)
1624			r = fence->error;
1625
1626		dma_fence_put(fence);
1627		if (r < 0)
1628			return r;
1629
1630		if (r == 0)
1631			break;
1632	}
1633
1634	memset(wait, 0, sizeof(*wait));
1635	wait->out.status = (r > 0);
1636
1637	return 0;
1638}
1639
1640/**
1641 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1642 *
1643 * @adev: amdgpu device
1644 * @filp: file private
1645 * @wait: wait parameters
1646 * @fences: array of drm_amdgpu_fence
1647 */
1648static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1649				    struct drm_file *filp,
1650				    union drm_amdgpu_wait_fences *wait,
1651				    struct drm_amdgpu_fence *fences)
1652{
1653	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1654	uint32_t fence_count = wait->in.fence_count;
1655	uint32_t first = ~0;
1656	struct dma_fence **array;
1657	unsigned int i;
1658	long r;
1659
1660	/* Prepare the fence array */
1661	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1662
1663	if (array == NULL)
1664		return -ENOMEM;
1665
1666	for (i = 0; i < fence_count; i++) {
1667		struct dma_fence *fence;
1668
1669		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1670		if (IS_ERR(fence)) {
1671			r = PTR_ERR(fence);
1672			goto err_free_fence_array;
1673		} else if (fence) {
1674			array[i] = fence;
1675		} else { /* NULL, the fence has been already signaled */
1676			r = 1;
1677			first = i;
1678			goto out;
1679		}
1680	}
1681
1682	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1683				       &first);
1684	if (r < 0)
1685		goto err_free_fence_array;
1686
1687out:
1688	memset(wait, 0, sizeof(*wait));
1689	wait->out.status = (r > 0);
1690	wait->out.first_signaled = first;
1691
1692	if (first < fence_count && array[first])
1693		r = array[first]->error;
1694	else
1695		r = 0;
1696
1697err_free_fence_array:
1698	for (i = 0; i < fence_count; i++)
1699		dma_fence_put(array[i]);
1700	kfree(array);
1701
1702	return r;
1703}
1704
1705/**
1706 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1707 *
1708 * @dev: drm device
1709 * @data: data from userspace
1710 * @filp: file private
1711 */
1712int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1713				struct drm_file *filp)
1714{
1715	struct amdgpu_device *adev = drm_to_adev(dev);
1716	union drm_amdgpu_wait_fences *wait = data;
1717	uint32_t fence_count = wait->in.fence_count;
1718	struct drm_amdgpu_fence *fences_user;
1719	struct drm_amdgpu_fence *fences;
1720	int r;
1721
1722	/* Get the fences from userspace */
1723	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1724			GFP_KERNEL);
1725	if (fences == NULL)
1726		return -ENOMEM;
1727
1728	fences_user = u64_to_user_ptr(wait->in.fences);
1729	if (copy_from_user(fences, fences_user,
1730		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1731		r = -EFAULT;
1732		goto err_free_fences;
1733	}
1734
1735	if (wait->in.wait_all)
1736		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1737	else
1738		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1739
1740err_free_fences:
1741	kfree(fences);
1742
1743	return r;
1744}
1745
1746/**
1747 * amdgpu_cs_find_mapping - find bo_va for VM address
1748 *
1749 * @parser: command submission parser context
1750 * @addr: VM address
1751 * @bo: resulting BO of the mapping found
1752 * @map: Placeholder to return found BO mapping
1753 *
1754 * Search the buffer objects of the command submission for a certain
1755 * virtual memory address. Returns 0 and fills in @bo and @map when a
1756 * mapping is found, -EINVAL otherwise.
1757 */
1758int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1759			   uint64_t addr, struct amdgpu_bo **bo,
1760			   struct amdgpu_bo_va_mapping **map)
1761{
1762	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1763	struct ttm_operation_ctx ctx = { false, false };
1764	struct amdgpu_vm *vm = &fpriv->vm;
1765	struct amdgpu_bo_va_mapping *mapping;
1766	int r;
1767
1768	addr /= AMDGPU_GPU_PAGE_SIZE;
1769
1770	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1771	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1772		return -EINVAL;
1773
1774	*bo = mapping->bo_va->base.bo;
1775	*map = mapping;
1776
1777	/* Double check that the BO is reserved by this CS */
1778	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1779		return -EINVAL;
1780
1781	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1782		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1783		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1784		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1785		if (r)
1786			return r;
1787	}
1788
1789	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1790}
v5.4
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
 
  31
  32#include <drm/amdgpu_drm.h>
  33#include <drm/drm_syncobj.h>
 
 
 
  34#include "amdgpu.h"
  35#include "amdgpu_trace.h"
  36#include "amdgpu_gmc.h"
  37#include "amdgpu_gem.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  38
  39static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  40				      struct drm_amdgpu_cs_chunk_fence *data,
  41				      uint32_t *offset)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  42{
  43	struct drm_gem_object *gobj;
  44	struct amdgpu_bo *bo;
  45	unsigned long size;
  46	int r;
  47
  48	gobj = drm_gem_object_lookup(p->filp, data->handle);
  49	if (gobj == NULL)
  50		return -EINVAL;
  51
  52	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
  53	p->uf_entry.priority = 0;
  54	p->uf_entry.tv.bo = &bo->tbo;
  55	/* One for TTM and one for the CS job */
  56	p->uf_entry.tv.num_shared = 2;
  57
  58	drm_gem_object_put_unlocked(gobj);
  59
  60	size = amdgpu_bo_size(bo);
  61	if (size != PAGE_SIZE || (data->offset + 8) > size) {
  62		r = -EINVAL;
  63		goto error_unref;
  64	}
  65
  66	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
  67		r = -EINVAL;
  68		goto error_unref;
  69	}
  70
  71	*offset = data->offset;
  72
  73	return 0;
  74
  75error_unref:
  76	amdgpu_bo_unref(&bo);
  77	return r;
  78}
  79
  80static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
  81				      struct drm_amdgpu_bo_list_in *data)
  82{
 
  83	int r;
  84	struct drm_amdgpu_bo_list_entry *info = NULL;
  85
  86	r = amdgpu_bo_create_list_entry_array(data, &info);
  87	if (r)
  88		return r;
  89
  90	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
  91				  &p->bo_list);
  92	if (r)
  93		goto error_free;
  94
  95	kvfree(info);
  96	return 0;
  97
  98error_free:
  99	if (info)
 100		kvfree(info);
 101
 102	return r;
 103}
 104
 105static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 
 
 106{
 107	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 
 108	struct amdgpu_vm *vm = &fpriv->vm;
 109	uint64_t *chunk_array_user;
 110	uint64_t *chunk_array;
 111	unsigned size, num_ibs = 0;
 112	uint32_t uf_offset = 0;
 
 
 113	int i;
 114	int ret;
 115
 116	if (cs->in.num_chunks == 0)
 117		return 0;
 118
 119	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 120	if (!chunk_array)
 121		return -ENOMEM;
 122
 123	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 124	if (!p->ctx) {
 125		ret = -EINVAL;
 126		goto free_chunk;
 127	}
 128
 129	mutex_lock(&p->ctx->lock);
 130
 131	/* skip guilty context job */
 132	if (atomic_read(&p->ctx->guilty) == 1) {
 133		ret = -ECANCELED;
 134		goto free_chunk;
 135	}
 136
 137	/* get chunks */
 138	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 139	if (copy_from_user(chunk_array, chunk_array_user,
 140			   sizeof(uint64_t)*cs->in.num_chunks)) {
 141		ret = -EFAULT;
 142		goto free_chunk;
 143	}
 144
 145	p->nchunks = cs->in.num_chunks;
 146	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 147			    GFP_KERNEL);
 148	if (!p->chunks) {
 149		ret = -ENOMEM;
 150		goto free_chunk;
 151	}
 152
 153	for (i = 0; i < p->nchunks; i++) {
 154		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 155		struct drm_amdgpu_cs_chunk user_chunk;
 156		uint32_t __user *cdata;
 157
 158		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 159		if (copy_from_user(&user_chunk, chunk_ptr,
 160				       sizeof(struct drm_amdgpu_cs_chunk))) {
 161			ret = -EFAULT;
 162			i--;
 163			goto free_partial_kdata;
 164		}
 165		p->chunks[i].chunk_id = user_chunk.chunk_id;
 166		p->chunks[i].length_dw = user_chunk.length_dw;
 167
 168		size = p->chunks[i].length_dw;
 169		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 170
 171		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 172		if (p->chunks[i].kdata == NULL) {
 173			ret = -ENOMEM;
 174			i--;
 175			goto free_partial_kdata;
 176		}
 177		size *= sizeof(uint32_t);
 178		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 179			ret = -EFAULT;
 180			goto free_partial_kdata;
 181		}
 182
 183		switch (p->chunks[i].chunk_id) {
 184		case AMDGPU_CHUNK_ID_IB:
 185			++num_ibs;
 186			break;
 187
 188		case AMDGPU_CHUNK_ID_FENCE:
 189			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 190			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 191				ret = -EINVAL;
 192				goto free_partial_kdata;
 193			}
 194
 195			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 196							 &uf_offset);
 197			if (ret)
 198				goto free_partial_kdata;
 199
 200			break;
 201
 202		case AMDGPU_CHUNK_ID_BO_HANDLES:
 203			size = sizeof(struct drm_amdgpu_bo_list_in);
 204			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 205				ret = -EINVAL;
 206				goto free_partial_kdata;
 207			}
 208
 209			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
 210			if (ret)
 211				goto free_partial_kdata;
 212
 213			break;
 214
 215		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 216		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 217		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 218		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 219		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 220		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 221			break;
 222
 223		default:
 224			ret = -EINVAL;
 225			goto free_partial_kdata;
 226		}
 227	}
 228
 229	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 230	if (ret)
 231		goto free_all_kdata;
 232
 233	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
 234		ret = -ECANCELED;
 235		goto free_all_kdata;
 236	}
 237
 238	if (p->uf_entry.tv.bo)
 239		p->job->uf_addr = uf_offset;
 240	kfree(chunk_array);
 241
 242	/* Use this opportunity to fill in task info for the vm */
 243	amdgpu_vm_set_task_info(vm);
 244
 245	return 0;
 246
 247free_all_kdata:
 248	i = p->nchunks - 1;
 249free_partial_kdata:
 250	for (; i >= 0; i--)
 251		kvfree(p->chunks[i].kdata);
 252	kfree(p->chunks);
 253	p->chunks = NULL;
 254	p->nchunks = 0;
 255free_chunk:
 256	kfree(chunk_array);
 257
 258	return ret;
 259}
 260
 261/* Convert microseconds to bytes. */
 262static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 263{
 264	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 265		return 0;
 266
 267	/* Since accum_us is incremented by a million per second, just
 268	 * multiply it by the number of MB/s to get the number of bytes.
 269	 */
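	/* Illustration (numbers are only an example): with
	 * log2_max_MBps == 6 (64 MB/s), one accumulated second
	 * (1,000,000 us) converts to 1,000,000 << 6 = 64,000,000 bytes.
	 */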
 270	return us << adev->mm_stats.log2_max_MBps;
 271}
 272
 273static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 274{
 275	if (!adev->mm_stats.log2_max_MBps)
 276		return 0;
 277
 278	return bytes >> adev->mm_stats.log2_max_MBps;
 279}
 280
 281/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 282 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 283 * which means it can go over the threshold once. If that happens, the driver
 284 * will be in debt and no other buffer migrations can be done until that debt
 285 * is repaid.
 286 *
 287 * This approach allows moving a buffer of any size (it's important to allow
 288 * that).
 289 *
 290 * The currency is simply time in microseconds and it increases as the clock
 291 * ticks. The accumulated microseconds (us) are converted to bytes and
 292 * returned.
 293 */
 294static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 295					      u64 *max_bytes,
 296					      u64 *max_vis_bytes)
 297{
 298	s64 time_us, increment_us;
 299	u64 free_vram, total_vram, used_vram;
 300
 301	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 302	 * throttling.
 303	 *
 304	 * It means that in order to get full max MBps, at least 5 IBs per
 305	 * second must be submitted and not more than 200ms apart from each
 306	 * other.
 307	 */
 308	const s64 us_upper_bound = 200000;
 309
 310	if (!adev->mm_stats.log2_max_MBps) {
 311		*max_bytes = 0;
 312		*max_vis_bytes = 0;
 313		return;
 314	}
 315
 316	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 317	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 318	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 319
 320	spin_lock(&adev->mm_stats.lock);
 321
 322	/* Increase the amount of accumulated us. */
 323	time_us = ktime_to_us(ktime_get());
 324	increment_us = time_us - adev->mm_stats.last_update_us;
 325	adev->mm_stats.last_update_us = time_us;
 326	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
  327				      us_upper_bound);
 328
 329	/* This prevents the short period of low performance when the VRAM
 330	 * usage is low and the driver is in debt or doesn't have enough
 331	 * accumulated us to fill VRAM quickly.
 332	 *
 333	 * The situation can occur in these cases:
 334	 * - a lot of VRAM is freed by userspace
 335	 * - the presence of a big buffer causes a lot of evictions
 336	 *   (solution: split buffers into smaller ones)
 337	 *
 338	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 339	 * accum_us to a positive number.
 340	 */
 341	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 342		s64 min_us;
 343
  344		/* Be more aggressive on dGPUs. Try to fill a portion of free
 345		 * VRAM now.
 346		 */
 347		if (!(adev->flags & AMD_IS_APU))
 348			min_us = bytes_to_us(adev, free_vram / 4);
 349		else
 350			min_us = 0; /* Reset accum_us on APUs. */
 351
 352		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 353	}
 354
 355	/* This is set to 0 if the driver is in debt to disallow (optional)
 356	 * buffer moves.
 357	 */
 358	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 359
 360	/* Do the same for visible VRAM if half of it is free */
 361	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 362		u64 total_vis_vram = adev->gmc.visible_vram_size;
 363		u64 used_vis_vram =
 364			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 365
 366		if (used_vis_vram < total_vis_vram) {
 367			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 368			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 369							  increment_us, us_upper_bound);
 370
 371			if (free_vis_vram >= total_vis_vram / 2)
 372				adev->mm_stats.accum_us_vis =
 373					max(bytes_to_us(adev, free_vis_vram / 2),
 374					    adev->mm_stats.accum_us_vis);
 375		}
 376
 377		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 378	} else {
 379		*max_vis_bytes = 0;
 380	}
 381
 382	spin_unlock(&adev->mm_stats.lock);
 383}
 384
 385/* Report how many bytes have really been moved for the last command
 386 * submission. This can result in a debt that can stop buffer migrations
 387 * temporarily.
 388 */
 389void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 390				  u64 num_vis_bytes)
 391{
 392	spin_lock(&adev->mm_stats.lock);
 393	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 394	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 395	spin_unlock(&adev->mm_stats.lock);
 396}
 397
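/* Validate a single BO against the move budget: use the preferred domains
 * while p->bytes_moved is below the threshold (and, for CPU_ACCESS_REQUIRED
 * BOs, while the visible VRAM budget allows it), otherwise fall back to the
 * allowed domains. Retries with the allowed domains on -ENOMEM.
 */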
 398static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 399				 struct amdgpu_bo *bo)
 400{
 401	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 402	struct ttm_operation_ctx ctx = {
 403		.interruptible = true,
 404		.no_wait_gpu = false,
 405		.resv = bo->tbo.base.resv,
 406		.flags = 0
 407	};
 408	uint32_t domain;
 409	int r;
 410
 411	if (bo->pin_count)
 412		return 0;
 413
 414	/* Don't move this buffer if we have depleted our allowance
 415	 * to move it. Don't move anything if the threshold is zero.
 416	 */
 417	if (p->bytes_moved < p->bytes_moved_threshold) {
 418		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 419		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 420			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 421			 * visible VRAM if we've depleted our allowance to do
 422			 * that.
 423			 */
 424			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 425				domain = bo->preferred_domains;
 426			else
 427				domain = bo->allowed_domains;
 428		} else {
 429			domain = bo->preferred_domains;
 430		}
 431	} else {
 432		domain = bo->allowed_domains;
 433	}
 434
 435retry:
 436	amdgpu_bo_placement_from_domain(bo, domain);
 437	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 438
 439	p->bytes_moved += ctx.bytes_moved;
 440	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 441	    amdgpu_bo_in_cpu_visible_vram(bo))
 442		p->bytes_moved_vis += ctx.bytes_moved;
 443
 444	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 445		domain = bo->allowed_domains;
 446		goto retry;
 447	}
 448
 449	return r;
 450}
 451
 452/* Last resort, try to evict something from the current working set */
 453static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 454				struct amdgpu_bo *validated)
 455{
 456	uint32_t domain = validated->allowed_domains;
 457	struct ttm_operation_ctx ctx = { true, false };
 458	int r;
 459
 460	if (!p->evictable)
 461		return false;
 462
  463	for (; &p->evictable->tv.head != &p->validated;
 464	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
 465
 466		struct amdgpu_bo_list_entry *candidate = p->evictable;
 467		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
 468		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 469		bool update_bytes_moved_vis;
 470		uint32_t other;
 471
 472		/* If we reached our current BO we can forget it */
 473		if (bo == validated)
 474			break;
 475
 476		/* We can't move pinned BOs here */
 477		if (bo->pin_count)
 478			continue;
 479
 480		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 481
 482		/* Check if this BO is in one of the domains we need space for */
 483		if (!(other & domain))
 484			continue;
 485
 486		/* Check if we can move this BO somewhere else */
 487		other = bo->allowed_domains & ~domain;
 488		if (!other)
 489			continue;
 490
 491		/* Good we can try to move this BO somewhere else */
 492		update_bytes_moved_vis =
 493				!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 494				amdgpu_bo_in_cpu_visible_vram(bo);
 495		amdgpu_bo_placement_from_domain(bo, other);
 496		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 497		p->bytes_moved += ctx.bytes_moved;
 498		if (update_bytes_moved_vis)
 499			p->bytes_moved_vis += ctx.bytes_moved;
 500
 501		if (unlikely(r))
 502			break;
 503
 504		p->evictable = list_prev_entry(p->evictable, tv.head);
 505		list_move(&candidate->tv.head, &p->validated);
 506
 507		return true;
 508	}
 509
 510	return false;
 511}
 512
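/* Validation callback used for the page table BOs and the BO lists: keep
 * validating and, as a last resort, evict BOs from the current working set
 * until the validation no longer fails with -ENOMEM, then validate the
 * shadow BO as well.
 */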
 513static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 514{
 515	struct amdgpu_cs_parser *p = param;
 516	int r;
 517
 518	do {
 519		r = amdgpu_cs_bo_validate(p, bo);
 520	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
 521	if (r)
 522		return r;
 523
 524	if (bo->shadow)
 525		r = amdgpu_cs_bo_validate(p, bo->shadow);
 526
 527	return r;
 528}
 529
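/* Validate every BO on the given list: reject userptr BOs owned by another
 * process, re-bind userptr BOs whose backing pages were invalidated (CPU
 * placement round trip plus amdgpu_ttm_tt_set_user_pages) and validate each
 * BO against the move budget.
 */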
 530static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 531			    struct list_head *validated)
 532{
 533	struct ttm_operation_ctx ctx = { true, false };
 534	struct amdgpu_bo_list_entry *lobj;
 535	int r;
 536
 537	list_for_each_entry(lobj, validated, tv.head) {
 538		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
 539		struct mm_struct *usermm;
 540
 541		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 542		if (usermm && usermm != current->mm)
 543			return -EPERM;
 544
 545		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
 546		    lobj->user_invalidated && lobj->user_pages) {
 547			amdgpu_bo_placement_from_domain(bo,
 548							AMDGPU_GEM_DOMAIN_CPU);
 549			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 550			if (r)
 551				return r;
 552
 553			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 554						     lobj->user_pages);
 555		}
 556
 557		if (p->evictable == lobj)
 558			p->evictable = NULL;
 559
 560		r = amdgpu_cs_validate(p, bo);
 561		if (r)
 562			return r;
 563
 564		kvfree(lobj->user_pages);
 565		lobj->user_pages = NULL;
 566	}
 567	return 0;
 568}
 569
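/**
 * amdgpu_cs_parser_bos - gather, reserve and validate all BOs of the CS
 * @p: parser context
 * @cs: CS ioctl data from userspace
 *
 * Builds the BO list (from the list handle or the BO_HANDLES chunk), grabs
 * the userptr backing pages, reserves all BOs with a ticket, validates them
 * and the page table BOs against the move threshold and fills in the
 * GDS/GWS/OA and user fence addresses of the job.
 */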
 570static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 571				union drm_amdgpu_cs *cs)
 572{
 573	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 574	struct amdgpu_vm *vm = &fpriv->vm;
 575	struct amdgpu_bo_list_entry *e;
 576	struct list_head duplicates;
 577	struct amdgpu_bo *gds;
 578	struct amdgpu_bo *gws;
 579	struct amdgpu_bo *oa;
 580	int r;
 581
 582	INIT_LIST_HEAD(&p->validated);
 583
 584	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 585	if (cs->in.bo_list_handle) {
 586		if (p->bo_list)
 587			return -EINVAL;
 588
 589		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 590				       &p->bo_list);
 591		if (r)
 592			return r;
 593	} else if (!p->bo_list) {
  594		/* Create an empty bo_list when no handle is provided */
 595		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 596					  &p->bo_list);
 597		if (r)
 598			return r;
 599	}
 600
 601	/* One for TTM and one for the CS job */
 602	amdgpu_bo_list_for_each_entry(e, p->bo_list)
 603		e->tv.num_shared = 2;
 604
 605	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 606	if (p->bo_list->first_userptr != p->bo_list->num_entries)
 607		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
 608
 609	INIT_LIST_HEAD(&duplicates);
 610	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 611
 612	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 613		list_add(&p->uf_entry.tv.head, &p->validated);
 614
 615	/* Get userptr backing pages. If pages are updated after registered
 616	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
 617	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
 618	 */
 619	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 620		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 621		bool userpage_invalidated = false;
 622		int i;
 623
 624		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 625					sizeof(struct page *),
 626					GFP_KERNEL | __GFP_ZERO);
 627		if (!e->user_pages) {
  628			DRM_ERROR("kvmalloc_array failure\n");
 629			return -ENOMEM;
 630		}
 631
 632		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
 633		if (r) {
 634			kvfree(e->user_pages);
 635			e->user_pages = NULL;
 636			return r;
 637		}
 638
 639		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 640			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 641				userpage_invalidated = true;
 642				break;
 643			}
 644		}
 645		e->user_invalidated = userpage_invalidated;
 646	}
 647
 648	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 649				   &duplicates, false);
 650	if (unlikely(r != 0)) {
 651		if (r != -ERESTARTSYS)
 652			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 653		goto out;
 654	}
 655
 656	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 657					  &p->bytes_moved_vis_threshold);
 658	p->bytes_moved = 0;
 659	p->bytes_moved_vis = 0;
 660	p->evictable = list_last_entry(&p->validated,
 661				       struct amdgpu_bo_list_entry,
 662				       tv.head);
 663
 664	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 665				      amdgpu_cs_validate, p);
 666	if (r) {
 667		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 668		goto error_validate;
 669	}
 670
 671	r = amdgpu_cs_list_validate(p, &duplicates);
 672	if (r)
 673		goto error_validate;
 674
 675	r = amdgpu_cs_list_validate(p, &p->validated);
 676	if (r)
 677		goto error_validate;
 678
 679	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 680				     p->bytes_moved_vis);
 681
 682	gds = p->bo_list->gds_obj;
 683	gws = p->bo_list->gws_obj;
 684	oa = p->bo_list->oa_obj;
 685
 686	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 687		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 688
 689		/* Make sure we use the exclusive slot for shared BOs */
 690		if (bo->prime_shared_count)
 691			e->tv.num_shared = 0;
 692		e->bo_va = amdgpu_vm_bo_find(vm, bo);
 693	}
 694
 695	if (gds) {
 696		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
 697		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
 698	}
 699	if (gws) {
 700		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
 701		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
 702	}
 703	if (oa) {
 704		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
 705		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
 706	}
 707
 708	if (!r && p->uf_entry.tv.bo) {
 709		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 710
 711		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 712		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 713	}
 714
 715error_validate:
 716	if (r)
 717		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 718out:
 719	return r;
 720}
 721
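/* Make the job wait for the fences of all reserved BOs, honoring explicit
 * sync for BOs which request it.
 */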
 722static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 723{
 724	struct amdgpu_bo_list_entry *e;
 725	int r;
 726
 727	list_for_each_entry(e, &p->validated, tv.head) {
 728		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 729		struct dma_resv *resv = bo->tbo.base.resv;
 730
 731		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 732				     amdgpu_bo_explicit_sync(bo));
 733
 734		if (r)
 735			return r;
 736	}
 737	return 0;
 738}
 739
 740/**
  741 * amdgpu_cs_parser_fini() - clean parser states
 742 * @parser:	parser structure holding parsing context.
 743 * @error:	error number
 744 *
  745 * If error is set then back off the buffer reservations, otherwise just
  746 * free the memory used by the parsing context.
 747 **/
 748static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 749				  bool backoff)
 750{
 751	unsigned i;
 752
 753	if (error && backoff)
 754		ttm_eu_backoff_reservation(&parser->ticket,
 755					   &parser->validated);
 756
 757	for (i = 0; i < parser->num_post_deps; i++) {
 758		drm_syncobj_put(parser->post_deps[i].syncobj);
 759		kfree(parser->post_deps[i].chain);
 760	}
 761	kfree(parser->post_deps);
 762
 763	dma_fence_put(parser->fence);
 764
 765	if (parser->ctx) {
 766		mutex_unlock(&parser->ctx->lock);
 767		amdgpu_ctx_put(parser->ctx);
 768	}
 769	if (parser->bo_list)
 770		amdgpu_bo_list_put(parser->bo_list);
 771
 772	for (i = 0; i < parser->nchunks; i++)
 773		kvfree(parser->chunks[i].kdata);
 774	kfree(parser->chunks);
 775	if (parser->job)
 776		amdgpu_job_free(parser->job);
 777	if (parser->uf_entry.tv.bo) {
 778		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
 779
 780		amdgpu_bo_unref(&uf);
 781	}
 782}
 783
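/* Handle the VM side of the submission: patch or parse IBs for UVD/VCE VM
 * emulation, clear freed mappings, update the PRT, CSA and per-BO VAs,
 * handle moved BOs, update the page directories and make the job wait for
 * all resulting page table updates before syncing the rings.
 */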
 784static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 785{
 786	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 787	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 788	struct amdgpu_device *adev = p->adev;
 789	struct amdgpu_vm *vm = &fpriv->vm;
 790	struct amdgpu_bo_list_entry *e;
 791	struct amdgpu_bo_va *bo_va;
 792	struct amdgpu_bo *bo;
 793	int r;
 794
 795	/* Only for UVD/VCE VM emulation */
 796	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
 797		unsigned i, j;
 798
 799		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
 800			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 801			struct amdgpu_bo_va_mapping *m;
 802			struct amdgpu_bo *aobj = NULL;
 803			struct amdgpu_cs_chunk *chunk;
 804			uint64_t offset, va_start;
 805			struct amdgpu_ib *ib;
 806			uint8_t *kptr;
 807
 808			chunk = &p->chunks[i];
 809			ib = &p->job->ibs[j];
 810			chunk_ib = chunk->kdata;
 811
 812			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 813				continue;
 814
 815			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
 816			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
 817			if (r) {
 818				DRM_ERROR("IB va_start is invalid\n");
 819				return r;
 820			}
 821
 822			if ((va_start + chunk_ib->ib_bytes) >
 823			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 824				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 825				return -EINVAL;
 826			}
 827
 828			/* the IB should be reserved at this point */
 829			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 830			if (r) {
 831				return r;
 832			}
 833
 834			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 835			kptr += va_start - offset;
 836
 837			if (ring->funcs->parse_cs) {
 838				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 839				amdgpu_bo_kunmap(aobj);
 840
 841				r = amdgpu_ring_parse_cs(ring, p, j);
 842				if (r)
 843					return r;
 844			} else {
 845				ib->ptr = (uint32_t *)kptr;
 846				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
 847				amdgpu_bo_kunmap(aobj);
 848				if (r)
 849					return r;
 850			}
 851
 852			j++;
 853		}
 854	}
 855
 856	if (!p->job->vm)
 857		return amdgpu_cs_sync_rings(p);
 858
 859
 860	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 861	if (r)
 862		return r;
 863
 864	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 865	if (r)
 866		return r;
 867
 868	r = amdgpu_sync_fence(adev, &p->job->sync,
 869			      fpriv->prt_va->last_pt_update, false);
 870	if (r)
 871		return r;
 872
 873	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 874		struct dma_fence *f;
 875
 876		bo_va = fpriv->csa_va;
 877		BUG_ON(!bo_va);
 878		r = amdgpu_vm_bo_update(adev, bo_va, false);
 879		if (r)
 880			return r;
 881
 882		f = bo_va->last_pt_update;
 883		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
 884		if (r)
 885			return r;
 886	}
 887
 888	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 889		struct dma_fence *f;
 890
 891		/* ignore duplicates */
 892		bo = ttm_to_amdgpu_bo(e->tv.bo);
 893		if (!bo)
 894			continue;
 895
 896		bo_va = e->bo_va;
 897		if (bo_va == NULL)
 898			continue;
 899
 900		r = amdgpu_vm_bo_update(adev, bo_va, false);
 901		if (r)
 902			return r;
 903
 904		f = bo_va->last_pt_update;
 905		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
 906		if (r)
 907			return r;
 908	}
 909
 910	r = amdgpu_vm_handle_moved(adev, vm);
 911	if (r)
 912		return r;
 913
 914	r = amdgpu_vm_update_directories(adev, vm);
 915	if (r)
 916		return r;
 917
 918	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
 919	if (r)
 920		return r;
 921
 922	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
 923
 924	if (amdgpu_vm_debug) {
 925		/* Invalidate all BOs to test for userspace bugs */
 926		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 927			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 928
 929			/* ignore duplicates */
 930			if (!bo)
 931				continue;
 932
 933			amdgpu_vm_bo_invalidate(adev, bo, false);
 934		}
 935	}
 936
 937	return amdgpu_cs_sync_rings(p);
 938}
 939
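/* Fill the job IBs from the AMDGPU_CHUNK_ID_IB chunks: enforce the CE/DE
 * preemption limits on GFX, resolve the scheduler entity, allocate each IB
 * and finally wait for the previous fence of the context.
 */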
 940static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 941			     struct amdgpu_cs_parser *parser)
 942{
 943	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 944	struct amdgpu_vm *vm = &fpriv->vm;
 945	int r, ce_preempt = 0, de_preempt = 0;
 946	struct amdgpu_ring *ring;
 947	int i, j;
 948
 949	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 950		struct amdgpu_cs_chunk *chunk;
 951		struct amdgpu_ib *ib;
 952		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 953		struct drm_sched_entity *entity;
 954
 955		chunk = &parser->chunks[i];
 956		ib = &parser->job->ibs[j];
 957		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 958
 959		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 960			continue;
 961
 962		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 963		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
 964			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 965				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 966					ce_preempt++;
 967				else
 968					de_preempt++;
 969			}
 970
 971			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
 972			if (ce_preempt > 1 || de_preempt > 1)
 973				return -EINVAL;
 974		}
 975
 976		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
 977					  chunk_ib->ip_instance, chunk_ib->ring,
 978					  &entity);
 979		if (r)
 980			return r;
 981
 982		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 983			parser->job->preamble_status |=
 984				AMDGPU_PREAMBLE_IB_PRESENT;
 985
 986		if (parser->entity && parser->entity != entity)
 987			return -EINVAL;
 988
 989		parser->entity = entity;
 990
 991		ring = to_amdgpu_ring(entity->rq->sched);
 992		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
 993				   chunk_ib->ib_bytes : 0, ib);
 994		if (r) {
 995			DRM_ERROR("Failed to get ib !\n");
 996			return r;
 997		}
 998
 999		ib->gpu_addr = chunk_ib->va_start;
1000		ib->length_dw = chunk_ib->ib_bytes / 4;
1001		ib->flags = chunk_ib->flags;
1002
1003		j++;
1004	}
1005
1006	/* MM engine doesn't support user fences */
1007	ring = to_amdgpu_ring(parser->entity->rq->sched);
1008	if (parser->job->uf_addr && ring->funcs->no_user_fence)
1009		return -EINVAL;
1010
1011	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
1012}
1013
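/* Add the fences of a DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk as job
 * dependencies; for the latter only the scheduled fence is waited for, not
 * the finished one. The chunk holds
 * length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_dep) entries.
 */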
1014static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
1015				       struct amdgpu_cs_chunk *chunk)
1016{
1017	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1018	unsigned num_deps;
1019	int i, r;
1020	struct drm_amdgpu_cs_chunk_dep *deps;
1021
1022	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
1023	num_deps = chunk->length_dw * 4 /
1024		sizeof(struct drm_amdgpu_cs_chunk_dep);
1025
1026	for (i = 0; i < num_deps; ++i) {
1027		struct amdgpu_ctx *ctx;
1028		struct drm_sched_entity *entity;
1029		struct dma_fence *fence;
1030
1031		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
1032		if (ctx == NULL)
1033			return -EINVAL;
1034
1035		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
1036					  deps[i].ip_instance,
1037					  deps[i].ring, &entity);
1038		if (r) {
1039			amdgpu_ctx_put(ctx);
1040			return r;
1041		}
1042
1043		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
1044		amdgpu_ctx_put(ctx);
1045
1046		if (IS_ERR(fence))
1047			return PTR_ERR(fence);
1048		else if (!fence)
1049			continue;
1050
1051		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
1052			struct drm_sched_fence *s_fence;
1053			struct dma_fence *old = fence;
1054
1055			s_fence = to_drm_sched_fence(fence);
1056			fence = dma_fence_get(&s_fence->scheduled);
1057			dma_fence_put(old);
1058		}
1059
1060		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1061		dma_fence_put(fence);
1062		if (r)
1063			return r;
1064	}
1065	return 0;
1066}
1067
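/* Look up the fence behind a syncobj handle, optionally at a timeline
 * point, and add it as a dependency of the job.
 */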
1068static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1069						 uint32_t handle, u64 point,
1070						 u64 flags)
1071{
1072	struct dma_fence *fence;
1073	int r;
1074
1075	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
1076	if (r) {
1077		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
1078			  handle, point, r);
1079		return r;
1080	}
1081
1082	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1083	dma_fence_put(fence);
1084
1085	return r;
1086}
1087
1088static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1089					    struct amdgpu_cs_chunk *chunk)
1090{
1091	struct drm_amdgpu_cs_chunk_sem *deps;
1092	unsigned num_deps;
1093	int i, r;
1094
1095	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1096	num_deps = chunk->length_dw * 4 /
1097		sizeof(struct drm_amdgpu_cs_chunk_sem);
1098	for (i = 0; i < num_deps; ++i) {
1099		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
1100							  0, 0);
1101		if (r)
1102			return r;
1103	}
1104
1105	return 0;
1106}
1107
1108
1109static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
1110						     struct amdgpu_cs_chunk *chunk)
1111{
1112	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1113	unsigned num_deps;
1114	int i, r;
1115
1116	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1117	num_deps = chunk->length_dw * 4 /
1118		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1119	for (i = 0; i < num_deps; ++i) {
1120		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
1121							  syncobj_deps[i].handle,
1122							  syncobj_deps[i].point,
1123							  syncobj_deps[i].flags);
1124		if (r)
1125			return r;
1126	}
1127
1128	return 0;
1129}
1130
1131static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1132					     struct amdgpu_cs_chunk *chunk)
1133{
1134	struct drm_amdgpu_cs_chunk_sem *deps;
1135	unsigned num_deps;
1136	int i;
1137
1138	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1139	num_deps = chunk->length_dw * 4 /
1140		sizeof(struct drm_amdgpu_cs_chunk_sem);
1141
1142	if (p->post_deps)
1143		return -EINVAL;
1144
1145	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1146				     GFP_KERNEL);
1147	p->num_post_deps = 0;
1148
1149	if (!p->post_deps)
1150		return -ENOMEM;
1151
1152
1153	for (i = 0; i < num_deps; ++i) {
1154		p->post_deps[i].syncobj =
1155			drm_syncobj_find(p->filp, deps[i].handle);
1156		if (!p->post_deps[i].syncobj)
1157			return -EINVAL;
1158		p->post_deps[i].chain = NULL;
1159		p->post_deps[i].point = 0;
1160		p->num_post_deps++;
1161	}
1162
1163	return 0;
1164}
1165
1166
1167static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1168						      struct amdgpu_cs_chunk *chunk)
1169{
1170	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1171	unsigned num_deps;
1172	int i;
1173
1174	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1175	num_deps = chunk->length_dw * 4 /
1176		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1177
1178	if (p->post_deps)
1179		return -EINVAL;
1180
1181	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1182				     GFP_KERNEL);
1183	p->num_post_deps = 0;
1184
1185	if (!p->post_deps)
1186		return -ENOMEM;
1187
1188	for (i = 0; i < num_deps; ++i) {
1189		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
1190
1191		dep->chain = NULL;
1192		if (syncobj_deps[i].point) {
1193			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
1194			if (!dep->chain)
1195				return -ENOMEM;
1196		}
1197
1198		dep->syncobj = drm_syncobj_find(p->filp,
1199						syncobj_deps[i].handle);
1200		if (!dep->syncobj) {
1201			kfree(dep->chain);
1202			return -EINVAL;
1203		}
1204		dep->point = syncobj_deps[i].point;
1205		p->num_post_deps++;
1206	}
1207
1208	return 0;
1209}
1210
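/* Walk all chunks again and collect the wait-side dependencies as well as
 * the syncobjs to signal once the submission fence exists.
 */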
1211static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1212				  struct amdgpu_cs_parser *p)
1213{
1214	int i, r;
1215
1216	for (i = 0; i < p->nchunks; ++i) {
1217		struct amdgpu_cs_chunk *chunk;
1218
1219		chunk = &p->chunks[i];
1220
1221		switch (chunk->chunk_id) {
1222		case AMDGPU_CHUNK_ID_DEPENDENCIES:
1223		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
1224			r = amdgpu_cs_process_fence_dep(p, chunk);
1225			if (r)
1226				return r;
1227			break;
1228		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
1229			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1230			if (r)
1231				return r;
1232			break;
1233		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
1234			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1235			if (r)
1236				return r;
1237			break;
1238		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
1239			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
1240			if (r)
1241				return r;
1242			break;
1243		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
1244			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
1245			if (r)
1246				return r;
1247			break;
1248		}
1249	}
1250
1251	return 0;
1252}
1253
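/* Signal all post-dependency syncobjs with the submission fence, either by
 * adding a timeline point or by replacing the syncobj fence directly.
 */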
1254static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1255{
1256	int i;
1257
1258	for (i = 0; i < p->num_post_deps; ++i) {
1259		if (p->post_deps[i].chain && p->post_deps[i].point) {
1260			drm_syncobj_add_point(p->post_deps[i].syncobj,
1261					      p->post_deps[i].chain,
1262					      p->fence, p->post_deps[i].point);
1263			p->post_deps[i].chain = NULL;
1264		} else {
1265			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1266						  p->fence);
1267		}
1268	}
1269}
1270
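/* Push the prepared job to the scheduler: re-check that no userptr pages
 * were invalidated meanwhile (returning -EAGAIN so libdrm restarts the
 * ioctl), publish the fence on the context and the reserved BOs and hand
 * the job over to the entity.
 */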
1271static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1272			    union drm_amdgpu_cs *cs)
1273{
1274	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1275	struct drm_sched_entity *entity = p->entity;
1276	enum drm_sched_priority priority;
1277	struct amdgpu_ring *ring;
1278	struct amdgpu_bo_list_entry *e;
1279	struct amdgpu_job *job;
1280	uint64_t seq;
1281	int r;
1282
1283	job = p->job;
1284	p->job = NULL;
1285
1286	r = drm_sched_job_init(&job->base, entity, p->filp);
1287	if (r)
1288		goto error_unlock;
1289
1290	/* No memory allocation is allowed while holding the mn lock.
1291	 * p->mn is hold until amdgpu_cs_submit is finished and fence is added
1292	 * to BOs.
1293	 */
1294	amdgpu_mn_lock(p->mn);
1295
1296	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
1297	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
1298	 */
1299	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1300		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1301
1302		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1303	}
1304	if (r) {
1305		r = -EAGAIN;
1306		goto error_abort;
1307	}
1308
1309	job->owner = p->filp;
1310	p->fence = dma_fence_get(&job->base.s_fence->finished);
1311
1312	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
1313	amdgpu_cs_post_dependencies(p);
1314
1315	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1316	    !p->ctx->preamble_presented) {
1317		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1318		p->ctx->preamble_presented = true;
1319	}
1320
1321	cs->out.handle = seq;
1322	job->uf_sequence = seq;
1323
1324	amdgpu_job_free_resources(job);
1325
1326	trace_amdgpu_cs_ioctl(job);
1327	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1328	priority = job->base.s_priority;
1329	drm_sched_entity_push_job(&job->base, entity);
1330
1331	ring = to_amdgpu_ring(entity->rq->sched);
1332	amdgpu_ring_priority_get(ring, priority);
1333
1334	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1335
1336	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1337	amdgpu_mn_unlock(p->mn);
1338
1339	return 0;
1340
1341error_abort:
1342	drm_sched_job_cleanup(&job->base);
1343	amdgpu_mn_unlock(p->mn);
1344
1345error_unlock:
1346	amdgpu_job_free(job);
1347	return r;
1348}
1349
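/**
 * amdgpu_cs_ioctl - top level command submission IOCTL
 * @dev: drm device
 * @data: CS ioctl data (union drm_amdgpu_cs)
 * @filp: file private
 *
 * Runs the whole submission pipeline: parser init, IB fill, dependency
 * handling, BO reservation and validation, VM handling and finally the
 * push to the scheduler.
 */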
1350int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1351{
1352	struct amdgpu_device *adev = dev->dev_private;
1353	union drm_amdgpu_cs *cs = data;
1354	struct amdgpu_cs_parser parser = {};
1355	bool reserved_buffers = false;
1356	int i, r;
1357
1358	if (!adev->accel_working)
1359		return -EBUSY;
1360
1361	parser.adev = adev;
1362	parser.filp = filp;
1363
1364	r = amdgpu_cs_parser_init(&parser, data);
1365	if (r) {
1366		DRM_ERROR("Failed to initialize parser %d!\n", r);
1367		goto out;
1368	}
1369
1370	r = amdgpu_cs_ib_fill(adev, &parser);
1371	if (r)
1372		goto out;
1373
1374	r = amdgpu_cs_dependencies(adev, &parser);
1375	if (r) {
1376		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1377		goto out;
1378	}
1379
1380	r = amdgpu_cs_parser_bos(&parser, data);
1381	if (r) {
1382		if (r == -ENOMEM)
1383			DRM_ERROR("Not enough memory for command submission!\n");
1384		else if (r != -ERESTARTSYS && r != -EAGAIN)
1385			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1386		goto out;
1387	}
1388
1389	reserved_buffers = true;
1390
1391	for (i = 0; i < parser.job->num_ibs; i++)
1392		trace_amdgpu_cs(&parser, i);
1393
1394	r = amdgpu_cs_vm_handling(&parser);
1395	if (r)
1396		goto out;
1397
1398	r = amdgpu_cs_submit(&parser, cs);
1399
1400out:
1401	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1402
1403	return r;
1404}
1405
1406/**
1407 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1408 *
1409 * @dev: drm device
1410 * @data: data from userspace
1411 * @filp: file private
1412 *
1413 * Wait for the command submission identified by handle to finish.
1414 */
1415int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1416			 struct drm_file *filp)
1417{
1418	union drm_amdgpu_wait_cs *wait = data;
1419	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1420	struct drm_sched_entity *entity;
1421	struct amdgpu_ctx *ctx;
1422	struct dma_fence *fence;
1423	long r;
1424
1425	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1426	if (ctx == NULL)
1427		return -EINVAL;
1428
1429	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1430				  wait->in.ring, &entity);
1431	if (r) {
1432		amdgpu_ctx_put(ctx);
1433		return r;
1434	}
1435
1436	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1437	if (IS_ERR(fence))
1438		r = PTR_ERR(fence);
1439	else if (fence) {
1440		r = dma_fence_wait_timeout(fence, true, timeout);
1441		if (r > 0 && fence->error)
1442			r = fence->error;
1443		dma_fence_put(fence);
1444	} else
1445		r = 1;
1446
1447	amdgpu_ctx_put(ctx);
1448	if (r < 0)
1449		return r;
1450
1451	memset(wait, 0, sizeof(*wait));
1452	wait->out.status = (r == 0);
1453
1454	return 0;
1455}
1456
1457/**
1458 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1459 *
1460 * @adev: amdgpu device
1461 * @filp: file private
1462 * @user: drm_amdgpu_fence copied from user space
1463 */
1464static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1465					     struct drm_file *filp,
1466					     struct drm_amdgpu_fence *user)
1467{
1468	struct drm_sched_entity *entity;
1469	struct amdgpu_ctx *ctx;
1470	struct dma_fence *fence;
1471	int r;
1472
1473	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1474	if (ctx == NULL)
1475		return ERR_PTR(-EINVAL);
1476
1477	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1478				  user->ring, &entity);
1479	if (r) {
1480		amdgpu_ctx_put(ctx);
1481		return ERR_PTR(r);
1482	}
1483
1484	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1485	amdgpu_ctx_put(ctx);
1486
1487	return fence;
1488}
1489
1490int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1491				    struct drm_file *filp)
1492{
1493	struct amdgpu_device *adev = dev->dev_private;
1494	union drm_amdgpu_fence_to_handle *info = data;
1495	struct dma_fence *fence;
1496	struct drm_syncobj *syncobj;
1497	struct sync_file *sync_file;
1498	int fd, r;
1499
1500	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1501	if (IS_ERR(fence))
1502		return PTR_ERR(fence);
1503
1504	if (!fence)
1505		fence = dma_fence_get_stub();
1506
1507	switch (info->in.what) {
1508	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1509		r = drm_syncobj_create(&syncobj, 0, fence);
1510		dma_fence_put(fence);
1511		if (r)
1512			return r;
1513		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1514		drm_syncobj_put(syncobj);
1515		return r;
1516
1517	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1518		r = drm_syncobj_create(&syncobj, 0, fence);
1519		dma_fence_put(fence);
1520		if (r)
1521			return r;
1522		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
1523		drm_syncobj_put(syncobj);
1524		return r;
1525
1526	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1527		fd = get_unused_fd_flags(O_CLOEXEC);
1528		if (fd < 0) {
1529			dma_fence_put(fence);
1530			return fd;
1531		}
1532
1533		sync_file = sync_file_create(fence);
1534		dma_fence_put(fence);
1535		if (!sync_file) {
1536			put_unused_fd(fd);
1537			return -ENOMEM;
1538		}
1539
1540		fd_install(fd, sync_file->file);
1541		info->out.handle = fd;
1542		return 0;
1543
1544	default:
1545		return -EINVAL;
1546	}
1547}
1548
1549/**
 1550 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1551 *
1552 * @adev: amdgpu device
1553 * @filp: file private
1554 * @wait: wait parameters
1555 * @fences: array of drm_amdgpu_fence
1556 */
1557static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1558				     struct drm_file *filp,
1559				     union drm_amdgpu_wait_fences *wait,
1560				     struct drm_amdgpu_fence *fences)
1561{
1562	uint32_t fence_count = wait->in.fence_count;
1563	unsigned int i;
1564	long r = 1;
1565
1566	for (i = 0; i < fence_count; i++) {
1567		struct dma_fence *fence;
1568		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1569
1570		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1571		if (IS_ERR(fence))
1572			return PTR_ERR(fence);
1573		else if (!fence)
1574			continue;
1575
1576		r = dma_fence_wait_timeout(fence, true, timeout);
1577		dma_fence_put(fence);
1578		if (r < 0)
1579			return r;
1580
1581		if (r == 0)
1582			break;
1583
1584		if (fence->error)
1585			return fence->error;
1586	}
1587
1588	memset(wait, 0, sizeof(*wait));
1589	wait->out.status = (r > 0);
1590
1591	return 0;
1592}
1593
1594/**
1595 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1596 *
1597 * @adev: amdgpu device
1598 * @filp: file private
1599 * @wait: wait parameters
1600 * @fences: array of drm_amdgpu_fence
1601 */
1602static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1603				    struct drm_file *filp,
1604				    union drm_amdgpu_wait_fences *wait,
1605				    struct drm_amdgpu_fence *fences)
1606{
1607	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1608	uint32_t fence_count = wait->in.fence_count;
1609	uint32_t first = ~0;
1610	struct dma_fence **array;
1611	unsigned int i;
1612	long r;
1613
1614	/* Prepare the fence array */
1615	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1616
1617	if (array == NULL)
1618		return -ENOMEM;
1619
1620	for (i = 0; i < fence_count; i++) {
1621		struct dma_fence *fence;
1622
1623		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1624		if (IS_ERR(fence)) {
1625			r = PTR_ERR(fence);
1626			goto err_free_fence_array;
1627		} else if (fence) {
1628			array[i] = fence;
1629		} else { /* NULL, the fence has been already signaled */
1630			r = 1;
1631			first = i;
1632			goto out;
1633		}
1634	}
1635
1636	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1637				       &first);
1638	if (r < 0)
1639		goto err_free_fence_array;
1640
1641out:
1642	memset(wait, 0, sizeof(*wait));
1643	wait->out.status = (r > 0);
1644	wait->out.first_signaled = first;
1645
1646	if (first < fence_count && array[first])
1647		r = array[first]->error;
1648	else
1649		r = 0;
1650
1651err_free_fence_array:
1652	for (i = 0; i < fence_count; i++)
1653		dma_fence_put(array[i]);
1654	kfree(array);
1655
1656	return r;
1657}
1658
1659/**
1660 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1661 *
1662 * @dev: drm device
1663 * @data: data from userspace
1664 * @filp: file private
1665 */
1666int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1667				struct drm_file *filp)
1668{
1669	struct amdgpu_device *adev = dev->dev_private;
1670	union drm_amdgpu_wait_fences *wait = data;
1671	uint32_t fence_count = wait->in.fence_count;
1672	struct drm_amdgpu_fence *fences_user;
1673	struct drm_amdgpu_fence *fences;
1674	int r;
1675
1676	/* Get the fences from userspace */
1677	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1678			GFP_KERNEL);
1679	if (fences == NULL)
1680		return -ENOMEM;
1681
1682	fences_user = u64_to_user_ptr(wait->in.fences);
1683	if (copy_from_user(fences, fences_user,
1684		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1685		r = -EFAULT;
1686		goto err_free_fences;
1687	}
1688
1689	if (wait->in.wait_all)
1690		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1691	else
1692		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1693
1694err_free_fences:
1695	kfree(fences);
1696
1697	return r;
1698}
1699
1700/**
 1701 * amdgpu_cs_find_mapping - find the BO and mapping for a VM address
1702 *
1703 * @parser: command submission parser context
1704 * @addr: VM address
 1705 * @bo: resulting BO of the mapping found
 * @map: resulting mapping of the address
 1706 *
1707 * Search the buffer objects in the command submission context for a certain
 1708 * virtual memory address. Returns 0 and fills in @bo and @map when the
 1709 * mapping is found, an error code otherwise.
1710 */
1711int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1712			   uint64_t addr, struct amdgpu_bo **bo,
1713			   struct amdgpu_bo_va_mapping **map)
1714{
1715	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1716	struct ttm_operation_ctx ctx = { false, false };
1717	struct amdgpu_vm *vm = &fpriv->vm;
1718	struct amdgpu_bo_va_mapping *mapping;
1719	int r;
1720
1721	addr /= AMDGPU_GPU_PAGE_SIZE;
1722
1723	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1724	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1725		return -EINVAL;
1726
1727	*bo = mapping->bo_va->base.bo;
1728	*map = mapping;
1729
1730	/* Double check that the BO is reserved by this CS */
1731	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1732		return -EINVAL;
1733
1734	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1735		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1736		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1737		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1738		if (r)
1739			return r;
1740	}
1741
1742	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1743}