   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
  35#include <drm/ttm/ttm_tt.h>
  36
  37#include "amdgpu_cs.h"
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_gmc.h"
  41#include "amdgpu_gem.h"
  42#include "amdgpu_ras.h"
  43
  44static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
  45				 struct amdgpu_device *adev,
  46				 struct drm_file *filp,
  47				 union drm_amdgpu_cs *cs)
  48{
  49	struct amdgpu_fpriv *fpriv = filp->driver_priv;
  50
  51	if (cs->in.num_chunks == 0)
  52		return -EINVAL;
  53
  54	memset(p, 0, sizeof(*p));
  55	p->adev = adev;
  56	p->filp = filp;
  57
  58	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
  59	if (!p->ctx)
  60		return -EINVAL;
  61
  62	if (atomic_read(&p->ctx->guilty)) {
  63		amdgpu_ctx_put(p->ctx);
  64		return -ECANCELED;
  65	}
  66
  67	amdgpu_sync_create(&p->sync);
  68	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
  69		      DRM_EXEC_IGNORE_DUPLICATES, 0);
  70	return 0;
  71}
  72
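/* Map an IB chunk to a gang member: look up the scheduler entity for the
 * requested IP type/instance/ring and return the index of the gang job that
 * already uses it, or grow the gang by one slot if there is still room
 * (up to AMDGPU_CS_GANG_SIZE).
 */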
  73static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
  74			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
  75{
  76	struct drm_sched_entity *entity;
  77	unsigned int i;
  78	int r;
  79
  80	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
  81				  chunk_ib->ip_instance,
  82				  chunk_ib->ring, &entity);
  83	if (r)
  84		return r;
  85
  86	/*
  87	 * Abort if there is no run queue associated with this entity.
  88	 * Possibly because of disabled HW IP.
  89	 */
  90	if (entity->rq == NULL)
  91		return -EINVAL;
  92
  93	/* Check if we can add this IB to some existing job */
  94	for (i = 0; i < p->gang_size; ++i)
  95		if (p->entities[i] == entity)
  96			return i;
  97
   98	/* If not, increase the gang size if possible */
  99	if (i == AMDGPU_CS_GANG_SIZE)
 100		return -EINVAL;
 101
 102	p->entities[i] = entity;
 103	p->gang_size = i + 1;
 104	return i;
 105}
 106
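/* First pass over an IB chunk: reserve a slot in the matching gang job and
 * count the IB. The job of the last IB chunk processed becomes the gang
 * leader.
 */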
 107static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 108			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
 109			   unsigned int *num_ibs)
 110{
 111	int r;
 112
 113	r = amdgpu_cs_job_idx(p, chunk_ib);
 114	if (r < 0)
 115		return r;
 116
 117	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
 118		return -EINVAL;
 119
 120	++(num_ibs[r]);
 121	p->gang_leader_idx = r;
 122	return 0;
 123}
 124
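/* First pass over the user fence chunk: take a reference on the fence BO
 * and check that it is exactly one page in size, that the fence offset
 * leaves room for the 8 byte fence value and that the BO is not a userptr.
 */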
 125static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 126				   struct drm_amdgpu_cs_chunk_fence *data,
 127				   uint32_t *offset)
 128{
 129	struct drm_gem_object *gobj;
 130	unsigned long size;
 131
 132	gobj = drm_gem_object_lookup(p->filp, data->handle);
 133	if (gobj == NULL)
 134		return -EINVAL;
 135
 136	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 137	drm_gem_object_put(gobj);
 138
 139	size = amdgpu_bo_size(p->uf_bo);
 140	if (size != PAGE_SIZE || data->offset > (size - 8))
 141		return -EINVAL;
 142
 143	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
 144		return -EINVAL;
 145
 146	*offset = data->offset;
 147	return 0;
 148}
 149
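/* Build p->bo_list from the BO handles embedded in an
 * AMDGPU_CHUNK_ID_BO_HANDLES chunk.
 */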
 150static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
 151				   struct drm_amdgpu_bo_list_in *data)
 152{
 153	struct drm_amdgpu_bo_list_entry *info;
 154	int r;
 155
 156	r = amdgpu_bo_create_list_entry_array(data, &info);
 157	if (r)
 158		return r;
 159
 160	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
 161				  &p->bo_list);
 162	if (r)
 163		goto error_free;
 164
 165	kvfree(info);
 166	return 0;
 167
 168error_free:
 169	kvfree(info);
 170
 171	return r;
 172}
 173
 174/* Copy the data from userspace and go over it the first time */
 175static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 176			   union drm_amdgpu_cs *cs)
 177{
 178	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 179	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
 180	struct amdgpu_vm *vm = &fpriv->vm;
 181	uint64_t *chunk_array_user;
 182	uint64_t *chunk_array;
 183	uint32_t uf_offset = 0;
 184	size_t size;
 185	int ret;
 186	int i;
 187
 188	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
 189				     GFP_KERNEL);
 190	if (!chunk_array)
 191		return -ENOMEM;
 192
 193	/* get chunks */
 194	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 195	if (copy_from_user(chunk_array, chunk_array_user,
 196			   sizeof(uint64_t)*cs->in.num_chunks)) {
 197		ret = -EFAULT;
 198		goto free_chunk;
 199	}
 200
 201	p->nchunks = cs->in.num_chunks;
 202	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 203			    GFP_KERNEL);
 204	if (!p->chunks) {
 205		ret = -ENOMEM;
 206		goto free_chunk;
 207	}
 208
 209	for (i = 0; i < p->nchunks; i++) {
 210		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
 211		struct drm_amdgpu_cs_chunk user_chunk;
 212		uint32_t __user *cdata;
 213
 214		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 215		if (copy_from_user(&user_chunk, chunk_ptr,
 216				       sizeof(struct drm_amdgpu_cs_chunk))) {
 217			ret = -EFAULT;
 218			i--;
 219			goto free_partial_kdata;
 220		}
 221		p->chunks[i].chunk_id = user_chunk.chunk_id;
 222		p->chunks[i].length_dw = user_chunk.length_dw;
 223
 224		size = p->chunks[i].length_dw;
 225		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 226
 227		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
 228						    GFP_KERNEL);
 229		if (p->chunks[i].kdata == NULL) {
 230			ret = -ENOMEM;
 231			i--;
 232			goto free_partial_kdata;
 233		}
 234		size *= sizeof(uint32_t);
 235		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 236			ret = -EFAULT;
 237			goto free_partial_kdata;
 238		}
 239
 240		/* Assume the worst on the following checks */
 241		ret = -EINVAL;
 242		switch (p->chunks[i].chunk_id) {
 243		case AMDGPU_CHUNK_ID_IB:
 244			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
 245				goto free_partial_kdata;
 246
 247			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
 248			if (ret)
 249				goto free_partial_kdata;
 250			break;
 251
 252		case AMDGPU_CHUNK_ID_FENCE:
 253			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
 254				goto free_partial_kdata;
 255
 256			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
 257						      &uf_offset);
 258			if (ret)
 259				goto free_partial_kdata;
 260			break;
 261
 262		case AMDGPU_CHUNK_ID_BO_HANDLES:
 263			if (size < sizeof(struct drm_amdgpu_bo_list_in))
 264				goto free_partial_kdata;
 265
 266			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 267			if (ret)
 268				goto free_partial_kdata;
 269			break;
 270
 271		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 272		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 273		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 274		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 275		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 276		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 277		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 278			break;
 279
 280		default:
 281			goto free_partial_kdata;
 282		}
 283	}
 284
 285	if (!p->gang_size) {
 286		ret = -EINVAL;
 287		goto free_all_kdata;
 288	}
 289
 290	for (i = 0; i < p->gang_size; ++i) {
 291		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
 292				       num_ibs[i], &p->jobs[i]);
 293		if (ret)
 294			goto free_all_kdata;
 295	}
 296	p->gang_leader = p->jobs[p->gang_leader_idx];
 297
 298	if (p->ctx->generation != p->gang_leader->generation) {
 299		ret = -ECANCELED;
 300		goto free_all_kdata;
 301	}
 302
 303	if (p->uf_bo)
 304		p->gang_leader->uf_addr = uf_offset;
 305	kvfree(chunk_array);
 306
 307	/* Use this opportunity to fill in task info for the vm */
 308	amdgpu_vm_set_task_info(vm);
 309
 310	return 0;
 311
 312free_all_kdata:
 313	i = p->nchunks - 1;
 314free_partial_kdata:
 315	for (; i >= 0; i--)
 316		kvfree(p->chunks[i].kdata);
 317	kvfree(p->chunks);
 318	p->chunks = NULL;
 319	p->nchunks = 0;
 320free_chunk:
 321	kvfree(chunk_array);
 322
 323	return ret;
 324}
 325
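/* Second pass over an IB chunk: allocate the IB in the matching gang job,
 * enforce the user fence and preemption restrictions and copy the virtual
 * address, size and flags of the IB over to it.
 */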
 326static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 327			   struct amdgpu_cs_chunk *chunk,
 328			   unsigned int *ce_preempt,
 329			   unsigned int *de_preempt)
 330{
 331	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
 332	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 333	struct amdgpu_vm *vm = &fpriv->vm;
 334	struct amdgpu_ring *ring;
 335	struct amdgpu_job *job;
 336	struct amdgpu_ib *ib;
 337	int r;
 338
 339	r = amdgpu_cs_job_idx(p, chunk_ib);
 340	if (r < 0)
 341		return r;
 342
 343	job = p->jobs[r];
 344	ring = amdgpu_job_ring(job);
 345	ib = &job->ibs[job->num_ibs++];
 346
 347	/* MM engine doesn't support user fences */
 348	if (p->uf_bo && ring->funcs->no_user_fence)
 349		return -EINVAL;
 350
 351	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 352	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 353		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 354			(*ce_preempt)++;
 355		else
 356			(*de_preempt)++;
 357
  358		/* Each GFX command submission allows at most one
  359		 * preemptible IB each for CE and DE */
 360		if (*ce_preempt > 1 || *de_preempt > 1)
 361			return -EINVAL;
 362	}
 363
 364	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 365		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 366
  367	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
 368			   chunk_ib->ib_bytes : 0,
 369			   AMDGPU_IB_POOL_DELAYED, ib);
 370	if (r) {
 371		DRM_ERROR("Failed to get ib !\n");
 372		return r;
 373	}
 374
 375	ib->gpu_addr = chunk_ib->va_start;
 376	ib->length_dw = chunk_ib->ib_bytes / 4;
 377	ib->flags = chunk_ib->flags;
 378	return 0;
 379}
 380
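/* Resolve the fences of a DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk and
 * add them to the parser's sync object. For scheduled dependencies only the
 * scheduled fence is waited for instead of the finished fence.
 */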
 381static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 382				     struct amdgpu_cs_chunk *chunk)
 383{
 384	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 385	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 386	unsigned int num_deps;
 387	int i, r;
 388
 389	num_deps = chunk->length_dw * 4 /
 390		sizeof(struct drm_amdgpu_cs_chunk_dep);
 391
 392	for (i = 0; i < num_deps; ++i) {
 393		struct amdgpu_ctx *ctx;
 394		struct drm_sched_entity *entity;
 395		struct dma_fence *fence;
 396
 397		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 398		if (ctx == NULL)
 399			return -EINVAL;
 400
 401		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 402					  deps[i].ip_instance,
 403					  deps[i].ring, &entity);
 404		if (r) {
 405			amdgpu_ctx_put(ctx);
 406			return r;
 407		}
 408
 409		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 410		amdgpu_ctx_put(ctx);
 411
 412		if (IS_ERR(fence))
 413			return PTR_ERR(fence);
 414		else if (!fence)
 415			continue;
 416
 417		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 418			struct drm_sched_fence *s_fence;
 419			struct dma_fence *old = fence;
 420
 421			s_fence = to_drm_sched_fence(fence);
 422			fence = dma_fence_get(&s_fence->scheduled);
 423			dma_fence_put(old);
 424		}
 425
 426		r = amdgpu_sync_fence(&p->sync, fence);
 427		dma_fence_put(fence);
 428		if (r)
 429			return r;
 430	}
 431	return 0;
 432}
 433
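/* Look up the fence behind a syncobj handle, optionally at a timeline
 * point, and add it as a dependency to the parser's sync object.
 */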
 434static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 435					 uint32_t handle, u64 point,
 436					 u64 flags)
 437{
 438	struct dma_fence *fence;
 439	int r;
 440
 441	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
 442	if (r) {
 443		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
 444			  handle, point, r);
 445		return r;
 446	}
 447
 448	r = amdgpu_sync_fence(&p->sync, fence);
 449	dma_fence_put(fence);
 450	return r;
 451}
 452
 453static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 454				   struct amdgpu_cs_chunk *chunk)
 455{
 456	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 457	unsigned int num_deps;
 458	int i, r;
 459
 460	num_deps = chunk->length_dw * 4 /
 461		sizeof(struct drm_amdgpu_cs_chunk_sem);
 462	for (i = 0; i < num_deps; ++i) {
 463		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
 464		if (r)
 465			return r;
 466	}
 467
 468	return 0;
 469}
 470
 471static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 472					      struct amdgpu_cs_chunk *chunk)
 473{
 474	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 475	unsigned int num_deps;
 476	int i, r;
 477
 478	num_deps = chunk->length_dw * 4 /
 479		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 480	for (i = 0; i < num_deps; ++i) {
 481		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
 482						  syncobj_deps[i].point,
 483						  syncobj_deps[i].flags);
 484		if (r)
 485			return r;
 486	}
 487
 488	return 0;
 489}
 490
 491static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 492				    struct amdgpu_cs_chunk *chunk)
 493{
 494	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 495	unsigned int num_deps;
 496	int i;
 497
 498	num_deps = chunk->length_dw * 4 /
 499		sizeof(struct drm_amdgpu_cs_chunk_sem);
 500
 501	if (p->post_deps)
 502		return -EINVAL;
 503
 504	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 505				     GFP_KERNEL);
 506	p->num_post_deps = 0;
 507
 508	if (!p->post_deps)
 509		return -ENOMEM;
 510
 511
 512	for (i = 0; i < num_deps; ++i) {
 513		p->post_deps[i].syncobj =
 514			drm_syncobj_find(p->filp, deps[i].handle);
 515		if (!p->post_deps[i].syncobj)
 516			return -EINVAL;
 517		p->post_deps[i].chain = NULL;
 518		p->post_deps[i].point = 0;
 519		p->num_post_deps++;
 520	}
 521
 522	return 0;
 523}
 524
 525static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 526						struct amdgpu_cs_chunk *chunk)
 527{
 528	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 529	unsigned int num_deps;
 530	int i;
 531
 532	num_deps = chunk->length_dw * 4 /
 533		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 534
 535	if (p->post_deps)
 536		return -EINVAL;
 537
 538	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 539				     GFP_KERNEL);
 540	p->num_post_deps = 0;
 541
 542	if (!p->post_deps)
 543		return -ENOMEM;
 544
 545	for (i = 0; i < num_deps; ++i) {
 546		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
 547
 548		dep->chain = NULL;
 549		if (syncobj_deps[i].point) {
 550			dep->chain = dma_fence_chain_alloc();
 551			if (!dep->chain)
 552				return -ENOMEM;
 553		}
 554
 555		dep->syncobj = drm_syncobj_find(p->filp,
 556						syncobj_deps[i].handle);
 557		if (!dep->syncobj) {
 558			dma_fence_chain_free(dep->chain);
 559			return -EINVAL;
 560		}
 561		dep->point = syncobj_deps[i].point;
 562		p->num_post_deps++;
 563	}
 564
 565	return 0;
 566}
 567
 568static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
 569			       struct amdgpu_cs_chunk *chunk)
 570{
 571	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
 572	int i;
 573
 574	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
 575		return -EINVAL;
 576
 577	for (i = 0; i < p->gang_size; ++i) {
 578		p->jobs[i]->shadow_va = shadow->shadow_va;
 579		p->jobs[i]->csa_va = shadow->csa_va;
 580		p->jobs[i]->gds_va = shadow->gds_va;
 581		p->jobs[i]->init_shadow =
 582			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
 583	}
 584
 585	return 0;
 586}
 587
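/* Second pass over all chunks: now that the jobs are allocated, fill in the
 * IBs and collect the dependencies, syncobj waits/signals and the CP shadow
 * setup.
 */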
 588static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 589{
 590	unsigned int ce_preempt = 0, de_preempt = 0;
 591	int i, r;
 592
 593	for (i = 0; i < p->nchunks; ++i) {
 594		struct amdgpu_cs_chunk *chunk;
 595
 596		chunk = &p->chunks[i];
 597
 598		switch (chunk->chunk_id) {
 599		case AMDGPU_CHUNK_ID_IB:
 600			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
 601			if (r)
 602				return r;
 603			break;
 604		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 605		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 606			r = amdgpu_cs_p2_dependencies(p, chunk);
 607			if (r)
 608				return r;
 609			break;
 610		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 611			r = amdgpu_cs_p2_syncobj_in(p, chunk);
 612			if (r)
 613				return r;
 614			break;
 615		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 616			r = amdgpu_cs_p2_syncobj_out(p, chunk);
 617			if (r)
 618				return r;
 619			break;
 620		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 621			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
 622			if (r)
 623				return r;
 624			break;
 625		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 626			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
 627			if (r)
 628				return r;
 629			break;
 630		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 631			r = amdgpu_cs_p2_shadow(p, chunk);
 632			if (r)
 633				return r;
 634			break;
 635		}
 636	}
 637
 638	return 0;
 639}
 640
 641/* Convert microseconds to bytes. */
 642static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 643{
 644	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 645		return 0;
 646
 647	/* Since accum_us is incremented by a million per second, just
 648	 * multiply it by the number of MB/s to get the number of bytes.
 649	 */
 650	return us << adev->mm_stats.log2_max_MBps;
 651}
 652
 653static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 654{
 655	if (!adev->mm_stats.log2_max_MBps)
 656		return 0;
 657
 658	return bytes >> adev->mm_stats.log2_max_MBps;
 659}
 660
  661/* Calculates how many bytes TTM can move right now, returned through
  662 * @max_bytes and @max_vis_bytes. If no bytes can be moved, this is 0.
  663 * A non-zero value means it's OK to move at least one buffer, which means
  664 * it can go over the threshold once. If that happens, the driver will be
  665 * in debt and no other buffer migrations can be done until that debt is repaid.
 666 *
 667 * This approach allows moving a buffer of any size (it's important to allow
 668 * that).
 669 *
 670 * The currency is simply time in microseconds and it increases as the clock
 671 * ticks. The accumulated microseconds (us) are converted to bytes and
 672 * returned.
 673 */
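/* For example, with adev->mm_stats.log2_max_MBps == 8 (256 MB/s) and the
 * full 200 ms budget accumulated, us_to_bytes() allows roughly
 * 200000 << 8 = ~51 MB of buffer moves for this submission.
 */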
 674static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 675					      u64 *max_bytes,
 676					      u64 *max_vis_bytes)
 677{
 678	s64 time_us, increment_us;
 679	u64 free_vram, total_vram, used_vram;
 680	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 681	 * throttling.
 682	 *
 683	 * It means that in order to get full max MBps, at least 5 IBs per
 684	 * second must be submitted and not more than 200ms apart from each
 685	 * other.
 686	 */
 687	const s64 us_upper_bound = 200000;
 688
 689	if (!adev->mm_stats.log2_max_MBps) {
 690		*max_bytes = 0;
 691		*max_vis_bytes = 0;
 692		return;
 693	}
 694
 695	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 696	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 697	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 698
 699	spin_lock(&adev->mm_stats.lock);
 700
 701	/* Increase the amount of accumulated us. */
 702	time_us = ktime_to_us(ktime_get());
 703	increment_us = time_us - adev->mm_stats.last_update_us;
 704	adev->mm_stats.last_update_us = time_us;
 705	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 706				      us_upper_bound);
 707
 708	/* This prevents the short period of low performance when the VRAM
 709	 * usage is low and the driver is in debt or doesn't have enough
 710	 * accumulated us to fill VRAM quickly.
 711	 *
 712	 * The situation can occur in these cases:
 713	 * - a lot of VRAM is freed by userspace
 714	 * - the presence of a big buffer causes a lot of evictions
 715	 *   (solution: split buffers into smaller ones)
 716	 *
 717	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 718	 * accum_us to a positive number.
 719	 */
 720	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 721		s64 min_us;
 722
 723		/* Be more aggressive on dGPUs. Try to fill a portion of free
 724		 * VRAM now.
 725		 */
 726		if (!(adev->flags & AMD_IS_APU))
 727			min_us = bytes_to_us(adev, free_vram / 4);
 728		else
 729			min_us = 0; /* Reset accum_us on APUs. */
 730
 731		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 732	}
 733
 734	/* This is set to 0 if the driver is in debt to disallow (optional)
 735	 * buffer moves.
 736	 */
 737	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 738
 739	/* Do the same for visible VRAM if half of it is free */
 740	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 741		u64 total_vis_vram = adev->gmc.visible_vram_size;
 742		u64 used_vis_vram =
 743		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
 744
 745		if (used_vis_vram < total_vis_vram) {
 746			u64 free_vis_vram = total_vis_vram - used_vis_vram;
 747
 748			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 749							  increment_us, us_upper_bound);
 750
 751			if (free_vis_vram >= total_vis_vram / 2)
 752				adev->mm_stats.accum_us_vis =
 753					max(bytes_to_us(adev, free_vis_vram / 2),
 754					    adev->mm_stats.accum_us_vis);
 755		}
 756
 757		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 758	} else {
 759		*max_vis_bytes = 0;
 760	}
 761
 762	spin_unlock(&adev->mm_stats.lock);
 763}
 764
 765/* Report how many bytes have really been moved for the last command
 766 * submission. This can result in a debt that can stop buffer migrations
 767 * temporarily.
 768 */
 769void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 770				  u64 num_vis_bytes)
 771{
 772	spin_lock(&adev->mm_stats.lock);
 773	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 774	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 775	spin_unlock(&adev->mm_stats.lock);
 776}
 777
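/* Validation callback for a single BO: pick a placement based on the
 * remaining move budget (preferred domains while budget is left, otherwise
 * only the allowed domains), validate the BO and account the bytes actually
 * moved. On -ENOMEM validation is retried with the allowed domains.
 */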
 778static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 779{
 780	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 781	struct amdgpu_cs_parser *p = param;
 782	struct ttm_operation_ctx ctx = {
 783		.interruptible = true,
 784		.no_wait_gpu = false,
 785		.resv = bo->tbo.base.resv
 786	};
 787	uint32_t domain;
 788	int r;
 789
 790	if (bo->tbo.pin_count)
 791		return 0;
 792
 793	/* Don't move this buffer if we have depleted our allowance
 794	 * to move it. Don't move anything if the threshold is zero.
 795	 */
 796	if (p->bytes_moved < p->bytes_moved_threshold &&
 797	    (!bo->tbo.base.dma_buf ||
 798	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 799		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 800		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 801			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 802			 * visible VRAM if we've depleted our allowance to do
 803			 * that.
 804			 */
 805			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 806				domain = bo->preferred_domains;
 807			else
 808				domain = bo->allowed_domains;
 809		} else {
 810			domain = bo->preferred_domains;
 811		}
 812	} else {
 813		domain = bo->allowed_domains;
 814	}
 815
 816retry:
 817	amdgpu_bo_placement_from_domain(bo, domain);
 818	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 819
 820	p->bytes_moved += ctx.bytes_moved;
 821	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 822	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 823		p->bytes_moved_vis += ctx.bytes_moved;
 824
 825	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 826		domain = bo->allowed_domains;
 827		goto retry;
 828	}
 829
 830	return r;
 831}
 832
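/* Lock and validate all buffers of the submission: take over the BO list
 * from the chunk or handle, grab the userptr backing pages, reserve
 * everything with drm_exec, validate the BOs within the move budget and
 * allocate a GART mapping for the user fence BO.
 */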
 833static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 834				union drm_amdgpu_cs *cs)
 835{
 836	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 837	struct ttm_operation_ctx ctx = { true, false };
 838	struct amdgpu_vm *vm = &fpriv->vm;
 839	struct amdgpu_bo_list_entry *e;
 840	struct drm_gem_object *obj;
 841	unsigned long index;
 842	unsigned int i;
 843	int r;
 844
 845	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 846	if (cs->in.bo_list_handle) {
 847		if (p->bo_list)
 848			return -EINVAL;
 849
 850		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 851				       &p->bo_list);
 852		if (r)
 853			return r;
 854	} else if (!p->bo_list) {
  855		/* Create an empty bo_list when no handle is provided */
 856		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 857					  &p->bo_list);
 858		if (r)
 859			return r;
 860	}
 861
 862	mutex_lock(&p->bo_list->bo_list_mutex);
 863
  864	/* Get userptr backing pages. If pages were updated after being
  865	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
  866	 * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages
  867	 */
 868	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 869		bool userpage_invalidated = false;
 870		struct amdgpu_bo *bo = e->bo;
 871		int i;
 872
 873		e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
 874					 sizeof(struct page *),
 875					 GFP_KERNEL);
 876		if (!e->user_pages) {
  877			DRM_ERROR("kvcalloc failure\n");
 878			r = -ENOMEM;
 879			goto out_free_user_pages;
 880		}
 881
 882		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 883		if (r) {
 884			kvfree(e->user_pages);
 885			e->user_pages = NULL;
 886			goto out_free_user_pages;
 887		}
 888
 889		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 890			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 891				userpage_invalidated = true;
 892				break;
 893			}
 894		}
 895		e->user_invalidated = userpage_invalidated;
 896	}
 897
 898	drm_exec_until_all_locked(&p->exec) {
 899		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
 900		drm_exec_retry_on_contention(&p->exec);
 901		if (unlikely(r))
 902			goto out_free_user_pages;
 903
 904		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 905			/* One fence for TTM and one for each CS job */
 906			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
 907						 1 + p->gang_size);
 908			drm_exec_retry_on_contention(&p->exec);
 909			if (unlikely(r))
 910				goto out_free_user_pages;
 911
 912			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
 913		}
 914
 915		if (p->uf_bo) {
 916			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
 917						 1 + p->gang_size);
 918			drm_exec_retry_on_contention(&p->exec);
 919			if (unlikely(r))
 920				goto out_free_user_pages;
 921		}
 922	}
 923
 924	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 925		struct mm_struct *usermm;
 926
 927		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
 928		if (usermm && usermm != current->mm) {
 929			r = -EPERM;
 930			goto out_free_user_pages;
 931		}
 932
 933		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
 934		    e->user_invalidated && e->user_pages) {
 935			amdgpu_bo_placement_from_domain(e->bo,
 936							AMDGPU_GEM_DOMAIN_CPU);
 937			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
 938					    &ctx);
 939			if (r)
 940				goto out_free_user_pages;
 941
 942			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
 943						     e->user_pages);
 944		}
 945
 946		kvfree(e->user_pages);
 947		e->user_pages = NULL;
 948	}
 949
 950	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 951					  &p->bytes_moved_vis_threshold);
 952	p->bytes_moved = 0;
 953	p->bytes_moved_vis = 0;
 954
 955	r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
 956			       amdgpu_cs_bo_validate, p);
 957	if (r) {
 958		DRM_ERROR("amdgpu_vm_validate() failed.\n");
 959		goto out_free_user_pages;
 960	}
 961
 962	drm_exec_for_each_locked_object(&p->exec, index, obj) {
 963		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
 964		if (unlikely(r))
 965			goto out_free_user_pages;
 966	}
 967
 968	if (p->uf_bo) {
 969		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
 970		if (unlikely(r))
 971			goto out_free_user_pages;
 972
 973		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
 974	}
 975
 976	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 977				     p->bytes_moved_vis);
 978
 979	for (i = 0; i < p->gang_size; ++i)
 980		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
 981					 p->bo_list->gws_obj,
 982					 p->bo_list->oa_obj);
 983	return 0;
 984
 985out_free_user_pages:
 986	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 987		struct amdgpu_bo *bo = e->bo;
 988
 989		if (!e->user_pages)
 990			continue;
 991		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 992		kvfree(e->user_pages);
 993		e->user_pages = NULL;
 994		e->range = NULL;
 995	}
 996	mutex_unlock(&p->bo_list->bo_list_mutex);
 997	return r;
 998}
 999
1000static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1001{
1002	int i, j;
1003
1004	if (!trace_amdgpu_cs_enabled())
1005		return;
1006
1007	for (i = 0; i < p->gang_size; ++i) {
1008		struct amdgpu_job *job = p->jobs[i];
1009
1010		for (j = 0; j < job->num_ibs; ++j)
1011			trace_amdgpu_cs(p, job, &job->ibs[j]);
1012	}
1013}
1014
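/* For rings that need VM emulation (UVD/VCE), map the IB buffer object and
 * either copy and parse the commands or patch them in place.
 */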
1015static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1016			       struct amdgpu_job *job)
1017{
1018	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1019	unsigned int i;
1020	int r;
1021
1022	/* Only for UVD/VCE VM emulation */
1023	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1024		return 0;
1025
1026	for (i = 0; i < job->num_ibs; ++i) {
1027		struct amdgpu_ib *ib = &job->ibs[i];
1028		struct amdgpu_bo_va_mapping *m;
1029		struct amdgpu_bo *aobj;
1030		uint64_t va_start;
1031		uint8_t *kptr;
1032
1033		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1034		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1035		if (r) {
1036			DRM_ERROR("IB va_start is invalid\n");
1037			return r;
1038		}
1039
1040		if ((va_start + ib->length_dw * 4) >
1041		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1042			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1043			return -EINVAL;
1044		}
1045
1046		/* the IB should be reserved at this point */
1047		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1048		if (r)
1049			return r;
1050
1051		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1052
1053		if (ring->funcs->parse_cs) {
1054			memcpy(ib->ptr, kptr, ib->length_dw * 4);
1055			amdgpu_bo_kunmap(aobj);
1056
1057			r = amdgpu_ring_parse_cs(ring, p, job, ib);
1058			if (r)
1059				return r;
1060		} else {
1061			ib->ptr = (uint32_t *)kptr;
1062			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1063			amdgpu_bo_kunmap(aobj);
1064			if (r)
1065				return r;
1066		}
1067	}
1068
1069	return 0;
1070}
1071
1072static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1073{
1074	unsigned int i;
1075	int r;
1076
1077	for (i = 0; i < p->gang_size; ++i) {
1078		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1079		if (r)
1080			return r;
1081	}
1082	return 0;
1083}
1084
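/* Update the page tables for everything the submission touches: clear freed
 * mappings, update the PRT, CSA and per-BO virtual addresses, handle moved
 * BOs and the page directories, and collect the resulting page table update
 * fences in the parser's sync object.
 */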
1085static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1086{
1087	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1088	struct amdgpu_job *job = p->gang_leader;
1089	struct amdgpu_device *adev = p->adev;
1090	struct amdgpu_vm *vm = &fpriv->vm;
1091	struct amdgpu_bo_list_entry *e;
1092	struct amdgpu_bo_va *bo_va;
1093	unsigned int i;
1094	int r;
1095
1096	r = amdgpu_vm_clear_freed(adev, vm, NULL);
1097	if (r)
1098		return r;
1099
1100	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1101	if (r)
1102		return r;
1103
1104	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1105	if (r)
1106		return r;
1107
1108	if (fpriv->csa_va) {
1109		bo_va = fpriv->csa_va;
1110		BUG_ON(!bo_va);
1111		r = amdgpu_vm_bo_update(adev, bo_va, false);
1112		if (r)
1113			return r;
1114
1115		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1116		if (r)
1117			return r;
1118	}
1119
1120	/* FIXME: In theory this loop shouldn't be needed any more when
1121	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
1122	 * with p->ticket. But removing it caused test regressions, so I'm
1123	 * leaving it here for now.
1124	 */
1125	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1126		bo_va = e->bo_va;
1127		if (bo_va == NULL)
1128			continue;
1129
1130		r = amdgpu_vm_bo_update(adev, bo_va, false);
1131		if (r)
1132			return r;
1133
1134		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1135		if (r)
1136			return r;
1137	}
1138
1139	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
1140	if (r)
1141		return r;
1142
1143	r = amdgpu_vm_update_pdes(adev, vm, false);
1144	if (r)
1145		return r;
1146
1147	r = amdgpu_sync_fence(&p->sync, vm->last_update);
1148	if (r)
1149		return r;
1150
1151	for (i = 0; i < p->gang_size; ++i) {
1152		job = p->jobs[i];
1153
1154		if (!job->vm)
1155			continue;
1156
1157		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1158	}
1159
1160	if (adev->debug_vm) {
1161		/* Invalidate all BOs to test for userspace bugs */
1162		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1163			struct amdgpu_bo *bo = e->bo;
1164
1165			/* ignore duplicates */
1166			if (!bo)
1167				continue;
1168
1169			amdgpu_vm_bo_invalidate(adev, bo, false);
1170		}
1171	}
1172
1173	return 0;
1174}
1175
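/* Wait for the previous submission of this context and turn the implicit
 * reservation fences of all locked BOs plus the collected explicit
 * dependencies into dependencies of the gang jobs.
 */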
1176static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1177{
1178	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1179	struct drm_gpu_scheduler *sched;
1180	struct drm_gem_object *obj;
1181	struct dma_fence *fence;
1182	unsigned long index;
1183	unsigned int i;
1184	int r;
1185
1186	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1187	if (r) {
1188		if (r != -ERESTARTSYS)
1189			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1190		return r;
1191	}
1192
1193	drm_exec_for_each_locked_object(&p->exec, index, obj) {
1194		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1195
1196		struct dma_resv *resv = bo->tbo.base.resv;
1197		enum amdgpu_sync_mode sync_mode;
1198
1199		sync_mode = amdgpu_bo_explicit_sync(bo) ?
1200			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1201		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1202				     &fpriv->vm);
1203		if (r)
1204			return r;
1205	}
1206
1207	for (i = 0; i < p->gang_size; ++i) {
1208		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1209		if (r)
1210			return r;
1211	}
1212
1213	sched = p->gang_leader->base.entity->rq->sched;
1214	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1215		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1216
1217		/*
 1218		 * When we have a dependency it might be necessary to insert a
1219		 * pipeline sync to make sure that all caches etc are flushed and the
1220		 * next job actually sees the results from the previous one
1221		 * before we start executing on the same scheduler ring.
1222		 */
1223		if (!s_fence || s_fence->sched != sched) {
1224			dma_fence_put(fence);
1225			continue;
1226		}
1227
1228		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1229		dma_fence_put(fence);
1230		if (r)
1231			return r;
1232	}
1233	return 0;
1234}
1235
1236static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1237{
1238	int i;
1239
1240	for (i = 0; i < p->num_post_deps; ++i) {
1241		if (p->post_deps[i].chain && p->post_deps[i].point) {
1242			drm_syncobj_add_point(p->post_deps[i].syncobj,
1243					      p->post_deps[i].chain,
1244					      p->fence, p->post_deps[i].point);
1245			p->post_deps[i].chain = NULL;
1246		} else {
1247			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1248						  p->fence);
1249		}
1250	}
1251}
1252
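/* Final step of the submission: arm the scheduler jobs, make the gang
 * leader depend on the scheduled fences of the other gang members, re-check
 * the userptrs under the notifier lock, attach the finished fences to all
 * reservation objects and push the jobs to their entities.
 */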
1253static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1254			    union drm_amdgpu_cs *cs)
1255{
1256	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1257	struct amdgpu_job *leader = p->gang_leader;
1258	struct amdgpu_bo_list_entry *e;
1259	struct drm_gem_object *gobj;
1260	unsigned long index;
1261	unsigned int i;
1262	uint64_t seq;
1263	int r;
1264
1265	for (i = 0; i < p->gang_size; ++i)
1266		drm_sched_job_arm(&p->jobs[i]->base);
1267
1268	for (i = 0; i < p->gang_size; ++i) {
1269		struct dma_fence *fence;
1270
1271		if (p->jobs[i] == leader)
1272			continue;
1273
1274		fence = &p->jobs[i]->base.s_fence->scheduled;
1275		dma_fence_get(fence);
1276		r = drm_sched_job_add_dependency(&leader->base, fence);
1277		if (r) {
1278			dma_fence_put(fence);
1279			return r;
1280		}
1281	}
1282
1283	if (p->gang_size > 1) {
1284		for (i = 0; i < p->gang_size; ++i)
1285			amdgpu_job_set_gang_leader(p->jobs[i], leader);
1286	}
1287
1288	/* No memory allocation is allowed while holding the notifier lock.
1289	 * The lock is held until amdgpu_cs_submit is finished and fence is
1290	 * added to BOs.
1291	 */
1292	mutex_lock(&p->adev->notifier_lock);
1293
 1294	/* If userptrs were invalidated after amdgpu_cs_parser_bos(), return
 1295	 * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs_ioctl.
 1296	 */
1297	r = 0;
1298	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1299		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
1300							e->range);
1301		e->range = NULL;
1302	}
1303	if (r) {
1304		r = -EAGAIN;
1305		mutex_unlock(&p->adev->notifier_lock);
1306		return r;
1307	}
1308
1309	p->fence = dma_fence_get(&leader->base.s_fence->finished);
1310	drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1311
1312		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1313
1314		/* Everybody except for the gang leader uses READ */
1315		for (i = 0; i < p->gang_size; ++i) {
1316			if (p->jobs[i] == leader)
1317				continue;
1318
1319			dma_resv_add_fence(gobj->resv,
1320					   &p->jobs[i]->base.s_fence->finished,
1321					   DMA_RESV_USAGE_READ);
1322		}
1323
 1324		/* The gang leader is remembered as the writer */
1325		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1326	}
1327
1328	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1329				   p->fence);
1330	amdgpu_cs_post_dependencies(p);
1331
1332	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1333	    !p->ctx->preamble_presented) {
1334		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1335		p->ctx->preamble_presented = true;
1336	}
1337
1338	cs->out.handle = seq;
1339	leader->uf_sequence = seq;
1340
1341	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1342	for (i = 0; i < p->gang_size; ++i) {
1343		amdgpu_job_free_resources(p->jobs[i]);
1344		trace_amdgpu_cs_ioctl(p->jobs[i]);
1345		drm_sched_entity_push_job(&p->jobs[i]->base);
1346		p->jobs[i] = NULL;
1347	}
1348
1349	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1350
1351	mutex_unlock(&p->adev->notifier_lock);
1352	mutex_unlock(&p->bo_list->bo_list_mutex);
1353	return 0;
1354}
1355
1356/* Cleanup the parser structure */
1357static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1358{
1359	unsigned int i;
1360
1361	amdgpu_sync_free(&parser->sync);
1362	drm_exec_fini(&parser->exec);
1363
1364	for (i = 0; i < parser->num_post_deps; i++) {
1365		drm_syncobj_put(parser->post_deps[i].syncobj);
1366		kfree(parser->post_deps[i].chain);
1367	}
1368	kfree(parser->post_deps);
1369
1370	dma_fence_put(parser->fence);
1371
1372	if (parser->ctx)
1373		amdgpu_ctx_put(parser->ctx);
1374	if (parser->bo_list)
1375		amdgpu_bo_list_put(parser->bo_list);
1376
1377	for (i = 0; i < parser->nchunks; i++)
1378		kvfree(parser->chunks[i].kdata);
1379	kvfree(parser->chunks);
1380	for (i = 0; i < parser->gang_size; ++i) {
1381		if (parser->jobs[i])
1382			amdgpu_job_free(parser->jobs[i]);
1383	}
1384	amdgpu_bo_unref(&parser->uf_bo);
1385}
1386
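/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: data from userspace (union drm_amdgpu_cs)
 * @filp: file private
 *
 * Initialize the parser, run the two chunk passes, lock and validate the
 * BOs, patch the IBs where needed, handle the VM updates, sync the rings
 * and finally submit the jobs.
 */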
1387int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1388{
1389	struct amdgpu_device *adev = drm_to_adev(dev);
1390	struct amdgpu_cs_parser parser;
1391	int r;
1392
1393	if (amdgpu_ras_intr_triggered())
1394		return -EHWPOISON;
1395
1396	if (!adev->accel_working)
1397		return -EBUSY;
1398
1399	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1400	if (r) {
1401		DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
1402		return r;
1403	}
1404
1405	r = amdgpu_cs_pass1(&parser, data);
1406	if (r)
1407		goto error_fini;
1408
1409	r = amdgpu_cs_pass2(&parser);
1410	if (r)
1411		goto error_fini;
1412
1413	r = amdgpu_cs_parser_bos(&parser, data);
1414	if (r) {
1415		if (r == -ENOMEM)
1416			DRM_ERROR("Not enough memory for command submission!\n");
1417		else if (r != -ERESTARTSYS && r != -EAGAIN)
1418			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
1419		goto error_fini;
1420	}
1421
1422	r = amdgpu_cs_patch_jobs(&parser);
1423	if (r)
1424		goto error_backoff;
1425
1426	r = amdgpu_cs_vm_handling(&parser);
1427	if (r)
1428		goto error_backoff;
1429
1430	r = amdgpu_cs_sync_rings(&parser);
1431	if (r)
1432		goto error_backoff;
1433
1434	trace_amdgpu_cs_ibs(&parser);
1435
1436	r = amdgpu_cs_submit(&parser, data);
1437	if (r)
1438		goto error_backoff;
1439
1440	amdgpu_cs_parser_fini(&parser);
1441	return 0;
1442
1443error_backoff:
1444	mutex_unlock(&parser.bo_list->bo_list_mutex);
1445
1446error_fini:
1447	amdgpu_cs_parser_fini(&parser);
1448	return r;
1449}
1450
1451/**
1452 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1453 *
1454 * @dev: drm device
1455 * @data: data from userspace
1456 * @filp: file private
1457 *
1458 * Wait for the command submission identified by handle to finish.
1459 */
1460int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1461			 struct drm_file *filp)
1462{
1463	union drm_amdgpu_wait_cs *wait = data;
1464	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1465	struct drm_sched_entity *entity;
1466	struct amdgpu_ctx *ctx;
1467	struct dma_fence *fence;
1468	long r;
1469
1470	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1471	if (ctx == NULL)
1472		return -EINVAL;
1473
1474	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1475				  wait->in.ring, &entity);
1476	if (r) {
1477		amdgpu_ctx_put(ctx);
1478		return r;
1479	}
1480
1481	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1482	if (IS_ERR(fence))
1483		r = PTR_ERR(fence);
1484	else if (fence) {
1485		r = dma_fence_wait_timeout(fence, true, timeout);
1486		if (r > 0 && fence->error)
1487			r = fence->error;
1488		dma_fence_put(fence);
1489	} else
1490		r = 1;
1491
1492	amdgpu_ctx_put(ctx);
1493	if (r < 0)
1494		return r;
1495
1496	memset(wait, 0, sizeof(*wait));
1497	wait->out.status = (r == 0);
1498
1499	return 0;
1500}
1501
1502/**
1503 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1504 *
1505 * @adev: amdgpu device
1506 * @filp: file private
1507 * @user: drm_amdgpu_fence copied from user space
1508 */
1509static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1510					     struct drm_file *filp,
1511					     struct drm_amdgpu_fence *user)
1512{
1513	struct drm_sched_entity *entity;
1514	struct amdgpu_ctx *ctx;
1515	struct dma_fence *fence;
1516	int r;
1517
1518	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1519	if (ctx == NULL)
1520		return ERR_PTR(-EINVAL);
1521
1522	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1523				  user->ring, &entity);
1524	if (r) {
1525		amdgpu_ctx_put(ctx);
1526		return ERR_PTR(r);
1527	}
1528
1529	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1530	amdgpu_ctx_put(ctx);
1531
1532	return fence;
1533}
1534
1535int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1536				    struct drm_file *filp)
1537{
1538	struct amdgpu_device *adev = drm_to_adev(dev);
1539	union drm_amdgpu_fence_to_handle *info = data;
1540	struct dma_fence *fence;
1541	struct drm_syncobj *syncobj;
1542	struct sync_file *sync_file;
1543	int fd, r;
1544
1545	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1546	if (IS_ERR(fence))
1547		return PTR_ERR(fence);
1548
1549	if (!fence)
1550		fence = dma_fence_get_stub();
1551
1552	switch (info->in.what) {
1553	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1554		r = drm_syncobj_create(&syncobj, 0, fence);
1555		dma_fence_put(fence);
1556		if (r)
1557			return r;
1558		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1559		drm_syncobj_put(syncobj);
1560		return r;
1561
1562	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1563		r = drm_syncobj_create(&syncobj, 0, fence);
1564		dma_fence_put(fence);
1565		if (r)
1566			return r;
1567		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1568		drm_syncobj_put(syncobj);
1569		return r;
1570
1571	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1572		fd = get_unused_fd_flags(O_CLOEXEC);
1573		if (fd < 0) {
1574			dma_fence_put(fence);
1575			return fd;
1576		}
1577
1578		sync_file = sync_file_create(fence);
1579		dma_fence_put(fence);
1580		if (!sync_file) {
1581			put_unused_fd(fd);
1582			return -ENOMEM;
1583		}
1584
1585		fd_install(fd, sync_file->file);
1586		info->out.handle = fd;
1587		return 0;
1588
1589	default:
1590		dma_fence_put(fence);
1591		return -EINVAL;
1592	}
1593}
1594
1595/**
1596 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1597 *
1598 * @adev: amdgpu device
1599 * @filp: file private
1600 * @wait: wait parameters
1601 * @fences: array of drm_amdgpu_fence
1602 */
1603static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1604				     struct drm_file *filp,
1605				     union drm_amdgpu_wait_fences *wait,
1606				     struct drm_amdgpu_fence *fences)
1607{
1608	uint32_t fence_count = wait->in.fence_count;
1609	unsigned int i;
1610	long r = 1;
1611
1612	for (i = 0; i < fence_count; i++) {
1613		struct dma_fence *fence;
1614		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1615
1616		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1617		if (IS_ERR(fence))
1618			return PTR_ERR(fence);
1619		else if (!fence)
1620			continue;
1621
1622		r = dma_fence_wait_timeout(fence, true, timeout);
1623		if (r > 0 && fence->error)
1624			r = fence->error;
1625
1626		dma_fence_put(fence);
1627		if (r < 0)
1628			return r;
1629
1630		if (r == 0)
1631			break;
1632	}
1633
1634	memset(wait, 0, sizeof(*wait));
1635	wait->out.status = (r > 0);
1636
1637	return 0;
1638}
1639
1640/**
1641 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1642 *
1643 * @adev: amdgpu device
1644 * @filp: file private
1645 * @wait: wait parameters
1646 * @fences: array of drm_amdgpu_fence
1647 */
1648static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1649				    struct drm_file *filp,
1650				    union drm_amdgpu_wait_fences *wait,
1651				    struct drm_amdgpu_fence *fences)
1652{
1653	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1654	uint32_t fence_count = wait->in.fence_count;
1655	uint32_t first = ~0;
1656	struct dma_fence **array;
1657	unsigned int i;
1658	long r;
1659
1660	/* Prepare the fence array */
1661	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1662
1663	if (array == NULL)
1664		return -ENOMEM;
1665
1666	for (i = 0; i < fence_count; i++) {
1667		struct dma_fence *fence;
1668
1669		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1670		if (IS_ERR(fence)) {
1671			r = PTR_ERR(fence);
1672			goto err_free_fence_array;
1673		} else if (fence) {
1674			array[i] = fence;
1675		} else { /* NULL, the fence has been already signaled */
1676			r = 1;
1677			first = i;
1678			goto out;
1679		}
1680	}
1681
1682	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1683				       &first);
1684	if (r < 0)
1685		goto err_free_fence_array;
1686
1687out:
1688	memset(wait, 0, sizeof(*wait));
1689	wait->out.status = (r > 0);
1690	wait->out.first_signaled = first;
1691
1692	if (first < fence_count && array[first])
1693		r = array[first]->error;
1694	else
1695		r = 0;
1696
1697err_free_fence_array:
1698	for (i = 0; i < fence_count; i++)
1699		dma_fence_put(array[i]);
1700	kfree(array);
1701
1702	return r;
1703}
1704
1705/**
1706 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1707 *
1708 * @dev: drm device
1709 * @data: data from userspace
1710 * @filp: file private
1711 */
1712int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1713				struct drm_file *filp)
1714{
1715	struct amdgpu_device *adev = drm_to_adev(dev);
1716	union drm_amdgpu_wait_fences *wait = data;
1717	uint32_t fence_count = wait->in.fence_count;
1718	struct drm_amdgpu_fence *fences_user;
1719	struct drm_amdgpu_fence *fences;
1720	int r;
1721
1722	/* Get the fences from userspace */
1723	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1724			GFP_KERNEL);
1725	if (fences == NULL)
1726		return -ENOMEM;
1727
1728	fences_user = u64_to_user_ptr(wait->in.fences);
1729	if (copy_from_user(fences, fences_user,
1730		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1731		r = -EFAULT;
1732		goto err_free_fences;
1733	}
1734
1735	if (wait->in.wait_all)
1736		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1737	else
1738		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1739
1740err_free_fences:
1741	kfree(fences);
1742
1743	return r;
1744}
1745
1746/**
1747 * amdgpu_cs_find_mapping - find bo_va for VM address
1748 *
1749 * @parser: command submission parser context
1750 * @addr: VM address
1751 * @bo: resulting BO of the mapping found
1752 * @map: Placeholder to return found BO mapping
1753 *
 1754 * Search the buffer objects in the command submission context for a certain
 1755 * virtual memory address. Returns 0 and fills in @bo and @map when the
 1756 * mapping is found, a negative error code otherwise.
1757 */
1758int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1759			   uint64_t addr, struct amdgpu_bo **bo,
1760			   struct amdgpu_bo_va_mapping **map)
1761{
1762	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1763	struct ttm_operation_ctx ctx = { false, false };
1764	struct amdgpu_vm *vm = &fpriv->vm;
1765	struct amdgpu_bo_va_mapping *mapping;
1766	int r;
1767
1768	addr /= AMDGPU_GPU_PAGE_SIZE;
1769
1770	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1771	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1772		return -EINVAL;
1773
1774	*bo = mapping->bo_va->base.bo;
1775	*map = mapping;
1776
1777	/* Double check that the BO is reserved by this CS */
1778	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1779		return -EINVAL;
1780
1781	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1782		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1783		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1784		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1785		if (r)
1786			return r;
1787	}
1788
1789	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1790}
v4.10.11
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
 
 
  27#include <linux/pagemap.h>
  28#include <drm/drmP.h>
 
 
  29#include <drm/amdgpu_drm.h>
 
 
 
 
  30#include "amdgpu.h"
  31#include "amdgpu_trace.h"
 
 
 
 
 
 
 
 
 
 
  32
  33int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
  34		       u32 ip_instance, u32 ring,
  35		       struct amdgpu_ring **out_ring)
  36{
  37	/* Right now all IPs have only one instance - multiple rings. */
  38	if (ip_instance != 0) {
  39		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
  40		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
  41	}
  42
  43	switch (ip_type) {
  44	default:
  45		DRM_ERROR("unknown ip type: %d\n", ip_type);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46		return -EINVAL;
  47	case AMDGPU_HW_IP_GFX:
  48		if (ring < adev->gfx.num_gfx_rings) {
  49			*out_ring = &adev->gfx.gfx_ring[ring];
  50		} else {
  51			DRM_ERROR("only %d gfx rings are supported now\n",
  52				  adev->gfx.num_gfx_rings);
  53			return -EINVAL;
  54		}
  55		break;
  56	case AMDGPU_HW_IP_COMPUTE:
  57		if (ring < adev->gfx.num_compute_rings) {
  58			*out_ring = &adev->gfx.compute_ring[ring];
  59		} else {
  60			DRM_ERROR("only %d compute rings are supported now\n",
  61				  adev->gfx.num_compute_rings);
  62			return -EINVAL;
  63		}
  64		break;
  65	case AMDGPU_HW_IP_DMA:
  66		if (ring < adev->sdma.num_instances) {
  67			*out_ring = &adev->sdma.instance[ring].ring;
  68		} else {
  69			DRM_ERROR("only %d SDMA rings are supported\n",
  70				  adev->sdma.num_instances);
  71			return -EINVAL;
  72		}
  73		break;
  74	case AMDGPU_HW_IP_UVD:
  75		*out_ring = &adev->uvd.ring;
  76		break;
  77	case AMDGPU_HW_IP_VCE:
  78		if (ring < 2){
  79			*out_ring = &adev->vce.ring[ring];
  80		} else {
  81			DRM_ERROR("only two VCE rings are supported\n");
  82			return -EINVAL;
  83		}
  84		break;
  85	}
  86
  87	if (!(*out_ring && (*out_ring)->adev)) {
  88		DRM_ERROR("Ring %d is not initialized on IP %d\n",
  89			  ring, ip_type);
 
 
 
 
 
 
 
 
 
 
 
 
 
  90		return -EINVAL;
  91	}
  92
 
 
  93	return 0;
  94}
  95
  96static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  97				      struct drm_amdgpu_cs_chunk_fence *data,
  98				      uint32_t *offset)
  99{
 100	struct drm_gem_object *gobj;
 101	unsigned long size;
 102
 103	gobj = drm_gem_object_lookup(p->filp, data->handle);
 104	if (gobj == NULL)
 105		return -EINVAL;
 106
 107	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 108	p->uf_entry.priority = 0;
 109	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 110	p->uf_entry.tv.shared = true;
 111	p->uf_entry.user_pages = NULL;
 112
 113	size = amdgpu_bo_size(p->uf_entry.robj);
 114	if (size != PAGE_SIZE || (data->offset + 8) > size)
 
 
 
 115		return -EINVAL;
 116
 117	*offset = data->offset;
 
 
 118
 119	drm_gem_object_unreference_unlocked(gobj);
 
 
 
 
 120
 121	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
 122		amdgpu_bo_unref(&p->uf_entry.robj);
 123		return -EINVAL;
 124	}
 
 
 
 
 125
 
 126	return 0;
 
 
 
 
 
 127}
 128
 129int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 
 130{
 131	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 
 132	struct amdgpu_vm *vm = &fpriv->vm;
 133	union drm_amdgpu_cs *cs = data;
 134	uint64_t *chunk_array_user;
 135	uint64_t *chunk_array;
 136	unsigned size, num_ibs = 0;
 137	uint32_t uf_offset = 0;
 138	int i;
 139	int ret;
 140
 141	if (cs->in.num_chunks == 0)
 142		return 0;
 143
 144	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 145	if (!chunk_array)
 146		return -ENOMEM;
 147
 148	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 149	if (!p->ctx) {
 150		ret = -EINVAL;
 151		goto free_chunk;
 152	}
 153
 154	/* get chunks */
 155	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 156	if (copy_from_user(chunk_array, chunk_array_user,
 157			   sizeof(uint64_t)*cs->in.num_chunks)) {
 158		ret = -EFAULT;
 159		goto put_ctx;
 160	}
 161
 162	p->nchunks = cs->in.num_chunks;
 163	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 164			    GFP_KERNEL);
 165	if (!p->chunks) {
 166		ret = -ENOMEM;
 167		goto put_ctx;
 168	}
 169
 170	for (i = 0; i < p->nchunks; i++) {
 171		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 172		struct drm_amdgpu_cs_chunk user_chunk;
 173		uint32_t __user *cdata;
 174
 175		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 176		if (copy_from_user(&user_chunk, chunk_ptr,
 177				       sizeof(struct drm_amdgpu_cs_chunk))) {
 178			ret = -EFAULT;
 179			i--;
 180			goto free_partial_kdata;
 181		}
 182		p->chunks[i].chunk_id = user_chunk.chunk_id;
 183		p->chunks[i].length_dw = user_chunk.length_dw;
 184
 185		size = p->chunks[i].length_dw;
 186		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 187
 188		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 189		if (p->chunks[i].kdata == NULL) {
 190			ret = -ENOMEM;
 191			i--;
 192			goto free_partial_kdata;
 193		}
 194		size *= sizeof(uint32_t);
 195		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 196			ret = -EFAULT;
 197			goto free_partial_kdata;
 198		}
 199
 200		switch (p->chunks[i].chunk_id) {
 201		case AMDGPU_CHUNK_ID_IB:
 202			++num_ibs;
 203			break;
 204
 205		case AMDGPU_CHUNK_ID_FENCE:
 206			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 207			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 208				ret = -EINVAL;
 209				goto free_partial_kdata;
 210			}
 211
 212			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 213							 &uf_offset);
 214			if (ret)
 215				goto free_partial_kdata;
 216
 217			break;
 218
 219		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 220			break;
 221
 222		default:
 223			ret = -EINVAL;
 224			goto free_partial_kdata;
 225		}
 226	}
 227
 228	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 229	if (ret)
 230		goto free_all_kdata;
 231
 232	if (p->uf_entry.robj)
 233		p->job->uf_addr = uf_offset;
 234	kfree(chunk_array);
 235	return 0;
 236
 237free_all_kdata:
 238	i = p->nchunks - 1;
 239free_partial_kdata:
 240	for (; i >= 0; i--)
 241		drm_free_large(p->chunks[i].kdata);
 242	kfree(p->chunks);
 243put_ctx:
 244	amdgpu_ctx_put(p->ctx);
 245free_chunk:
 246	kfree(chunk_array);
 247
 248	return ret;
 249}
 250
 251/* Convert microseconds to bytes. */
 252static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 253{
 254	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 255		return 0;
 256
 257	/* Since accum_us is incremented by a million per second, just
 258	 * multiply it by the number of MB/s to get the number of bytes.
 259	 */
 260	return us << adev->mm_stats.log2_max_MBps;
 261}
 262
 263static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 264{
 265	if (!adev->mm_stats.log2_max_MBps)
 266		return 0;
 267
 268	return bytes >> adev->mm_stats.log2_max_MBps;
 269}
 270
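/*
 * Illustrative example with hypothetical numbers (not from the original
 * source): if log2_max_MBps is 10, each accumulated microsecond allows
 * 1 << 10 = 1024 bytes of buffer moves, so the 200 ms cap used below
 * corresponds to 200000 << 10 bytes (about 195 MiB), and reporting a
 * 100 MiB move charges 104857600 >> 10 = 102400 us back to the budget.
 */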
 271/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 272 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 273 * which means it can go over the threshold once. If that happens, the driver
 274 * will be in debt and no other buffer migrations can be done until that debt
 275 * is repaid.
 276 *
 277 * This approach allows moving a buffer of any size (it's important to allow
 278 * that).
 279 *
 280 * The currency is simply time in microseconds and it increases as the clock
 281 * ticks. The accumulated microseconds (us) are converted to bytes and
 282 * returned.
 283 */
 284static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 285{
 286	s64 time_us, increment_us;
 287	u64 max_bytes;
 288	u64 free_vram, total_vram, used_vram;
 289
 290	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
 291	 * throttling.
 292	 *
 293	 * It means that in order to get full max MBps, at least 5 IBs per
 294	 * second must be submitted and not more than 200ms apart from each
 295	 * other.
 296	 */
 297	const s64 us_upper_bound = 200000;
 298
 299	if (!adev->mm_stats.log2_max_MBps)
 300		return 0;
 301
 302	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
 303	used_vram = atomic64_read(&adev->vram_usage);
 304	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 305
 306	spin_lock(&adev->mm_stats.lock);
 307
 308	/* Increase the amount of accumulated us. */
 309	time_us = ktime_to_us(ktime_get());
 310	increment_us = time_us - adev->mm_stats.last_update_us;
 311	adev->mm_stats.last_update_us = time_us;
 312	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 313                                      us_upper_bound);
 314
 315	/* This prevents the short period of low performance when the VRAM
 316	 * usage is low and the driver is in debt or doesn't have enough
 317	 * accumulated us to fill VRAM quickly.
 318	 *
 319	 * The situation can occur in these cases:
 320	 * - a lot of VRAM is freed by userspace
 321	 * - the presence of a big buffer causes a lot of evictions
 322	 *   (solution: split buffers into smaller ones)
 323	 *
 324	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 325	 * accum_us to a positive number.
 326	 */
 327	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 328		s64 min_us;
 329
  330		/* Be more aggressive on dGPUs. Try to fill a portion of free
 331		 * VRAM now.
 332		 */
 333		if (!(adev->flags & AMD_IS_APU))
 334			min_us = bytes_to_us(adev, free_vram / 4);
 335		else
 336			min_us = 0; /* Reset accum_us on APUs. */
 337
 338		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 339	}
 340
 341	/* This returns 0 if the driver is in debt to disallow (optional)
 342	 * buffer moves.
 343	 */
 344	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 345
 346	spin_unlock(&adev->mm_stats.lock);
 347	return max_bytes;
 348}
 349
 350/* Report how many bytes have really been moved for the last command
 351 * submission. This can result in a debt that can stop buffer migrations
 352 * temporarily.
 353 */
 354static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 355					 u64 num_bytes)
 356{
 357	spin_lock(&adev->mm_stats.lock);
 358	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 359	spin_unlock(&adev->mm_stats.lock);
 360}
 361
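/*
 * Validate a single BO for this submission: while the per-submission move
 * budget lasts the preferred domains are tried first, otherwise the allowed
 * domains are used; on -ENOMEM the validation is retried with the allowed
 * domains. Bytes actually moved by TTM are charged against p->bytes_moved.
 */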
 362static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 363				 struct amdgpu_bo *bo)
 364{
 365	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 366	u64 initial_bytes_moved;
 367	uint32_t domain;
 368	int r;
 369
 370	if (bo->pin_count)
 371		return 0;
 372
 373	/* Don't move this buffer if we have depleted our allowance
 374	 * to move it. Don't move anything if the threshold is zero.
 375	 */
 376	if (p->bytes_moved < p->bytes_moved_threshold)
 377		domain = bo->prefered_domains;
 378	else
 379		domain = bo->allowed_domains;
 380
 381retry:
 382	amdgpu_ttm_placement_from_domain(bo, domain);
 383	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 384	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 385	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 386		initial_bytes_moved;
 387
 388	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 389		domain = bo->allowed_domains;
 390		goto retry;
 391	}
 392
 393	return r;
 394}
 395
 396/* Last resort, try to evict something from the current working set */
 397static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 398				struct amdgpu_bo *validated)
 399{
 400	uint32_t domain = validated->allowed_domains;
 401	int r;
 402
 403	if (!p->evictable)
 404		return false;
 405
  406	for (; &p->evictable->tv.head != &p->validated;
 407	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
 408
 409		struct amdgpu_bo_list_entry *candidate = p->evictable;
 410		struct amdgpu_bo *bo = candidate->robj;
 411		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 412		u64 initial_bytes_moved;
 413		uint32_t other;
 414
 415		/* If we reached our current BO we can forget it */
 416		if (candidate->robj == validated)
 417			break;
 418
 419		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 420
 421		/* Check if this BO is in one of the domains we need space for */
 422		if (!(other & domain))
 423			continue;
 424
 425		/* Check if we can move this BO somewhere else */
 426		other = bo->allowed_domains & ~domain;
 427		if (!other)
 428			continue;
 429
 430		/* Good we can try to move this BO somewhere else */
 431		amdgpu_ttm_placement_from_domain(bo, other);
 432		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 433		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 434		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 435			initial_bytes_moved;
 436
 437		if (unlikely(r))
 438			break;
 439
 440		p->evictable = list_prev_entry(p->evictable, tv.head);
 441		list_move(&candidate->tv.head, &p->validated);
 442
 443		return true;
 444	}
 445
 446	return false;
 447}
 448
 449static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 450{
 451	struct amdgpu_cs_parser *p = param;
 452	int r;
 453
 454	do {
 455		r = amdgpu_cs_bo_validate(p, bo);
 456	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
 457	if (r)
 458		return r;
 459
 460	if (bo->shadow)
 461		r = amdgpu_cs_bo_validate(p, bo->shadow);
 462
 463	return r;
 464}
 465
 466static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 467			    struct list_head *validated)
 468{
 469	struct amdgpu_bo_list_entry *lobj;
 470	int r;
 471
 472	list_for_each_entry(lobj, validated, tv.head) {
 473		struct amdgpu_bo *bo = lobj->robj;
 474		bool binding_userptr = false;
 475		struct mm_struct *usermm;
 476
 477		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 478		if (usermm && usermm != current->mm)
 479			return -EPERM;
 480
 481		/* Check if we have user pages and nobody bound the BO already */
 482		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
 483			size_t size = sizeof(struct page *);
 484
 485			size *= bo->tbo.ttm->num_pages;
 486			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
 487			binding_userptr = true;
 488		}
 489
 490		if (p->evictable == lobj)
 491			p->evictable = NULL;
 492
 493		r = amdgpu_cs_validate(p, bo);
 494		if (r)
 495			return r;
 496
 497		if (binding_userptr) {
 498			drm_free_large(lobj->user_pages);
 499			lobj->user_pages = NULL;
 500		}
 501	}
 502	return 0;
 503}
 504
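/*
 * Reserve and validate every BO referenced by the submission, including the
 * VM page directory and the optional user fence BO. Userptr BOs whose pages
 * were invalidated while unreserved are re-acquired and the whole reservation
 * is retried, up to ten times before giving up with -EDEADLK.
 */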
 505static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 506				union drm_amdgpu_cs *cs)
 507{
 508	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 509	struct amdgpu_bo_list_entry *e;
 510	struct list_head duplicates;
 511	bool need_mmap_lock = false;
 512	unsigned i, tries = 10;
 513	int r;
 514
 515	INIT_LIST_HEAD(&p->validated);
 516
 517	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 518	if (p->bo_list) {
 519		need_mmap_lock = p->bo_list->first_userptr !=
 520			p->bo_list->num_entries;
 521		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 522	}
 523
 524	INIT_LIST_HEAD(&duplicates);
 525	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 526
 527	if (p->uf_entry.robj)
 528		list_add(&p->uf_entry.tv.head, &p->validated);
 529
 530	if (need_mmap_lock)
 531		down_read(&current->mm->mmap_sem);
 532
 533	while (1) {
 534		struct list_head need_pages;
 535		unsigned i;
 536
 537		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 538					   &duplicates);
 539		if (unlikely(r != 0)) {
 540			if (r != -ERESTARTSYS)
 541				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 542			goto error_free_pages;
 543		}
 544
 545		/* Without a BO list we don't have userptr BOs */
 546		if (!p->bo_list)
 547			break;
 548
 549		INIT_LIST_HEAD(&need_pages);
 550		for (i = p->bo_list->first_userptr;
 551		     i < p->bo_list->num_entries; ++i) {
 552
 553			e = &p->bo_list->array[i];
 554
 555			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
 556				 &e->user_invalidated) && e->user_pages) {
 557
 558				/* We acquired a page array, but somebody
  559				 * invalidated it. Free it and try again.
 560				 */
 561				release_pages(e->user_pages,
 562					      e->robj->tbo.ttm->num_pages,
 563					      false);
 564				drm_free_large(e->user_pages);
 565				e->user_pages = NULL;
 566			}
 567
 568			if (e->robj->tbo.ttm->state != tt_bound &&
 569			    !e->user_pages) {
 570				list_del(&e->tv.head);
 571				list_add(&e->tv.head, &need_pages);
 572
 573				amdgpu_bo_unreserve(e->robj);
 574			}
 575		}
 576
 577		if (list_empty(&need_pages))
 578			break;
 579
 580		/* Unreserve everything again. */
 581		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 582
 583		/* We tried too many times, just abort */
 584		if (!--tries) {
 585			r = -EDEADLK;
 586			DRM_ERROR("deadlock in %s\n", __func__);
 587			goto error_free_pages;
 588		}
 589
  590		/* Fill the page arrays for all userptrs. */
 591		list_for_each_entry(e, &need_pages, tv.head) {
 592			struct ttm_tt *ttm = e->robj->tbo.ttm;
 593
 594			e->user_pages = drm_calloc_large(ttm->num_pages,
 595							 sizeof(struct page*));
 596			if (!e->user_pages) {
 597				r = -ENOMEM;
 598				DRM_ERROR("calloc failure in %s\n", __func__);
 599				goto error_free_pages;
 600			}
 601
 602			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 603			if (r) {
 604				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 605				drm_free_large(e->user_pages);
 606				e->user_pages = NULL;
 607				goto error_free_pages;
 608			}
 609		}
 610
 611		/* And try again. */
 612		list_splice(&need_pages, &p->validated);
 613	}
 614
 615	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 616	p->bytes_moved = 0;
 617	p->evictable = list_last_entry(&p->validated,
 618				       struct amdgpu_bo_list_entry,
 619				       tv.head);
 620
 621	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 622				      amdgpu_cs_validate, p);
 623	if (r) {
 624		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 625		goto error_validate;
 626	}
 627
 628	r = amdgpu_cs_list_validate(p, &duplicates);
 629	if (r) {
 630		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 631		goto error_validate;
 632	}
 633
 634	r = amdgpu_cs_list_validate(p, &p->validated);
 635	if (r) {
 636		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 637		goto error_validate;
 638	}
 639
 640	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 641
 642	fpriv->vm.last_eviction_counter =
 643		atomic64_read(&p->adev->num_evictions);
 644
 645	if (p->bo_list) {
 646		struct amdgpu_bo *gds = p->bo_list->gds_obj;
 647		struct amdgpu_bo *gws = p->bo_list->gws_obj;
 648		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 649		struct amdgpu_vm *vm = &fpriv->vm;
 650		unsigned i;
 651
 652		for (i = 0; i < p->bo_list->num_entries; i++) {
 653			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 654
 655			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 656		}
 657
 658		if (gds) {
 659			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
 660			p->job->gds_size = amdgpu_bo_size(gds);
 661		}
 662		if (gws) {
 663			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
 664			p->job->gws_size = amdgpu_bo_size(gws);
 665		}
 666		if (oa) {
 667			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
 668			p->job->oa_size = amdgpu_bo_size(oa);
 669		}
 670	}
 671
 672	if (!r && p->uf_entry.robj) {
 673		struct amdgpu_bo *uf = p->uf_entry.robj;
 674
 675		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
 676		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 677	}
 678
 679error_validate:
 680	if (r) {
 681		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
 682		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 683	}
 684
 685error_free_pages:
 686
 687	if (need_mmap_lock)
 688		up_read(&current->mm->mmap_sem);
 689
 690	if (p->bo_list) {
 691		for (i = p->bo_list->first_userptr;
 692		     i < p->bo_list->num_entries; ++i) {
 693			e = &p->bo_list->array[i];
 694
 695			if (!e->user_pages)
 696				continue;
 697
 698			release_pages(e->user_pages,
 699				      e->robj->tbo.ttm->num_pages,
 700				      false);
 701			drm_free_large(e->user_pages);
 702		}
 703	}
 704
 705	return r;
 706}
 707
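/* Make the job wait for the fences already attached to all reserved BOs. */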
 708static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 709{
 710	struct amdgpu_bo_list_entry *e;
 711	int r;
 712
 713	list_for_each_entry(e, &p->validated, tv.head) {
 714		struct reservation_object *resv = e->robj->tbo.resv;
 715		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 716
 717		if (r)
 718			return r;
 719	}
 720	return 0;
 721}
 722
 723/**
  724 * amdgpu_cs_parser_fini() - clean parser state
  725 * @parser:	parser structure holding parsing context.
  726 * @error:	error number
  727 *
  728 * If error is set the buffer reservations are backed off again, otherwise
  729 * the validated buffers are fenced; afterwards the parsing context is freed.
 730 **/
 731static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 732{
 733	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 734	unsigned i;
 735
 736	if (!error) {
 737		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
 738
 739		ttm_eu_fence_buffer_objects(&parser->ticket,
 740					    &parser->validated,
 741					    parser->fence);
 742	} else if (backoff) {
 743		ttm_eu_backoff_reservation(&parser->ticket,
 744					   &parser->validated);
 745	}
 746	dma_fence_put(parser->fence);
 747
 748	if (parser->ctx)
 749		amdgpu_ctx_put(parser->ctx);
 750	if (parser->bo_list)
 751		amdgpu_bo_list_put(parser->bo_list);
 752
 753	for (i = 0; i < parser->nchunks; i++)
 754		drm_free_large(parser->chunks[i].kdata);
 755	kfree(parser->chunks);
 756	if (parser->job)
 757		amdgpu_job_free(parser->job);
 758	amdgpu_bo_unref(&parser->uf_entry.robj);
 759}
 760
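/*
 * Bring the page tables of this VM up to date before the IBs execute: update
 * the page directory, apply freed and per-BO mappings and add the resulting
 * page-table fences to the job's sync object.
 */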
 761static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 762				   struct amdgpu_vm *vm)
 763{
 764	struct amdgpu_device *adev = p->adev;
 765	struct amdgpu_bo_va *bo_va;
 766	struct amdgpu_bo *bo;
 767	int i, r;
 768
 769	r = amdgpu_vm_update_page_directory(adev, vm);
 770	if (r)
 771		return r;
 772
 773	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
 774	if (r)
 775		return r;
 776
 777	r = amdgpu_vm_clear_freed(adev, vm);
 778	if (r)
 779		return r;
 780
 781	if (p->bo_list) {
 782		for (i = 0; i < p->bo_list->num_entries; i++) {
 783			struct dma_fence *f;
 784
 785			/* ignore duplicates */
 786			bo = p->bo_list->array[i].robj;
 787			if (!bo)
 788				continue;
 789
 790			bo_va = p->bo_list->array[i].bo_va;
 791			if (bo_va == NULL)
 792				continue;
 793
 794			r = amdgpu_vm_bo_update(adev, bo_va, false);
 795			if (r)
 796				return r;
 797
 798			f = bo_va->last_pt_update;
 799			r = amdgpu_sync_fence(adev, &p->job->sync, f);
 800			if (r)
 801				return r;
 802		}
 803
 804	}
 805
 806	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 807
 808	if (amdgpu_vm_debug && p->bo_list) {
 809		/* Invalidate all BOs to test for userspace bugs */
 810		for (i = 0; i < p->bo_list->num_entries; i++) {
 811			/* ignore duplicates */
 812			bo = p->bo_list->array[i].robj;
 813			if (!bo)
 814				continue;
 815
 816			amdgpu_vm_bo_invalidate(adev, bo);
 817		}
 818	}
 819
 820	return r;
 821}
 822
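/*
 * Run the in-kernel command stream parser for rings that need it (UVD/VCE VM
 * emulation), update the VM page tables and finally sync to the reserved BOs.
 */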
 823static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 824				 struct amdgpu_cs_parser *p)
 825{
 826	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 827	struct amdgpu_vm *vm = &fpriv->vm;
 828	struct amdgpu_ring *ring = p->job->ring;
 829	int i, r;
 830
 831	/* Only for UVD/VCE VM emulation */
 832	if (ring->funcs->parse_cs) {
 833		for (i = 0; i < p->job->num_ibs; i++) {
 834			r = amdgpu_ring_parse_cs(ring, p, i);
 835			if (r)
 836				return r;
 837		}
 838	}
 839
 840	if (p->job->vm) {
 841		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 842
 843		r = amdgpu_bo_vm_update_pte(p, vm);
 844		if (r)
 845			return r;
 846	}
 847
 848	return amdgpu_cs_sync_rings(p);
 849}
 850
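/*
 * Copy the IB chunks into the job. Rings with a software CS parser (UVD/VCE)
 * get the IB contents copied out of the mapped BO so they can be patched;
 * all other rings only record the GPU virtual address of the IB. All IBs of
 * one submission must target the same ring.
 */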
 851static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 852			     struct amdgpu_cs_parser *parser)
 853{
 854	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 855	struct amdgpu_vm *vm = &fpriv->vm;
 856	int i, j;
 857	int r;
 858
 859	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 860		struct amdgpu_cs_chunk *chunk;
 861		struct amdgpu_ib *ib;
 862		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 863		struct amdgpu_ring *ring;
 864
 865		chunk = &parser->chunks[i];
 866		ib = &parser->job->ibs[j];
 867		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 868
 869		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 870			continue;
 871
 872		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
 873				       chunk_ib->ip_instance, chunk_ib->ring,
 874				       &ring);
 875		if (r)
 876			return r;
 877
 878		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
 879			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 880			if (!parser->ctx->preamble_presented) {
 881				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
 882				parser->ctx->preamble_presented = true;
 883			}
 884		}
 885
 886		if (parser->job->ring && parser->job->ring != ring)
 887			return -EINVAL;
 888
 889		parser->job->ring = ring;
 890
 891		if (ring->funcs->parse_cs) {
 892			struct amdgpu_bo_va_mapping *m;
 893			struct amdgpu_bo *aobj = NULL;
 894			uint64_t offset;
 895			uint8_t *kptr;
 896
 897			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
 898						   &aobj);
 899			if (!aobj) {
 900				DRM_ERROR("IB va_start is invalid\n");
 901				return -EINVAL;
 902			}
 903
 904			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
 905			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 906				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 907				return -EINVAL;
 908			}
 909
 910			/* the IB should be reserved at this point */
 911			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
  912			if (r)
  913				return r;
 915
 916			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 917			kptr += chunk_ib->va_start - offset;
 918
  919			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
  920			if (r) {
  921				DRM_ERROR("Failed to get ib!\n");
 922				return r;
 923			}
 924
 925			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 926			amdgpu_bo_kunmap(aobj);
 927		} else {
  928			r = amdgpu_ib_get(adev, vm, 0, ib);
  929			if (r) {
  930				DRM_ERROR("Failed to get ib!\n");
 931				return r;
 932			}
 933
 934		}
 935
 936		ib->gpu_addr = chunk_ib->va_start;
 937		ib->length_dw = chunk_ib->ib_bytes / 4;
 938		ib->flags = chunk_ib->flags;
 939		j++;
 940	}
 941
 942	/* UVD & VCE fw doesn't support user fences */
 943	if (parser->job->uf_addr && (
 944	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
 945	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 946		return -EINVAL;
 947
 948	return 0;
 949}
 950
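/*
 * Resolve AMDGPU_CHUNK_ID_DEPENDENCIES chunks: every (ip, ring, ctx, handle)
 * tuple is looked up and the referenced fence is added to the job's sync
 * object, so the scheduler will not start this job before they have signaled.
 */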
 951static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 952				  struct amdgpu_cs_parser *p)
 953{
 954	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 955	int i, j, r;
 956
 957	for (i = 0; i < p->nchunks; ++i) {
 958		struct drm_amdgpu_cs_chunk_dep *deps;
 959		struct amdgpu_cs_chunk *chunk;
 960		unsigned num_deps;
 961
 962		chunk = &p->chunks[i];
 963
 964		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
 965			continue;
 966
 967		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 968		num_deps = chunk->length_dw * 4 /
 969			sizeof(struct drm_amdgpu_cs_chunk_dep);
 970
 971		for (j = 0; j < num_deps; ++j) {
 972			struct amdgpu_ring *ring;
 973			struct amdgpu_ctx *ctx;
 974			struct dma_fence *fence;
 975
 976			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 977					       deps[j].ip_instance,
 978					       deps[j].ring, &ring);
 979			if (r)
 980				return r;
 981
 982			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
 983			if (ctx == NULL)
 984				return -EINVAL;
 985
 986			fence = amdgpu_ctx_get_fence(ctx, ring,
 987						     deps[j].handle);
 988			if (IS_ERR(fence)) {
 989				r = PTR_ERR(fence);
 990				amdgpu_ctx_put(ctx);
 991				return r;
 992
 993			} else if (fence) {
 994				r = amdgpu_sync_fence(adev, &p->job->sync,
 995						      fence);
 996				dma_fence_put(fence);
 997				amdgpu_ctx_put(ctx);
 998				if (r)
 999					return r;
1000			}
1001		}
1002	}
1003
1004	return 0;
1005}
1006
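/*
 * Hand the prepared job over to the GPU scheduler and return its fence handle
 * to userspace. After amd_sched_entity_push_job() the scheduler owns the job,
 * which is why p->job is cleared before the hand-over.
 */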
1007static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1008			    union drm_amdgpu_cs *cs)
1009{
1010	struct amdgpu_ring *ring = p->job->ring;
1011	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
1012	struct amdgpu_job *job;
1013	int r;
1014
1015	job = p->job;
1016	p->job = NULL;
1017
1018	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
1019	if (r) {
1020		amdgpu_job_free(job);
1021		return r;
1022	}
1023
1024	job->owner = p->filp;
1025	job->fence_ctx = entity->fence_context;
1026	p->fence = dma_fence_get(&job->base.s_fence->finished);
1027	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
1028	job->uf_sequence = cs->out.handle;
1029	amdgpu_job_free_resources(job);
1030
1031	trace_amdgpu_cs_ioctl(job);
1032	amd_sched_entity_push_job(&job->base);
1033
1034	return 0;
1035}
1036
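/*
 * Top level of the CS ioctl: initialize the parser, reserve and validate the
 * BOs, fill in the IBs, resolve dependencies, update the VM and submit the
 * job. amdgpu_cs_parser_fini() cleans up on both the success and error paths.
 */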
1037int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1038{
1039	struct amdgpu_device *adev = dev->dev_private;
1040	union drm_amdgpu_cs *cs = data;
1041	struct amdgpu_cs_parser parser = {};
1042	bool reserved_buffers = false;
1043	int i, r;
1044
1045	if (!adev->accel_working)
1046		return -EBUSY;
1047
1048	parser.adev = adev;
1049	parser.filp = filp;
1050
1051	r = amdgpu_cs_parser_init(&parser, data);
1052	if (r) {
 1053		DRM_ERROR("Failed to initialize parser!\n");
1054		goto out;
1055	}
1056
1057	r = amdgpu_cs_parser_bos(&parser, data);
1058	if (r) {
1059		if (r == -ENOMEM)
1060			DRM_ERROR("Not enough memory for command submission!\n");
1061		else if (r != -ERESTARTSYS)
1062			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1063		goto out;
1064	}
1065
1066	reserved_buffers = true;
1067	r = amdgpu_cs_ib_fill(adev, &parser);
1068	if (r)
1069		goto out;
1070
1071	r = amdgpu_cs_dependencies(adev, &parser);
1072	if (r) {
1073		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1074		goto out;
1075	}
1076
1077	for (i = 0; i < parser.job->num_ibs; i++)
1078		trace_amdgpu_cs(&parser, i);
1079
1080	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
1081	if (r)
1082		goto out;
1083
1084	r = amdgpu_cs_submit(&parser, cs);
1085
1086out:
1087	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1088	return r;
1089}
1090
1091/**
1092 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1093 *
1094 * @dev: drm device
1095 * @data: data from userspace
1096 * @filp: file private
1097 *
1098 * Wait for the command submission identified by handle to finish.
1099 */
1100int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1101			 struct drm_file *filp)
1102{
1103	union drm_amdgpu_wait_cs *wait = data;
1104	struct amdgpu_device *adev = dev->dev_private;
1105	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1106	struct amdgpu_ring *ring = NULL;
1107	struct amdgpu_ctx *ctx;
1108	struct dma_fence *fence;
1109	long r;
1110
1111	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
1112			       wait->in.ring, &ring);
1113	if (r)
1114		return r;
1115
1116	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1117	if (ctx == NULL)
1118		return -EINVAL;
1119
1120	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
1121	if (IS_ERR(fence))
1122		r = PTR_ERR(fence);
1123	else if (fence) {
1124		r = dma_fence_wait_timeout(fence, true, timeout);
1125		dma_fence_put(fence);
1126	} else
1127		r = 1;
1128
1129	amdgpu_ctx_put(ctx);
1130	if (r < 0)
1131		return r;
1132
1133	memset(wait, 0, sizeof(*wait));
1134	wait->out.status = (r == 0);
1135
1136	return 0;
1137}
1138
1139/**
1140 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1141 *
1142 * @adev: amdgpu device
1143 * @filp: file private
1144 * @user: drm_amdgpu_fence copied from user space
1145 */
1146static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1147					     struct drm_file *filp,
1148					     struct drm_amdgpu_fence *user)
1149{
1150	struct amdgpu_ring *ring;
1151	struct amdgpu_ctx *ctx;
1152	struct dma_fence *fence;
1153	int r;
1154
1155	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
1156			       user->ring, &ring);
1157	if (r)
1158		return ERR_PTR(r);
1159
1160	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1161	if (ctx == NULL)
1162		return ERR_PTR(-EINVAL);
1163
1164	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
1165	amdgpu_ctx_put(ctx);
1166
1167	return fence;
1168}
1169
1170/**
1171 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1172 *
1173 * @adev: amdgpu device
1174 * @filp: file private
1175 * @wait: wait parameters
1176 * @fences: array of drm_amdgpu_fence
1177 */
1178static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1179				     struct drm_file *filp,
1180				     union drm_amdgpu_wait_fences *wait,
1181				     struct drm_amdgpu_fence *fences)
1182{
1183	uint32_t fence_count = wait->in.fence_count;
1184	unsigned int i;
1185	long r = 1;
1186
1187	for (i = 0; i < fence_count; i++) {
1188		struct dma_fence *fence;
1189		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1190
1191		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1192		if (IS_ERR(fence))
1193			return PTR_ERR(fence);
1194		else if (!fence)
1195			continue;
1196
1197		r = dma_fence_wait_timeout(fence, true, timeout);
1198		if (r < 0)
1199			return r;
1200
1201		if (r == 0)
1202			break;
1203	}
1204
1205	memset(wait, 0, sizeof(*wait));
1206	wait->out.status = (r > 0);
1207
1208	return 0;
1209}
1210
1211/**
1212 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1213 *
1214 * @adev: amdgpu device
1215 * @filp: file private
1216 * @wait: wait parameters
1217 * @fences: array of drm_amdgpu_fence
1218 */
1219static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1220				    struct drm_file *filp,
1221				    union drm_amdgpu_wait_fences *wait,
1222				    struct drm_amdgpu_fence *fences)
1223{
1224	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1225	uint32_t fence_count = wait->in.fence_count;
1226	uint32_t first = ~0;
1227	struct dma_fence **array;
1228	unsigned int i;
1229	long r;
1230
1231	/* Prepare the fence array */
1232	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1233
1234	if (array == NULL)
1235		return -ENOMEM;
1236
1237	for (i = 0; i < fence_count; i++) {
1238		struct dma_fence *fence;
1239
1240		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1241		if (IS_ERR(fence)) {
1242			r = PTR_ERR(fence);
1243			goto err_free_fence_array;
1244		} else if (fence) {
1245			array[i] = fence;
1246		} else { /* NULL, the fence has been already signaled */
1247			r = 1;
1248			goto out;
1249		}
1250	}
1251
1252	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1253				       &first);
1254	if (r < 0)
1255		goto err_free_fence_array;
1256
1257out:
1258	memset(wait, 0, sizeof(*wait));
1259	wait->out.status = (r > 0);
1260	wait->out.first_signaled = first;
1261	/* set return value 0 to indicate success */
1262	r = 0;
1263
1264err_free_fence_array:
1265	for (i = 0; i < fence_count; i++)
1266		dma_fence_put(array[i]);
1267	kfree(array);
1268
1269	return r;
1270}
1271
1272/**
1273 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1274 *
1275 * @dev: drm device
1276 * @data: data from userspace
1277 * @filp: file private
1278 */
1279int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1280				struct drm_file *filp)
1281{
1282	struct amdgpu_device *adev = dev->dev_private;
1283	union drm_amdgpu_wait_fences *wait = data;
1284	uint32_t fence_count = wait->in.fence_count;
1285	struct drm_amdgpu_fence *fences_user;
1286	struct drm_amdgpu_fence *fences;
1287	int r;
1288
1289	/* Get the fences from userspace */
1290	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1291			GFP_KERNEL);
1292	if (fences == NULL)
1293		return -ENOMEM;
1294
1295	fences_user = (void __user *)(unsigned long)(wait->in.fences);
1296	if (copy_from_user(fences, fences_user,
1297		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1298		r = -EFAULT;
1299		goto err_free_fences;
1300	}
1301
1302	if (wait->in.wait_all)
1303		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1304	else
1305		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1306
1307err_free_fences:
1308	kfree(fences);
1309
1310	return r;
1311}
1312
1313/**
1314 * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
1315 *
1316 * @parser: command submission parser context
1317 * @addr: VM address
1318 * @bo: resulting BO of the mapping found
1319 *
1320 * Search the buffer objects in the command submission context for a certain
1321 * virtual memory address. Returns the bo_va mapping when found, NULL
1322 * otherwise.
1323 */
1324struct amdgpu_bo_va_mapping *
1325amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1326		       uint64_t addr, struct amdgpu_bo **bo)
1327{
1328	struct amdgpu_bo_va_mapping *mapping;
1329	unsigned i;
1330
1331	if (!parser->bo_list)
1332		return NULL;
1333
1334	addr /= AMDGPU_GPU_PAGE_SIZE;
1335
1336	for (i = 0; i < parser->bo_list->num_entries; i++) {
1337		struct amdgpu_bo_list_entry *lobj;
1338
1339		lobj = &parser->bo_list->array[i];
1340		if (!lobj->bo_va)
1341			continue;
1342
1343		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
1344			if (mapping->it.start > addr ||
1345			    addr > mapping->it.last)
1346				continue;
1347
1348			*bo = lobj->bo_va->bo;
1349			return mapping;
1350		}
1351
1352		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
1353			if (mapping->it.start > addr ||
1354			    addr > mapping->it.last)
1355				continue;
1356
1357			*bo = lobj->bo_va->bo;
1358			return mapping;
1359		}
1360	}
1361
1362	return NULL;
1363}
1364
1365/**
1366 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
1367 *
1368 * @parser: command submission parser context
1369 *
1370 * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
1371 */
1372int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
1373{
1374	unsigned i;
1375	int r;
1376
1377	if (!parser->bo_list)
1378		return 0;
1379
1380	for (i = 0; i < parser->bo_list->num_entries; i++) {
1381		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
1382
1383		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
1384		if (unlikely(r))
1385			return r;
1386
1387		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
1388			continue;
1389
1390		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1391		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
1392		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
1393		if (unlikely(r))
1394			return r;
1395	}
1396
1397	return 0;
1398}