/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

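/*
 * One process's doorbell slice: room for
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 64-bit doorbells, rounded up to
 * a whole page.
 */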
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

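/*
 * Allocate a kernel doorbell from the MES doorbell bitmap and return its
 * absolute dword offset within the doorbell BAR. SDMA queues start the
 * search at the SDMA engine 0 doorbell index.
 */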
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

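/*
 * Return a doorbell handed out by amdgpu_mes_kernel_doorbell_get() to the
 * bitmap; warns if the doorbell was not actually allocated.
 */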
static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

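/*
 * Set up the MES doorbell allocator: one page of 64-bit doorbells, with the
 * first slots reserved as per-priority-level aggregated doorbells.
 */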
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

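/*
 * Allocate and zero the GTT buffer the MES firmware logs events into; only
 * used when amdgpu_mes_log_enable is set.
 */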
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

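/*
 * One-time MES setup: ID allocators and locks, VMID/HQD masks, writeback
 * slots for the scheduler context, query-status fence and register reads,
 * plus doorbell and event-log initialization.
 */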
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

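/* Tear down everything amdgpu_mes_init() set up, in reverse order. */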
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

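/*
 * Create an MES process for a PASID: allocate and zero its process context
 * BO in GTT, then publish it in the pasid IDR under the MES lock.
 */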
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

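/*
 * Destroy an MES process: remove all of its hardware queues and IDR entries
 * under the MES lock first, then free the queue, gang and process memory
 * outside the lock.
 */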
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

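/*
 * Create a gang (a group of queues scheduled as a unit) inside an existing
 * process and return its id from the gang IDR.
 */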
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

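/* Remove a gang by id; fails with -EBUSY while it still owns queues. */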
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

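/* Ask the MES firmware to suspend every gang of every process. */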
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

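/* Counterpart of amdgpu_mes_suspend(): resume every gang of every process. */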
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

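/*
 * Allocate and CPU-map the MQD (memory queue descriptor) BO for a queue.
 * On success the BO is left reserved; amdgpu_mes_queue_init_mqd()
 * unreserves it once the MQD has been initialized.
 */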
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

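/*
 * Fill in the reserved MQD through the per-queue-type mqd manager. For gfx
 * and compute queues the target me/pipe is selected under srbm_mutex while
 * the MQD is initialized.
 */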
static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

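/*
 * Create a hardware queue in a gang: allocate a queue id and a doorbell,
 * initialize the MQD, and hand the full queue description to the MES
 * firmware via the add_hw_queue callback.
 */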
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

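/*
 * Remove a hardware queue by id: drop it from the queue IDR, ask the MES
 * firmware to remove it, then release its doorbell and MQD.
 */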
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

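/*
 * Unmap a legacy (non-MES) kernel queue through the MES firmware, using a
 * trailing fence write at gpu_addr/seq to signal completion.
 */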
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

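/*
 * Read a register via a MES misc op; the firmware writes the value to the
 * preallocated read_val writeback slot. Returns 0 on any failure.
 */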
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

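/* Write a register via a MES misc op. */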
int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

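/*
 * Have the MES firmware write reg0 and then wait until (reg1 & mask) == ref.
 */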
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

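/* Have the MES firmware wait until (reg & mask) == val. */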
int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

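/*
 * Program shader debugger state (SPI per-VMID control, TCP watch registers,
 * trap enable on new enough firmware) for a process context via a misc op.
 */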
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

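/*
 * Flush shader debugger state for a process context. This is the only path
 * that may set flags.process_ctx_flush; amdgpu_mes_set_shader_debugger()
 * rejects it.
 */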
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

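/* Translate an amdgpu_ring into the queue properties handed to MES. */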
static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

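/*
 * Resolve a slot id to its byte offset inside the per-engine section of
 * struct amdgpu_mes_ctx_meta_data for the engine backing this ring.
 */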
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

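/*
 * Create a kernel ring backed by an MES hardware queue. The ring borrows
 * funcs/me/pipe from the first ring of the requested type, then the queue
 * is created through amdgpu_mes_add_hw_queue().
 */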
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

1127
1128void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
1129			    struct amdgpu_ring *ring)
1130{
1131	if (!ring)
1132		return;
1133
1134	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
1135	del_timer_sync(&ring->fence_drv.fallback_timer);
1136	amdgpu_ring_fini(ring);
1137	kfree(ring);
1138}
1139
1140uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
1141						   enum amdgpu_mes_priority_level prio)
1142{
1143	return adev->mes.aggregated_doorbells[prio];
1144}
1145
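/* Allocate, CPU-map and zero the MES context meta data BO in GTT. */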
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

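/*
 * Map the context meta data BO into a VM at meta_data_gpu_addr and wait for
 * the resulting page table updates before returning.
 */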
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

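/*
 * Unmap the context meta data BO from its VM and fence the BO against the
 * page table clearing work before it can be freed.
 */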
1250
1251int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
1252				   struct amdgpu_mes_ctx_data *ctx_data)
1253{
1254	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
1255	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
1256	struct amdgpu_vm *vm = bo_va->base.vm;
1257	struct dma_fence *fence;
1258	struct drm_exec exec;
1259	long r;
1260
1261	drm_exec_init(&exec, 0, 0);
1262	drm_exec_until_all_locked(&exec) {
1263		r = drm_exec_lock_obj(&exec,
1264				      &ctx_data->meta_data_obj->tbo.base);
1265		drm_exec_retry_on_contention(&exec);
1266		if (unlikely(r))
1267			goto out_unlock;
1268
1269		r = amdgpu_vm_lock_pd(vm, &exec, 0);
1270		drm_exec_retry_on_contention(&exec);
1271		if (unlikely(r))
1272			goto out_unlock;
 
 
 
 
 
1273	}
1274
1275	amdgpu_vm_bo_del(adev, bo_va);
1276	if (!amdgpu_vm_ready(vm))
1277		goto out_unlock;
1278
1279	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
1280				   &fence);
1281	if (r)
1282		goto out_unlock;
1283	if (fence) {
1284		amdgpu_bo_fence(bo, fence, true);
1285		fence = NULL;
1286	}
1287
1288	r = amdgpu_vm_clear_freed(adev, vm, &fence);
1289	if (r || !fence)
1290		goto out_unlock;
1291
1292	dma_fence_wait(fence, false);
1293	amdgpu_bo_fence(bo, fence, true);
1294	dma_fence_put(fence);
1295
1296out_unlock:
1297	if (unlikely(r < 0))
1298		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
1299	drm_exec_fini(&exec);
1300
1301	return r;
1302}
1303
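/*
 * Self-test helper: create one gang in the test process and populate it
 * with num_queue rings of the given queue type.
 */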
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

1342
1343static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1344{
1345	struct amdgpu_ring *ring;
1346	int i, r;
1347
1348	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1349		ring = added_rings[i];
1350		if (!ring)
1351			continue;
1352
1353		r = amdgpu_ring_test_helper(ring);
1354		if (r)
 
 
 
1355			return r;
 
 
1356
1357		r = amdgpu_ring_test_ib(ring, 1000 * 10);
1358		if (r) {
1359			DRM_DEV_ERROR(ring->adev->dev,
1360				      "ring %s ib test failed (%d)\n",
1361				      ring->name, r);
1362			return r;
1363		} else
1364			DRM_INFO("ring %s ib test pass\n", ring->name);
1365	}
1366
1367	return 0;
1368}
1369
1370int amdgpu_mes_self_test(struct amdgpu_device *adev)
1371{
1372	struct amdgpu_vm *vm = NULL;
1373	struct amdgpu_mes_ctx_data ctx_data = {0};
1374	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1375	int gang_ids[3] = {0};
1376	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
1377				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
1378				 { AMDGPU_RING_TYPE_SDMA, 1} };
 
 
 
1379	int i, r, pasid, k = 0;
1380
1381	pasid = amdgpu_pasid_alloc(16);
1382	if (pasid < 0) {
1383		dev_warn(adev->dev, "No more PASIDs available!");
1384		pasid = 0;
1385	}
1386
1387	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1388	if (!vm) {
1389		r = -ENOMEM;
1390		goto error_pasid;
1391	}
1392
1393	r = amdgpu_vm_init(adev, vm, -1);
1394	if (r) {
1395		DRM_ERROR("failed to initialize vm\n");
1396		goto error_pasid;
1397	}
1398
1399	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1400	if (r) {
1401		DRM_ERROR("failed to alloc ctx meta data\n");
1402		goto error_fini;
1403	}
1404
1405	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
1406	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1407	if (r) {
1408		DRM_ERROR("failed to map ctx meta data\n");
1409		goto error_vm;
1410	}
1411
1412	r = amdgpu_mes_create_process(adev, pasid, vm);
1413	if (r) {
1414		DRM_ERROR("failed to create MES process\n");
1415		goto error_vm;
1416	}
1417
1418	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
1419		/* On GFX v10.3, fw hasn't supported to map sdma queue. */
1420		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
1421			    IP_VERSION(10, 3, 0) &&
1422		    amdgpu_ip_version(adev, GC_HWIP, 0) <
1423			    IP_VERSION(11, 0, 0) &&
1424		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1425			continue;
1426
1427		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1428							   &gang_ids[i],
1429							   queue_types[i][0],
1430							   queue_types[i][1],
1431							   &added_rings[k],
1432							   &ctx_data);
1433		if (r)
1434			goto error_queues;
1435
1436		k += queue_types[i][1];
1437	}
1438
1439	/* start ring test and ib test for MES queues */
1440	amdgpu_mes_test_queues(added_rings);
1441
1442error_queues:
1443	/* remove all queues */
1444	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1445		if (!added_rings[i])
1446			continue;
1447		amdgpu_mes_remove_ring(adev, added_rings[i]);
1448	}
1449
1450	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1451		if (!gang_ids[i])
1452			continue;
1453		amdgpu_mes_remove_gang(adev, gang_ids[i]);
1454	}
1455
1456	amdgpu_mes_destroy_process(adev, pasid);
1457
1458error_vm:
1459	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1460
1461error_fini:
1462	amdgpu_vm_fini(adev, vm);
1463
1464error_pasid:
1465	if (pasid)
1466		amdgpu_pasid_free(pasid);
1467
1468	amdgpu_mes_ctx_free_meta_data(&ctx_data);
1469	kfree(vm);
1470	return 0;
1471}
1472
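/*
 * Fetch the MES firmware image for a pipe (with a fallback to the legacy
 * file name for the scheduler pipe) and register the ucode blobs for PSP
 * front-door loading.
 */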
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

1547
1548#if defined(CONFIG_DEBUG_FS)
1549
1550static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
1551{
1552	struct amdgpu_device *adev = m->private;
1553	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
1554
1555	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
1556		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
1557
1558	return 0;
1559}
1560
1561DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
1562
1563#endif
1564
1565void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
1566{
1567
1568#if defined(CONFIG_DEBUG_FS)
1569	struct drm_minor *minor = adev_to_drm(adev)->primary;
1570	struct dentry *root = minor->debugfs_root;
1571	if (adev->enable_mes && amdgpu_mes_log_enable)
1572		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
1573				    adev, &amdgpu_debugfs_mes_event_log_fops);
1574
1575#endif
1576}
v6.2
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
 
 
 
  24#include "amdgpu_mes.h"
  25#include "amdgpu.h"
  26#include "soc15_common.h"
  27#include "amdgpu_mes_ctx.h"
  28
  29#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
  30#define AMDGPU_ONE_DOORBELL_SIZE 8
  31
  32int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
  33{
  34	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
  35		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
  36		       PAGE_SIZE);
  37}
  38
  39int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
  40				      unsigned int *doorbell_index)
  41{
  42	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
  43			       adev->mes.max_doorbell_slices,
  44			       GFP_KERNEL);
  45	if (r > 0)
  46		*doorbell_index = r;
  47
  48	return r;
  49}
  50
  51void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
  52				      unsigned int doorbell_index)
  53{
  54	if (doorbell_index)
  55		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
  56}
  57
  58unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
  59					struct amdgpu_device *adev,
  60					uint32_t doorbell_index,
  61					unsigned int doorbell_id)
  62{
  63	return ((doorbell_index *
  64		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
  65		doorbell_id * 2);
  66}
  67
  68static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
  69					 struct amdgpu_mes_process *process,
  70					 int ip_type, uint64_t *doorbell_index)
  71{
  72	unsigned int offset, found;
 
  73
  74	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
  75		offset = adev->doorbell_index.sdma_engine[0];
  76		found = find_next_zero_bit(process->doorbell_bitmap,
  77					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
  78					   offset);
  79	} else {
  80		found = find_first_zero_bit(process->doorbell_bitmap,
  81					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
  82	}
  83
  84	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 
  85		DRM_WARN("No doorbell available\n");
  86		return -ENOSPC;
  87	}
  88
  89	set_bit(found, process->doorbell_bitmap);
  90
  91	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
  92				process->doorbell_index, found);
  93
 
 
  94	return 0;
  95}
  96
  97static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
  98					   struct amdgpu_mes_process *process,
  99					   uint32_t doorbell_index)
 100{
 101	unsigned int old, doorbell_id;
 102
 103	doorbell_id = doorbell_index -
 104		(process->doorbell_index *
 105		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
 106	doorbell_id /= 2;
 107
 108	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
 
 
 109	WARN_ON(!old);
 110}
 111
 112static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
 113{
 114	size_t doorbell_start_offset;
 115	size_t doorbell_aperture_size;
 116	size_t doorbell_process_limit;
 117	size_t aggregated_doorbell_start;
 118	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 119
 120	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
 121	aggregated_doorbell_start =
 122		roundup(aggregated_doorbell_start, PAGE_SIZE);
 123
 124	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
 125	doorbell_start_offset =
 126		roundup(doorbell_start_offset,
 127			amdgpu_mes_doorbell_process_slice(adev));
 128
 129	doorbell_aperture_size = adev->doorbell.size;
 130	doorbell_aperture_size =
 131			rounddown(doorbell_aperture_size,
 132				  amdgpu_mes_doorbell_process_slice(adev));
 133
 134	if (doorbell_aperture_size > doorbell_start_offset)
 135		doorbell_process_limit =
 136			(doorbell_aperture_size - doorbell_start_offset) /
 137			amdgpu_mes_doorbell_process_slice(adev);
 138	else
 139		return -ENOSPC;
 140
 141	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
 142	adev->mes.max_doorbell_slices = doorbell_process_limit;
 143
 144	/* allocate Qword range for aggregated doorbell */
 145	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
 146		adev->mes.aggregated_doorbells[i] =
 147			aggregated_doorbell_start / sizeof(u32) + i * 2;
 148
 149	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
 150	return 0;
 
 151}
 152
 153int amdgpu_mes_init(struct amdgpu_device *adev)
 154{
 155	int i, r;
 156
 157	adev->mes.adev = adev;
 158
 159	idr_init(&adev->mes.pasid_idr);
 160	idr_init(&adev->mes.gang_id_idr);
 161	idr_init(&adev->mes.queue_id_idr);
 162	ida_init(&adev->mes.doorbell_ida);
 163	spin_lock_init(&adev->mes.queue_id_lock);
 164	spin_lock_init(&adev->mes.ring_lock);
 165	mutex_init(&adev->mes.mutex_hidden);
 166
 167	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
 168	adev->mes.vmid_mask_mmhub = 0xffffff00;
 169	adev->mes.vmid_mask_gfxhub = 0xffffff00;
 170
 171	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
 172		/* use only 1st MEC pipes */
 173		if (i >= 4)
 174			continue;
 175		adev->mes.compute_hqd_mask[i] = 0xc;
 176	}
 177
 178	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
 179		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
 180
 181	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
 182		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
 
 183			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
 184		/* zero sdma_hqd_mask for non-existent engine */
 185		else if (adev->sdma.num_instances == 1)
 186			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
 187		else
 188			adev->mes.sdma_hqd_mask[i] = 0xfc;
 189	}
 190
 191	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
 192	if (r) {
 193		dev_err(adev->dev,
 194			"(%d) ring trail_fence_offs wb alloc failed\n", r);
 195		goto error_ids;
 196	}
 197	adev->mes.sch_ctx_gpu_addr =
 198		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
 199	adev->mes.sch_ctx_ptr =
 200		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
 201
 202	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
 203	if (r) {
 204		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 205		dev_err(adev->dev,
 206			"(%d) query_status_fence_offs wb alloc failed\n", r);
 207		goto error_ids;
 208	}
 209	adev->mes.query_status_fence_gpu_addr =
 210		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
 211	adev->mes.query_status_fence_ptr =
 212		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
 213
 214	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
 215	if (r) {
 216		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 217		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 218		dev_err(adev->dev,
 219			"(%d) read_val_offs alloc failed\n", r);
 220		goto error_ids;
 221	}
 222	adev->mes.read_val_gpu_addr =
 223		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
 224	adev->mes.read_val_ptr =
 225		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
 226
 227	r = amdgpu_mes_doorbell_init(adev);
 228	if (r)
 229		goto error;
 230
 231	return 0;
 232
 233error:
 234	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 235	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 236	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
 237error_ids:
 238	idr_destroy(&adev->mes.pasid_idr);
 239	idr_destroy(&adev->mes.gang_id_idr);
 240	idr_destroy(&adev->mes.queue_id_idr);
 241	ida_destroy(&adev->mes.doorbell_ida);
 242	mutex_destroy(&adev->mes.mutex_hidden);
 243	return r;
 244}
 245
 246void amdgpu_mes_fini(struct amdgpu_device *adev)
 247{
 248	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 249	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 250	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
 251
 252	idr_destroy(&adev->mes.pasid_idr);
 253	idr_destroy(&adev->mes.gang_id_idr);
 254	idr_destroy(&adev->mes.queue_id_idr);
 255	ida_destroy(&adev->mes.doorbell_ida);
 256	mutex_destroy(&adev->mes.mutex_hidden);
 257}
 258
 259static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
 260{
 261	amdgpu_bo_free_kernel(&q->mqd_obj,
 262			      &q->mqd_gpu_addr,
 263			      &q->mqd_cpu_ptr);
 264}
 265
 266int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
 267			      struct amdgpu_vm *vm)
 268{
 269	struct amdgpu_mes_process *process;
 270	int r;
 271
 272	/* allocate the mes process buffer */
 273	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
 274	if (!process) {
 275		DRM_ERROR("failed to allocate mes process\n");
 276		return -ENOMEM;
 277	}
 278
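	/*
	 * One bit per potential queue: DIV_ROUND_UP(1024, 8) gives a
	 * 128-byte bitmap per process.
	 */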
 279	process->doorbell_bitmap =
 280		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
 281				     BITS_PER_BYTE), GFP_KERNEL);
 282	if (!process->doorbell_bitmap) {
 283		DRM_ERROR("failed to allocate doorbell bitmap\n");
 284		kfree(process);
 285		return -ENOMEM;
 286	}
 287
 288	/* allocate the process context bo and map it */
 289	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
 290				    AMDGPU_GEM_DOMAIN_GTT,
 291				    &process->proc_ctx_bo,
 292				    &process->proc_ctx_gpu_addr,
 293				    &process->proc_ctx_cpu_ptr);
 294	if (r) {
 295		DRM_ERROR("failed to allocate process context bo\n");
 296		goto clean_up_memory;
 297	}
 298	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
 299
 300	/*
 301	 * Avoid taking any other locks under MES lock to avoid circular
 302	 * lock dependencies.
 303	 */
 304	amdgpu_mes_lock(&adev->mes);
 305
 306	/* add the mes process to idr list */
 307	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
 308		      GFP_KERNEL);
 309	if (r < 0) {
 310		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
 311		goto clean_up_ctx;
 312	}
 313
 314	/* allocate the starting doorbell index of the process */
 315	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
 316	if (r < 0) {
 317		DRM_ERROR("failed to allocate doorbell for process\n");
 318		goto clean_up_pasid;
 319	}
 320
 321	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);
 322
 323	INIT_LIST_HEAD(&process->gang_list);
 324	process->vm = vm;
 325	process->pasid = pasid;
 326	process->process_quantum = adev->mes.default_process_quantum;
 327	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
 328
 329	amdgpu_mes_unlock(&adev->mes);
 330	return 0;
 331
 332clean_up_pasid:
 333	idr_remove(&adev->mes.pasid_idr, pasid);
 334	amdgpu_mes_unlock(&adev->mes);
 335clean_up_ctx:
 336	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 337			      &process->proc_ctx_gpu_addr,
 338			      &process->proc_ctx_cpu_ptr);
 339clean_up_memory:
 340	kfree(process->doorbell_bitmap);
 341	kfree(process);
 342	return r;
 343}
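/*
 * Typical bring-up order (see amdgpu_mes_self_test() below for an
 * in-tree example):
 *
 *	amdgpu_mes_create_process(adev, pasid, vm);
 *	amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 *	amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 *
 * Teardown runs in the reverse order via amdgpu_mes_remove_hw_queue(),
 * amdgpu_mes_remove_gang() and amdgpu_mes_destroy_process().
 */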
 344
 345void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
 346{
 347	struct amdgpu_mes_process *process;
 348	struct amdgpu_mes_gang *gang, *tmp1;
 349	struct amdgpu_mes_queue *queue, *tmp2;
 350	struct mes_remove_queue_input queue_input;
 351	unsigned long flags;
 352	int r;
 353
 354	/*
 355	 * Avoid taking any other locks under MES lock to avoid circular
 356	 * lock dependencies.
 357	 */
 358	amdgpu_mes_lock(&adev->mes);
 359
 360	process = idr_find(&adev->mes.pasid_idr, pasid);
 361	if (!process) {
 362		DRM_WARN("pasid %d doesn't exist\n", pasid);
 363		amdgpu_mes_unlock(&adev->mes);
 364		return;
 365	}
 366
 367	/* Remove all queues from hardware */
 368	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 369		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 370			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 371			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 372			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 373
 374			queue_input.doorbell_offset = queue->doorbell_off;
 375			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 376
 377			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
 378							     &queue_input);
 379			if (r)
 380				DRM_WARN("failed to remove hardware queue\n");
 381		}
 382
 383		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 384	}
 385
 386	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
 387	idr_remove(&adev->mes.pasid_idr, pasid);
 388	amdgpu_mes_unlock(&adev->mes);
 389
 390	/* free all memory allocated by the process */
 391	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 392		/* free all queues in the gang */
 393		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 394			amdgpu_mes_queue_free_mqd(queue);
 395			list_del(&queue->list);
 396			kfree(queue);
 397		}
 398		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 399				      &gang->gang_ctx_gpu_addr,
 400				      &gang->gang_ctx_cpu_ptr);
 401		list_del(&gang->list);
 402		kfree(gang);
 403
 404	}
 405	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 406			      &process->proc_ctx_gpu_addr,
 407			      &process->proc_ctx_cpu_ptr);
 408	kfree(process->doorbell_bitmap);
 409	kfree(process);
 410}
 411
 412int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
 413			struct amdgpu_mes_gang_properties *gprops,
 414			int *gang_id)
 415{
 416	struct amdgpu_mes_process *process;
 417	struct amdgpu_mes_gang *gang;
 418	int r;
 419
 420	/* allocate the mes gang buffer */
 421	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
 422	if (!gang) {
 423		return -ENOMEM;
 424	}
 425
 426	/* allocate the gang context bo and map it to cpu space */
 427	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
 428				    AMDGPU_GEM_DOMAIN_GTT,
 429				    &gang->gang_ctx_bo,
 430				    &gang->gang_ctx_gpu_addr,
 431				    &gang->gang_ctx_cpu_ptr);
 432	if (r) {
 433		DRM_ERROR("failed to allocate gang context bo\n");
 434		goto clean_up_mem;
 435	}
 436	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
 437
 438	/*
 439	 * Avoid taking any other locks under MES lock to avoid circular
 440	 * lock dependencies.
 441	 */
 442	amdgpu_mes_lock(&adev->mes);
 443
 444	process = idr_find(&adev->mes.pasid_idr, pasid);
 445	if (!process) {
 446		DRM_ERROR("pasid %d doesn't exist\n", pasid);
 447		r = -EINVAL;
 448		goto clean_up_ctx;
 449	}
 450
 451	/* add the mes gang to idr list */
 452	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
 453		      GFP_KERNEL);
 454	if (r < 0) {
 455		DRM_ERROR("failed to allocate idr for gang\n");
 456		goto clean_up_ctx;
 457	}
 458
 459	gang->gang_id = r;
 460	*gang_id = r;
 461
 462	INIT_LIST_HEAD(&gang->queue_list);
 463	gang->process = process;
 464	gang->priority = gprops->priority;
 465	gang->gang_quantum = gprops->gang_quantum ?
 466		gprops->gang_quantum : adev->mes.default_gang_quantum;
 467	gang->global_priority_level = gprops->global_priority_level;
 468	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
 469	list_add_tail(&gang->list, &process->gang_list);
 470
 471	amdgpu_mes_unlock(&adev->mes);
 472	return 0;
 473
 474clean_up_ctx:
 475	amdgpu_mes_unlock(&adev->mes);
 476	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 477			      &gang->gang_ctx_gpu_addr,
 478			      &gang->gang_ctx_cpu_ptr);
 479clean_up_mem:
 480	kfree(gang);
 481	return r;
 482}
 483
 484int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
 485{
 486	struct amdgpu_mes_gang *gang;
 487
 488	/*
 489	 * Avoid taking any other locks under MES lock to avoid circular
 490	 * lock dependencies.
 491	 */
 492	amdgpu_mes_lock(&adev->mes);
 493
 494	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 495	if (!gang) {
 496		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 497		amdgpu_mes_unlock(&adev->mes);
 498		return -EINVAL;
 499	}
 500
 501	if (!list_empty(&gang->queue_list)) {
 502		DRM_ERROR("queue list is not empty\n");
 503		amdgpu_mes_unlock(&adev->mes);
 504		return -EBUSY;
 505	}
 506
 507	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 508	list_del(&gang->list);
 509	amdgpu_mes_unlock(&adev->mes);
 510
 511	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 512			      &gang->gang_ctx_gpu_addr,
 513			      &gang->gang_ctx_cpu_ptr);
 514
 515	kfree(gang);
 516
 517	return 0;
 518}
 519
 520int amdgpu_mes_suspend(struct amdgpu_device *adev)
 521{
 522	struct idr *idp;
 523	struct amdgpu_mes_process *process;
 524	struct amdgpu_mes_gang *gang;
 525	struct mes_suspend_gang_input input = {};
 526	int r, pasid;
 527
 528	/*
 529	 * Avoid taking any other locks under MES lock to avoid circular
 530	 * lock dependencies.
 531	 */
 532	amdgpu_mes_lock(&adev->mes);
 533
 534	idp = &adev->mes.pasid_idr;
 535
 536	idr_for_each_entry(idp, process, pasid) {
 537		list_for_each_entry(gang, &process->gang_list, list) {
 538			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
 539			if (r)
 540				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
 541					 pasid, gang->gang_id);
 542		}
 543	}
 544
 545	amdgpu_mes_unlock(&adev->mes);
 546	return 0;
 547}
 548
 549int amdgpu_mes_resume(struct amdgpu_device *adev)
 550{
 551	struct idr *idp;
 552	struct amdgpu_mes_process *process;
 553	struct amdgpu_mes_gang *gang;
 554	struct mes_resume_gang_input input = {};
 555	int r, pasid;
 556
 557	/*
 558	 * Avoid taking any other locks under MES lock to avoid circular
 559	 * lock dependencies.
 560	 */
 561	amdgpu_mes_lock(&adev->mes);
 562
 563	idp = &adev->mes.pasid_idr;
 564
 565	idr_for_each_entry(idp, process, pasid) {
 566		list_for_each_entry(gang, &process->gang_list, list) {
 567			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
 568			if (r)
 569				DRM_ERROR("failed to resume pasid %d gangid %d\n",
 570					 pasid, gang->gang_id);
 571		}
 572	}
 573
 574	amdgpu_mes_unlock(&adev->mes);
 575	return 0;
 576}
 577
 578static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
 579				     struct amdgpu_mes_queue *q,
 580				     struct amdgpu_mes_queue_properties *p)
 581{
 582	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 583	u32 mqd_size = mqd_mgr->mqd_size;
 584	int r;
 585
 586	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
 587				    AMDGPU_GEM_DOMAIN_GTT,
 588				    &q->mqd_obj,
 589				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
 590	if (r) {
 591		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
 592		return r;
 593	}
 594	memset(q->mqd_cpu_ptr, 0, mqd_size);
 595
 596	r = amdgpu_bo_reserve(q->mqd_obj, false);
 597	if (unlikely(r != 0))
 598		goto clean_up;
 599
 600	return 0;
 601
 602clean_up:
 603	amdgpu_bo_free_kernel(&q->mqd_obj,
 604			      &q->mqd_gpu_addr,
 605			      &q->mqd_cpu_ptr);
 606	return r;
 607}
 608
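/*
 * The MQD ("memory queue descriptor") initialised below holds the queue
 * state MES loads when it maps the queue onto a hardware queue slot;
 * hqd_active stays false because activation is left to MES.
 */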
 609static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
 610				     struct amdgpu_mes_queue *q,
 611				     struct amdgpu_mes_queue_properties *p)
 612{
 613	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 614	struct amdgpu_mqd_prop mqd_prop = {0};
 615
 616	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
 617	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
 618	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
 619	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
 620	mqd_prop.queue_size = p->queue_size;
 621	mqd_prop.use_doorbell = true;
 622	mqd_prop.doorbell_index = p->doorbell_off;
 623	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
 624	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
 625	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
 626	mqd_prop.hqd_active = false;
 627
 628	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
 629
 630	amdgpu_bo_unreserve(q->mqd_obj);
 631}
 632
 633int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
 634			    struct amdgpu_mes_queue_properties *qprops,
 635			    int *queue_id)
 636{
 637	struct amdgpu_mes_queue *queue;
 638	struct amdgpu_mes_gang *gang;
 639	struct mes_add_queue_input queue_input;
 640	unsigned long flags;
 641	int r;
 642
 643	/* allocate the mes queue buffer */
 644	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
 645	if (!queue) {
 646		DRM_ERROR("Failed to allocate memory for queue\n");
 647		return -ENOMEM;
 648	}
 649
 650	/* Allocate the queue mqd */
 651	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
 652	if (r)
 653		goto clean_up_memory;
 654
 655	/*
 656	 * Avoid taking any other locks under MES lock to avoid circular
 657	 * lock dependencies.
 658	 */
 659	amdgpu_mes_lock(&adev->mes);
 660
 661	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 662	if (!gang) {
 663		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 664		r = -EINVAL;
 665		goto clean_up_mqd;
 666	}
 667
 668	/* add the mes queue to idr list */
 669	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 670	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
 671		      GFP_ATOMIC);
 672	if (r < 0) {
 673		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 674		goto clean_up_mqd;
 675	}
 676	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 677	*queue_id = queue->queue_id = r;
 678
 679	/* allocate a doorbell index for the queue */
 680	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
 681					  qprops->queue_type,
 682					  &qprops->doorbell_off);
 683	if (r)
 684		goto clean_up_queue_id;
 685
 686	/* initialize the queue mqd */
 687	amdgpu_mes_queue_init_mqd(adev, queue, qprops);
 688
 689	/* add hw queue to mes */
 690	queue_input.process_id = gang->process->pasid;
 691
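	/*
	 * Rebase the page-directory address from the GMC's VRAM view to the
	 * MC address MES expects (a reading of the arithmetic below, not of
	 * MES firmware documentation).
	 */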
 692	queue_input.page_table_base_addr =
 693		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
 694		adev->gmc.vram_start;
 695
 696	queue_input.process_va_start = 0;
 697	queue_input.process_va_end =
 698		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
 699	queue_input.process_quantum = gang->process->process_quantum;
 700	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
 701	queue_input.gang_quantum = gang->gang_quantum;
 702	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 703	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
 704	queue_input.gang_global_priority_level = gang->global_priority_level;
 705	queue_input.doorbell_offset = qprops->doorbell_off;
 706	queue_input.mqd_addr = queue->mqd_gpu_addr;
 707	queue_input.wptr_addr = qprops->wptr_gpu_addr;
 708	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
 709	queue_input.queue_type = qprops->queue_type;
 710	queue_input.paging = qprops->paging;
 711	queue_input.is_kfd_process = 0;
 712
 713	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
 714	if (r) {
 715		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
 716			  qprops->doorbell_off);
 717		goto clean_up_doorbell;
 718	}
 719
 720	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
 721		  "queue type=%d, doorbell=0x%llx\n",
 722		  gang->process->pasid, gang_id, qprops->queue_type,
 723		  qprops->doorbell_off);
 724
 725	queue->ring = qprops->ring;
 726	queue->doorbell_off = qprops->doorbell_off;
 727	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
 728	queue->queue_type = qprops->queue_type;
 729	queue->paging = qprops->paging;
 730	queue->gang = gang;
 731	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
 732	list_add_tail(&queue->list, &gang->queue_list);
 733
 734	amdgpu_mes_unlock(&adev->mes);
 735	return 0;
 736
 737clean_up_doorbell:
 738	amdgpu_mes_queue_doorbell_free(adev, gang->process,
 739				       qprops->doorbell_off);
 740clean_up_queue_id:
 741	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 742	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 743	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 744clean_up_mqd:
 745	amdgpu_mes_unlock(&adev->mes);
 746	amdgpu_mes_queue_free_mqd(queue);
 747clean_up_memory:
 748	kfree(queue);
 749	return r;
 750}
 751
 752int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
 753{
 754	unsigned long flags;
 755	struct amdgpu_mes_queue *queue;
 756	struct amdgpu_mes_gang *gang;
 757	struct mes_remove_queue_input queue_input;
 758	int r;
 759
 760	/*
 761	 * Avoid taking any other locks under MES lock to avoid circular
 762	 * lock dependencies.
 763	 */
 764	amdgpu_mes_lock(&adev->mes);
 765
 766	/* remove the mes queue from idr list */
 767	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 768
 769	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 770	if (!queue) {
 771		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 772		amdgpu_mes_unlock(&adev->mes);
 773		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 774		return -EINVAL;
 775	}
 776
 777	idr_remove(&adev->mes.queue_id_idr, queue_id);
 778	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 779
 780	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
 781		  queue->doorbell_off);
 782
 783	gang = queue->gang;
 784	queue_input.doorbell_offset = queue->doorbell_off;
 785	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 786
 787	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
 788	if (r)
 789		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
 790			  queue_id);
 791
 792	list_del(&queue->list);
 793	amdgpu_mes_queue_doorbell_free(adev, gang->process,
 794				       queue->doorbell_off);
 795	amdgpu_mes_unlock(&adev->mes);
 796
 797	amdgpu_mes_queue_free_mqd(queue);
 798	kfree(queue);
 799	return 0;
 800}
 801
 802int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 803				  struct amdgpu_ring *ring,
 804				  enum amdgpu_unmap_queues_action action,
 805				  u64 gpu_addr, u64 seq)
 806{
 807	struct mes_unmap_legacy_queue_input queue_input;
 808	int r;
 809
 810	queue_input.action = action;
 811	queue_input.queue_type = ring->funcs->type;
 812	queue_input.doorbell_offset = ring->doorbell_index;
 813	queue_input.pipe_id = ring->pipe;
 814	queue_input.queue_id = ring->queue;
 815	queue_input.trail_fence_addr = gpu_addr;
 816	queue_input.trail_fence_data = seq;
 817
 818	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
 819	if (r)
 820		DRM_ERROR("failed to unmap legacy queue\n");
 821
 822	return r;
 823}
 824
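/*
 * The helpers below tunnel register accesses through the MES firmware
 * via MES_MISC_OP_* packets. A minimal usage sketch (illustrative only):
 *
 *	uint32_t v = amdgpu_mes_rreg(adev, reg);
 *	amdgpu_mes_wreg(adev, reg, v | 1);
 *	amdgpu_mes_reg_wait(adev, reg, 1, 1);
 */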
 825uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
 826{
 827	struct mes_misc_op_input op_input;
 828	int r, val = 0;
 829
 830	op_input.op = MES_MISC_OP_READ_REG;
 831	op_input.read_reg.reg_offset = reg;
 832	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
 833
 834	if (!adev->mes.funcs->misc_op) {
 835		DRM_ERROR("mes rreg is not supported!\n");
 836		goto error;
 837	}
 838
 839	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 840	if (r)
 841		DRM_ERROR("failed to read reg (0x%x)\n", reg);
 842	else
 843		val = *(adev->mes.read_val_ptr);
 844
 845error:
 846	return val;
 847}
 848
 849int amdgpu_mes_wreg(struct amdgpu_device *adev,
 850		    uint32_t reg, uint32_t val)
 851{
 852	struct mes_misc_op_input op_input;
 853	int r;
 854
 855	op_input.op = MES_MISC_OP_WRITE_REG;
 856	op_input.write_reg.reg_offset = reg;
 857	op_input.write_reg.reg_value = val;
 858
 859	if (!adev->mes.funcs->misc_op) {
 860		DRM_ERROR("mes wreg is not supported!\n");
 861		r = -EINVAL;
 862		goto error;
 863	}
 864
 865	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 866	if (r)
 867		DRM_ERROR("failed to write reg (0x%x)\n", reg);
 868
 869error:
 870	return r;
 871}
 872
 873int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
 874				  uint32_t reg0, uint32_t reg1,
 875				  uint32_t ref, uint32_t mask)
 876{
 877	struct mes_misc_op_input op_input;
 878	int r;
 879
 880	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
 881	op_input.wrm_reg.reg0 = reg0;
 882	op_input.wrm_reg.reg1 = reg1;
 883	op_input.wrm_reg.ref = ref;
 884	op_input.wrm_reg.mask = mask;
 885
 886	if (!adev->mes.funcs->misc_op) {
 887		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
 888		r = -EINVAL;
 889		goto error;
 890	}
 891
 892	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 893	if (r)
 894		DRM_ERROR("failed to reg_write_reg_wait\n");
 895
 896error:
 897	return r;
 898}
 899
 900int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
 901			uint32_t val, uint32_t mask)
 902{
 903	struct mes_misc_op_input op_input;
 904	int r;
 905
 906	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
 907	op_input.wrm_reg.reg0 = reg;
 908	op_input.wrm_reg.ref = val;
 909	op_input.wrm_reg.mask = mask;
 910
 911	if (!adev->mes.funcs->misc_op) {
 912		DRM_ERROR("mes reg wait is not supported!\n");
 913		r = -EINVAL;
 914		goto error;
 915	}
 916
 917	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 918	if (r)
 919		DRM_ERROR("failed to reg_wait\n");
 920
 921error:
 922	return r;
 923}
 924
 925static void
 926amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
 927			       struct amdgpu_ring *ring,
 928			       struct amdgpu_mes_queue_properties *props)
 929{
 930	props->queue_type = ring->funcs->type;
 931	props->hqd_base_gpu_addr = ring->gpu_addr;
 932	props->rptr_gpu_addr = ring->rptr_gpu_addr;
 933	props->wptr_gpu_addr = ring->wptr_gpu_addr;
 934	props->wptr_mc_addr =
 935		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
 936	props->queue_size = ring->ring_size;
 937	props->eop_gpu_addr = ring->eop_gpu_addr;
 938	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 939	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
 940	props->paging = false;
 941	props->ring = ring;
 942}
 943
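/*
 * Given an engine array name in struct amdgpu_mes_ctx_meta_data, the
 * macro below returns (from the enclosing function) the byte offset of
 * the requested slot, ring, ib or padding area for ring->idx.
 */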
 944#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
 945do {									\
 946       if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
 947		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
 948				_eng[ring->idx].slots[id_offs]);        \
 949       else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
 950		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
 951				_eng[ring->idx].ring);                  \
 952       else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
 953		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
 954				_eng[ring->idx].ib);                    \
 955       else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)			\
 956		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
 957				_eng[ring->idx].padding);               \
 958} while (0)
 959
 960int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
 961{
 962	switch (ring->funcs->type) {
 963	case AMDGPU_RING_TYPE_GFX:
 964		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
 965		break;
 966	case AMDGPU_RING_TYPE_COMPUTE:
 967		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
 968		break;
 969	case AMDGPU_RING_TYPE_SDMA:
 970		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
 971		break;
 972	default:
 973		break;
 974	}
 975
 976	WARN_ON(1);
 977	return -EINVAL;
 978}
 979
 980int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 981			int queue_type, int idx,
 982			struct amdgpu_mes_ctx_data *ctx_data,
 983			struct amdgpu_ring **out)
 984{
 985	struct amdgpu_ring *ring;
 986	struct amdgpu_mes_gang *gang;
 987	struct amdgpu_mes_queue_properties qprops = {0};
 988	int r, queue_id, pasid;
 989
 990	/*
 991	 * Avoid taking any other locks under MES lock to avoid circular
 992	 * lock dependencies.
 993	 */
 994	amdgpu_mes_lock(&adev->mes);
 995	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 996	if (!gang) {
 997		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 998		amdgpu_mes_unlock(&adev->mes);
 999		return -EINVAL;
1000	}
1001	pasid = gang->process->pasid;
1002
1003	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
1004	if (!ring) {
1005		amdgpu_mes_unlock(&adev->mes);
1006		return -ENOMEM;
1007	}
1008
1009	ring->ring_obj = NULL;
1010	ring->use_doorbell = true;
1011	ring->is_mes_queue = true;
1012	ring->mes_ctx = ctx_data;
1013	ring->idx = idx;
1014	ring->no_scheduler = true;
1015
1016	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
1017		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
1018				      compute[ring->idx].mec_hpd);
1019		ring->eop_gpu_addr =
1020			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1021	}
1022
1023	switch (queue_type) {
1024	case AMDGPU_RING_TYPE_GFX:
1025		ring->funcs = adev->gfx.gfx_ring[0].funcs;
1026		break;
1027	case AMDGPU_RING_TYPE_COMPUTE:
1028		ring->funcs = adev->gfx.compute_ring[0].funcs;
1029		break;
1030	case AMDGPU_RING_TYPE_SDMA:
1031		ring->funcs = adev->sdma.instance[0].ring.funcs;
1032		break;
1033	default:
1034		BUG();
1035	}
1036
1037	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1038			     AMDGPU_RING_PRIO_DEFAULT, NULL);
1039	if (r)
1040		goto clean_up_memory;
1041
1042	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
1043
1044	dma_fence_wait(gang->process->vm->last_update, false);
1045	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
1046	amdgpu_mes_unlock(&adev->mes);
1047
1048	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
1049	if (r)
1050		goto clean_up_ring;
1051
1052	ring->hw_queue_id = queue_id;
1053	ring->doorbell_index = qprops.doorbell_off;
1054
1055	if (queue_type == AMDGPU_RING_TYPE_GFX)
1056		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
1057	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
1058		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
1059			queue_id);
1060	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
1061		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
1062			queue_id);
1063	else
1064		BUG();
1065
1066	*out = ring;
1067	return 0;
1068
1069clean_up_ring:
1070	amdgpu_ring_fini(ring);
1071clean_up_memory:
1072	kfree(ring);
1073	amdgpu_mes_unlock(&adev->mes);
1074	return r;
1075}
1076
1077void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
1078			    struct amdgpu_ring *ring)
1079{
1080	if (!ring)
1081		return;
1082
1083	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
1084	amdgpu_ring_fini(ring);
1085	kfree(ring);
1086}
1087
1088uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
1089						   enum amdgpu_mes_priority_level prio)
1090{
1091	return adev->mes.aggregated_doorbells[prio];
1092}
1093
1094int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
1095				   struct amdgpu_mes_ctx_data *ctx_data)
1096{
1097	int r;
1098
1099	r = amdgpu_bo_create_kernel(adev,
1100			    sizeof(struct amdgpu_mes_ctx_meta_data),
1101			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1102			    &ctx_data->meta_data_obj,
1103			    &ctx_data->meta_data_mc_addr,
1104			    &ctx_data->meta_data_ptr);
1105	if (r)
1106		return r;
1107
1108	memset(ctx_data->meta_data_ptr, 0,
1109	       sizeof(struct amdgpu_mes_ctx_meta_data));
1110
1111	return 0;
1112}
1113
1114void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
1115{
1116	if (ctx_data->meta_data_obj)
1117		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
1118				      &ctx_data->meta_data_mc_addr,
1119				      &ctx_data->meta_data_ptr);
1120}
1121
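/*
 * Map the ctx meta-data BO into a process VM: reserve the BO together
 * with the VM's page directory, create and map a bo_va, then flush the
 * page-table updates and wait on them before backing off the
 * reservations.
 */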
1122int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
1123				 struct amdgpu_vm *vm,
1124				 struct amdgpu_mes_ctx_data *ctx_data)
1125{
1126	struct amdgpu_bo_va *bo_va;
1127	struct ww_acquire_ctx ticket;
1128	struct list_head list;
1129	struct amdgpu_bo_list_entry pd;
1130	struct ttm_validate_buffer csa_tv;
1131	struct amdgpu_sync sync;
1132	int r;
1133
1134	amdgpu_sync_create(&sync);
1135	INIT_LIST_HEAD(&list);
1136	INIT_LIST_HEAD(&csa_tv.head);
1137
1138	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
1139	csa_tv.num_shared = 1;
1140
1141	list_add(&csa_tv.head, &list);
1142	amdgpu_vm_get_pd_bo(vm, &list, &pd);
1143
1144	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
1145	if (r) {
1146		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
1147		return r;
1148	}
1149
1150	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
1151	if (!bo_va) {
1152		ttm_eu_backoff_reservation(&ticket, &list);
1153		DRM_ERROR("failed to create bo_va for meta data BO\n");
1154		return -ENOMEM;
1155	}
1156
1157	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
1158			     sizeof(struct amdgpu_mes_ctx_meta_data),
1159			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
1160			     AMDGPU_PTE_EXECUTABLE);
1161
1162	if (r) {
1163		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
1164		goto error;
1165	}
1166
1167	r = amdgpu_vm_bo_update(adev, bo_va, false);
1168	if (r) {
1169		DRM_ERROR("failed to do vm_bo_update on meta data\n");
1170		goto error;
1171	}
1172	amdgpu_sync_fence(&sync, bo_va->last_pt_update);
1173
1174	r = amdgpu_vm_update_pdes(adev, vm, false);
1175	if (r) {
1176		DRM_ERROR("failed to update pdes on meta data\n");
1177		goto error;
1178	}
1179	amdgpu_sync_fence(&sync, vm->last_update);
1180
1181	amdgpu_sync_wait(&sync, false);
1182	ttm_eu_backoff_reservation(&ticket, &list);
1183
1184	amdgpu_sync_free(&sync);
1185	ctx_data->meta_data_va = bo_va;
1186	return 0;
1187
1188error:
1189	amdgpu_vm_bo_del(adev, bo_va);
1190	ttm_eu_backoff_reservation(&ticket, &list);
1191	amdgpu_sync_free(&sync);
1192	return r;
1193}
1194
1195int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
1196				   struct amdgpu_mes_ctx_data *ctx_data)
1197{
1198	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
1199	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
1200	struct amdgpu_vm *vm = bo_va->base.vm;
1201	struct amdgpu_bo_list_entry vm_pd;
1202	struct list_head list, duplicates;
1203	struct dma_fence *fence = NULL;
1204	struct ttm_validate_buffer tv;
1205	struct ww_acquire_ctx ticket;
1206	long r = 0;
1207
1208	INIT_LIST_HEAD(&list);
1209	INIT_LIST_HEAD(&duplicates);
1210
1211	tv.bo = &bo->tbo;
1212	tv.num_shared = 2;
1213	list_add(&tv.head, &list);
1214
1215	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
1216
1217	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
1218	if (r) {
1219		dev_err(adev->dev, "leaking bo va because "
1220			"we failed to reserve bo (%ld)\n", r);
1221		return r;
1222	}
1223
1224	amdgpu_vm_bo_del(adev, bo_va);
1225	if (!amdgpu_vm_ready(vm))
1226		goto out_unlock;
1227
1228	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
1229	if (r)
1230		goto out_unlock;
1231	if (fence) {
1232		amdgpu_bo_fence(bo, fence, true);
1233		fence = NULL;
1234	}
1235
1236	r = amdgpu_vm_clear_freed(adev, vm, &fence);
1237	if (r || !fence)
1238		goto out_unlock;
1239
1240	dma_fence_wait(fence, false);
1241	amdgpu_bo_fence(bo, fence, true);
1242	dma_fence_put(fence);
1243
1244out_unlock:
1245	if (unlikely(r < 0))
1246		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
1247	ttm_eu_backoff_reservation(&ticket, &list);
1248
1249	return r;
1250}
1251
1252static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
1253					  int pasid, int *gang_id,
1254					  int queue_type, int num_queue,
1255					  struct amdgpu_ring **added_rings,
1256					  struct amdgpu_mes_ctx_data *ctx_data)
1257{
1258	struct amdgpu_ring *ring;
1259	struct amdgpu_mes_gang_properties gprops = {0};
1260	int r, j;
1261
1262	/* create a gang for the process */
1263	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1264	gprops.gang_quantum = adev->mes.default_gang_quantum;
1265	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1266	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1267	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1268
1269	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
1270	if (r) {
1271		DRM_ERROR("failed to add gang\n");
1272		return r;
1273	}
1274
1275	/* create queues for the gang */
1276	for (j = 0; j < num_queue; j++) {
1277		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
1278					ctx_data, &ring);
1279		if (r) {
1280			DRM_ERROR("failed to add ring\n");
1281			break;
1282		}
1283
1284		DRM_INFO("ring %s was added\n", ring->name);
1285		added_rings[j] = ring;
1286	}
1287
1288	return 0;
1289}
1290
1291static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1292{
1293	struct amdgpu_ring *ring;
1294	int i, r;
1295
1296	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1297		ring = added_rings[i];
1298		if (!ring)
1299			continue;
1300
1301		r = amdgpu_ring_test_ring(ring);
1302		if (r) {
1303			DRM_DEV_ERROR(ring->adev->dev,
1304				      "ring %s test failed (%d)\n",
1305				      ring->name, r);
1306			return r;
1307		} else
1308			DRM_INFO("ring %s test pass\n", ring->name);
1309
1310		r = amdgpu_ring_test_ib(ring, 1000 * 10);
1311		if (r) {
1312			DRM_DEV_ERROR(ring->adev->dev,
1313				      "ring %s ib test failed (%d)\n",
1314				      ring->name, r);
1315			return r;
1316		} else
1317			DRM_INFO("ring %s ib test pass\n", ring->name);
1318	}
1319
1320	return 0;
1321}
1322
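/*
 * Self test: build a throwaway VM and MES process, spawn one gang per
 * queue type with the maximum ring count for each, then run ring and IB
 * tests on every ring that was successfully added.
 */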
1323int amdgpu_mes_self_test(struct amdgpu_device *adev)
1324{
1325	struct amdgpu_vm *vm = NULL;
1326	struct amdgpu_mes_ctx_data ctx_data = {0};
1327	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1328	int gang_ids[3] = {0};
1329	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
1330				   AMDGPU_MES_CTX_MAX_GFX_RINGS},
1331				 { AMDGPU_RING_TYPE_COMPUTE,
1332				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
1333				 { AMDGPU_RING_TYPE_SDMA,
1334				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
1335	int i, r, pasid, k = 0;
1336
1337	pasid = amdgpu_pasid_alloc(16);
1338	if (pasid < 0) {
1339		dev_warn(adev->dev, "No more PASIDs available!\n");
1340		pasid = 0;
1341	}
1342
1343	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1344	if (!vm) {
1345		r = -ENOMEM;
1346		goto error_pasid;
1347	}
1348
1349	r = amdgpu_vm_init(adev, vm);
1350	if (r) {
1351		DRM_ERROR("failed to initialize vm\n");
1352		goto error_pasid;
1353	}
1354
1355	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1356	if (r) {
1357		DRM_ERROR("failed to alloc ctx meta data\n");
1358		goto error_fini;
1359	}
1360
1361	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
1362	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1363	if (r) {
1364		DRM_ERROR("failed to map ctx meta data\n");
1365		goto error_vm;
1366	}
1367
1368	r = amdgpu_mes_create_process(adev, pasid, vm);
1369	if (r) {
1370		DRM_ERROR("failed to create MES process\n");
1371		goto error_vm;
1372	}
1373
1374	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
1375		/* On GFX v10.3, firmware doesn't yet support mapping SDMA queues. */
1376		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
1377		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
1378		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1379			continue;
1380
1381		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1382							   &gang_ids[i],
1383							   queue_types[i][0],
1384							   queue_types[i][1],
1385							   &added_rings[k],
1386							   &ctx_data);
1387		if (r)
1388			goto error_queues;
1389
1390		k += queue_types[i][1];
1391	}
1392
1393	/* start ring test and ib test for MES queues */
1394	amdgpu_mes_test_queues(added_rings);
1395
1396error_queues:
1397	/* remove all queues */
1398	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1399		if (!added_rings[i])
1400			continue;
1401		amdgpu_mes_remove_ring(adev, added_rings[i]);
1402	}
1403
1404	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1405		if (!gang_ids[i])
1406			continue;
1407		amdgpu_mes_remove_gang(adev, gang_ids[i]);
1408	}
1409
1410	amdgpu_mes_destroy_process(adev, pasid);
1411
1412error_vm:
1413	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1414
1415error_fini:
1416	amdgpu_vm_fini(adev, vm);
1417
1418error_pasid:
1419	if (pasid)
1420		amdgpu_pasid_free(pasid);
1421
1422	amdgpu_mes_ctx_free_meta_data(&ctx_data);
1423	kfree(vm);
1424	return 0;
1425}