v6.9.4: drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <drm/drm_exec.h>
  26
  27#include "amdgpu_mes.h"
  28#include "amdgpu.h"
  29#include "soc15_common.h"
  30#include "amdgpu_mes_ctx.h"
  31
  32#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
  33#define AMDGPU_ONE_DOORBELL_SIZE 8
  34
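/*
 * One process's doorbell slice: 8 bytes per doorbell for up to
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS queues, rounded up to a
 * whole page so each process gets page-aligned doorbell space.
 */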
  35int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
  36{
  37	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
  38		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
  39		       PAGE_SIZE);
  40}
  41
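/*
 * Hand out one kernel doorbell from the MES bitmap. SDMA queues start
 * the search at the SDMA engine 0 doorbell; everything else searches
 * from slot 0. The result is an absolute dword offset on the doorbell
 * BAR; each 64-bit doorbell covers two dwords, hence the "* 2".
 */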
  42static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
  43					 struct amdgpu_mes_process *process,
  44					 int ip_type, uint64_t *doorbell_index)
  45{
  46	unsigned int offset, found;
  47	struct amdgpu_mes *mes = &adev->mes;
  48
  49	if (ip_type == AMDGPU_RING_TYPE_SDMA)
  50		offset = adev->doorbell_index.sdma_engine[0];
  51	else
  52		offset = 0;
  53
  54	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
  55	if (found >= mes->num_mes_dbs) {
  56		DRM_WARN("No doorbell available\n");
  57		return -ENOSPC;
  58	}
  59
  60	set_bit(found, mes->doorbell_bitmap);
  61
  62	/* Get the absolute doorbell index on BAR */
  63	*doorbell_index = mes->db_start_dw_offset + found * 2;
  64	return 0;
  65}
  66
  67static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
  68					   struct amdgpu_mes_process *process,
  69					   uint32_t doorbell_index)
  70{
  71	unsigned int old, rel_index;
  72	struct amdgpu_mes *mes = &adev->mes;
  73
  74	/* Find the relative index of the doorbell in this object */
  75	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
  76	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
  77	WARN_ON(!old);
  78}
  79
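/*
 * Carve one page of doorbells out for MES: the first
 * AMDGPU_MES_PRIORITY_NUM_LEVELS slots are pre-claimed as aggregated
 * doorbells (one per priority level), the rest are handed out on
 * demand through the bitmap.
 */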
  80static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
  81{
  82	int i;
  83	struct amdgpu_mes *mes = &adev->mes;
  84
  85	/* Bitmap for dynamic allocation of kernel doorbells */
  86	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
  87	if (!mes->doorbell_bitmap) {
  88		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
  89		return -ENOMEM;
  90	}
  91
  92	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
  93	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
  94		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
  95		set_bit(i, mes->doorbell_bitmap);
  96	}
  97
  98	return 0;
  99}
 100
 101static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
 102{
 103	int r;
 104
 105	if (!amdgpu_mes_log_enable)
 106		return 0;
 107
 108	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
 109				    AMDGPU_GEM_DOMAIN_GTT,
 110				    &adev->mes.event_log_gpu_obj,
 111				    &adev->mes.event_log_gpu_addr,
 112				    &adev->mes.event_log_cpu_addr);
 113	if (r) {
 114		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
 115		return r;
 116	}
 117
 118	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
 119
  120	return 0;
 121
 122}
 123
 124static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
 125{
 126	bitmap_free(adev->mes.doorbell_bitmap);
 127}
 128
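/*
 * One-time MES bring-up: ID allocators and locks, the VMID and HQD
 * masks handed to the scheduler for each IP, write-back slots for the
 * scheduler context, query-status fence and register reads, and
 * finally the doorbell page and optional event log buffer.
 */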
 129int amdgpu_mes_init(struct amdgpu_device *adev)
 130{
 131	int i, r;
 132
 133	adev->mes.adev = adev;
 134
 135	idr_init(&adev->mes.pasid_idr);
 136	idr_init(&adev->mes.gang_id_idr);
 137	idr_init(&adev->mes.queue_id_idr);
 138	ida_init(&adev->mes.doorbell_ida);
 139	spin_lock_init(&adev->mes.queue_id_lock);
 140	spin_lock_init(&adev->mes.ring_lock);
 141	mutex_init(&adev->mes.mutex_hidden);
 142
 143	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
 144	adev->mes.vmid_mask_mmhub = 0xffffff00;
 145	adev->mes.vmid_mask_gfxhub = 0xffffff00;
 146
 147	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
 148		/* use only 1st MEC pipes */
 149		if (i >= 4)
 150			continue;
 151		adev->mes.compute_hqd_mask[i] = 0xc;
 152	}
 153
 154	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
 155		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
 156
 157	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
 158		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
 159		    IP_VERSION(6, 0, 0))
 160			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
 161		/* zero sdma_hqd_mask for non-existent engine */
 162		else if (adev->sdma.num_instances == 1)
 163			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
 164		else
 165			adev->mes.sdma_hqd_mask[i] = 0xfc;
 166	}
 167
 168	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
 169	if (r) {
 170		dev_err(adev->dev,
  171			"(%d) sch_ctx_offs wb alloc failed\n", r);
 172		goto error_ids;
 173	}
 174	adev->mes.sch_ctx_gpu_addr =
 175		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
 176	adev->mes.sch_ctx_ptr =
 177		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
 178
 179	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
 180	if (r) {
 181		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 182		dev_err(adev->dev,
 183			"(%d) query_status_fence_offs wb alloc failed\n", r);
 184		goto error_ids;
 185	}
 186	adev->mes.query_status_fence_gpu_addr =
 187		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
 188	adev->mes.query_status_fence_ptr =
 189		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
 190
 191	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
 192	if (r) {
 193		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 194		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 195		dev_err(adev->dev,
 196			"(%d) read_val_offs alloc failed\n", r);
 197		goto error_ids;
 198	}
 199	adev->mes.read_val_gpu_addr =
 200		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
 201	adev->mes.read_val_ptr =
 202		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
 203
 204	r = amdgpu_mes_doorbell_init(adev);
 205	if (r)
 206		goto error;
 207
 208	r = amdgpu_mes_event_log_init(adev);
 209	if (r)
 210		goto error_doorbell;
 211
 212	return 0;
 213
 214error_doorbell:
 215	amdgpu_mes_doorbell_free(adev);
 216error:
 217	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 218	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 219	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
 220error_ids:
 221	idr_destroy(&adev->mes.pasid_idr);
 222	idr_destroy(&adev->mes.gang_id_idr);
 223	idr_destroy(&adev->mes.queue_id_idr);
 224	ida_destroy(&adev->mes.doorbell_ida);
 225	mutex_destroy(&adev->mes.mutex_hidden);
 226	return r;
 227}
 228
 229void amdgpu_mes_fini(struct amdgpu_device *adev)
 230{
 231	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
 232			      &adev->mes.event_log_gpu_addr,
 233			      &adev->mes.event_log_cpu_addr);
 234
 235	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 236	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 237	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
 238	amdgpu_mes_doorbell_free(adev);
 239
 240	idr_destroy(&adev->mes.pasid_idr);
 241	idr_destroy(&adev->mes.gang_id_idr);
 242	idr_destroy(&adev->mes.queue_id_idr);
 243	ida_destroy(&adev->mes.doorbell_ida);
 244	mutex_destroy(&adev->mes.mutex_hidden);
 245}
 246
 247static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
 248{
 249	amdgpu_bo_free_kernel(&q->mqd_obj,
 250			      &q->mqd_gpu_addr,
 251			      &q->mqd_cpu_ptr);
 252}
 253
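/*
 * Create the per-PASID MES process: allocate and zero the process
 * context BO first, then publish the process in the pasid IDR. The BO
 * is created before taking the MES lock so that no other lock is
 * acquired while it is held.
 */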
 254int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
 255			      struct amdgpu_vm *vm)
 256{
 257	struct amdgpu_mes_process *process;
 258	int r;
 259
 260	/* allocate the mes process buffer */
 261	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
 262	if (!process) {
 263		DRM_ERROR("no more memory to create mes process\n");
 264		return -ENOMEM;
 265	}
 266
 267	/* allocate the process context bo and map it */
 268	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
 269				    AMDGPU_GEM_DOMAIN_GTT,
 270				    &process->proc_ctx_bo,
 271				    &process->proc_ctx_gpu_addr,
 272				    &process->proc_ctx_cpu_ptr);
 273	if (r) {
 274		DRM_ERROR("failed to allocate process context bo\n");
 275		goto clean_up_memory;
 276	}
 277	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
 278
 279	/*
 280	 * Avoid taking any other locks under MES lock to avoid circular
 281	 * lock dependencies.
 282	 */
 283	amdgpu_mes_lock(&adev->mes);
 284
 285	/* add the mes process to idr list */
 286	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
 287		      GFP_KERNEL);
 288	if (r < 0) {
 289		DRM_ERROR("failed to lock pasid=%d\n", pasid);
 290		goto clean_up_ctx;
 291	}
 292
 293	INIT_LIST_HEAD(&process->gang_list);
 294	process->vm = vm;
 295	process->pasid = pasid;
 296	process->process_quantum = adev->mes.default_process_quantum;
 297	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
 298
 299	amdgpu_mes_unlock(&adev->mes);
 300	return 0;
 301
 302clean_up_ctx:
 303	amdgpu_mes_unlock(&adev->mes);
 304	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 305			      &process->proc_ctx_gpu_addr,
 306			      &process->proc_ctx_cpu_ptr);
 307clean_up_memory:
 308	kfree(process);
 309	return r;
 310}
 311
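/*
 * Tear a process down in two phases: with the MES lock held, unmap
 * every queue from the firmware and drop all IDR entries; then, after
 * unlocking, free the MQDs, gang contexts and the process itself, in
 * keeping with the rule of taking no other locks under the MES lock.
 */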
 312void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
 313{
 314	struct amdgpu_mes_process *process;
 315	struct amdgpu_mes_gang *gang, *tmp1;
 316	struct amdgpu_mes_queue *queue, *tmp2;
 317	struct mes_remove_queue_input queue_input;
 318	unsigned long flags;
 319	int r;
 320
 321	/*
 322	 * Avoid taking any other locks under MES lock to avoid circular
 323	 * lock dependencies.
 324	 */
 325	amdgpu_mes_lock(&adev->mes);
 326
 327	process = idr_find(&adev->mes.pasid_idr, pasid);
 328	if (!process) {
 329		DRM_WARN("pasid %d doesn't exist\n", pasid);
 330		amdgpu_mes_unlock(&adev->mes);
 331		return;
 332	}
 333
 334	/* Remove all queues from hardware */
 335	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 336		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 337			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 338			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 339			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 340
 341			queue_input.doorbell_offset = queue->doorbell_off;
 342			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 343
 344			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
 345							     &queue_input);
 346			if (r)
 347				DRM_WARN("failed to remove hardware queue\n");
 348		}
 349
 350		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 351	}
 352
 353	idr_remove(&adev->mes.pasid_idr, pasid);
 354	amdgpu_mes_unlock(&adev->mes);
 355
 356	/* free all memory allocated by the process */
 357	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 358		/* free all queues in the gang */
 359		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 360			amdgpu_mes_queue_free_mqd(queue);
 361			list_del(&queue->list);
 362			kfree(queue);
 363		}
 364		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 365				      &gang->gang_ctx_gpu_addr,
 366				      &gang->gang_ctx_cpu_ptr);
 367		list_del(&gang->list);
 368		kfree(gang);
 369
 370	}
 371	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 372			      &process->proc_ctx_gpu_addr,
 373			      &process->proc_ctx_cpu_ptr);
 374	kfree(process);
 375}
 376
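/*
 * Create a gang (a group of queues scheduled as a unit) under an
 * existing process: allocate and zero its context BO, assign a gang id
 * from the IDR, and inherit quantum and priority settings from the
 * given properties.
 */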
 377int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
 378			struct amdgpu_mes_gang_properties *gprops,
 379			int *gang_id)
 380{
 381	struct amdgpu_mes_process *process;
 382	struct amdgpu_mes_gang *gang;
 383	int r;
 384
 385	/* allocate the mes gang buffer */
 386	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
 387	if (!gang) {
 388		return -ENOMEM;
 389	}
 390
 391	/* allocate the gang context bo and map it to cpu space */
 392	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
 393				    AMDGPU_GEM_DOMAIN_GTT,
 394				    &gang->gang_ctx_bo,
 395				    &gang->gang_ctx_gpu_addr,
 396				    &gang->gang_ctx_cpu_ptr);
 397	if (r) {
  398		DRM_ERROR("failed to allocate gang context bo\n");
 399		goto clean_up_mem;
 400	}
 401	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
 402
 403	/*
 404	 * Avoid taking any other locks under MES lock to avoid circular
 405	 * lock dependencies.
 406	 */
 407	amdgpu_mes_lock(&adev->mes);
 408
 409	process = idr_find(&adev->mes.pasid_idr, pasid);
 410	if (!process) {
 411		DRM_ERROR("pasid %d doesn't exist\n", pasid);
 412		r = -EINVAL;
 413		goto clean_up_ctx;
 414	}
 415
 416	/* add the mes gang to idr list */
 417	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
 418		      GFP_KERNEL);
 419	if (r < 0) {
 420		DRM_ERROR("failed to allocate idr for gang\n");
 421		goto clean_up_ctx;
 422	}
 423
 424	gang->gang_id = r;
 425	*gang_id = r;
 426
 427	INIT_LIST_HEAD(&gang->queue_list);
 428	gang->process = process;
 429	gang->priority = gprops->priority;
 430	gang->gang_quantum = gprops->gang_quantum ?
 431		gprops->gang_quantum : adev->mes.default_gang_quantum;
 432	gang->global_priority_level = gprops->global_priority_level;
 433	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
 434	list_add_tail(&gang->list, &process->gang_list);
 435
 436	amdgpu_mes_unlock(&adev->mes);
 437	return 0;
 438
 439clean_up_ctx:
 440	amdgpu_mes_unlock(&adev->mes);
 441	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 442			      &gang->gang_ctx_gpu_addr,
 443			      &gang->gang_ctx_cpu_ptr);
 444clean_up_mem:
 445	kfree(gang);
 446	return r;
 447}
 448
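/*
 * Remove a gang. Only empty gangs can go away; a gang that still has
 * queues on its list is refused with -EBUSY.
 */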
 449int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
 450{
 451	struct amdgpu_mes_gang *gang;
 452
 453	/*
 454	 * Avoid taking any other locks under MES lock to avoid circular
 455	 * lock dependencies.
 456	 */
 457	amdgpu_mes_lock(&adev->mes);
 458
 459	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 460	if (!gang) {
 461		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 462		amdgpu_mes_unlock(&adev->mes);
 463		return -EINVAL;
 464	}
 465
 466	if (!list_empty(&gang->queue_list)) {
 467		DRM_ERROR("queue list is not empty\n");
 468		amdgpu_mes_unlock(&adev->mes);
 469		return -EBUSY;
 470	}
 471
 472	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 473	list_del(&gang->list);
 474	amdgpu_mes_unlock(&adev->mes);
 475
 476	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 477			      &gang->gang_ctx_gpu_addr,
 478			      &gang->gang_ctx_cpu_ptr);
 479
 480	kfree(gang);
 481
 482	return 0;
 483}
 484
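/*
 * Suspend (and below, resume) every gang of every known process via
 * the firmware. Failures are only logged and the walk continues.
 */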
 485int amdgpu_mes_suspend(struct amdgpu_device *adev)
 486{
 487	struct idr *idp;
 488	struct amdgpu_mes_process *process;
 489	struct amdgpu_mes_gang *gang;
 490	struct mes_suspend_gang_input input;
 491	int r, pasid;
 492
 493	/*
 494	 * Avoid taking any other locks under MES lock to avoid circular
 495	 * lock dependencies.
 496	 */
 497	amdgpu_mes_lock(&adev->mes);
 498
 499	idp = &adev->mes.pasid_idr;
 500
 501	idr_for_each_entry(idp, process, pasid) {
 502		list_for_each_entry(gang, &process->gang_list, list) {
 503			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
 504			if (r)
 505				DRM_ERROR("failed to suspend pasid %d gangid %d",
 506					 pasid, gang->gang_id);
 507		}
 508	}
 509
 510	amdgpu_mes_unlock(&adev->mes);
 511	return 0;
 512}
 513
 514int amdgpu_mes_resume(struct amdgpu_device *adev)
 515{
 516	struct idr *idp;
 517	struct amdgpu_mes_process *process;
 518	struct amdgpu_mes_gang *gang;
 519	struct mes_resume_gang_input input;
 520	int r, pasid;
 521
 522	/*
 523	 * Avoid taking any other locks under MES lock to avoid circular
 524	 * lock dependencies.
 525	 */
 526	amdgpu_mes_lock(&adev->mes);
 527
 528	idp = &adev->mes.pasid_idr;
 529
 530	idr_for_each_entry(idp, process, pasid) {
 531		list_for_each_entry(gang, &process->gang_list, list) {
 532			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
 533			if (r)
 534				DRM_ERROR("failed to resume pasid %d gangid %d",
 535					 pasid, gang->gang_id);
 536		}
 537	}
 538
 539	amdgpu_mes_unlock(&adev->mes);
 540	return 0;
 541}
 542
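/*
 * Allocate and zero a queue's MQD BO in GTT and leave it reserved;
 * amdgpu_mes_queue_init_mqd() below fills it in and unreserves it.
 */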
 543static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
 544				     struct amdgpu_mes_queue *q,
 545				     struct amdgpu_mes_queue_properties *p)
 546{
 547	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 548	u32 mqd_size = mqd_mgr->mqd_size;
 549	int r;
 550
 551	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
 552				    AMDGPU_GEM_DOMAIN_GTT,
 553				    &q->mqd_obj,
 554				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
 555	if (r) {
 556		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
 557		return r;
 558	}
 559	memset(q->mqd_cpu_ptr, 0, mqd_size);
 560
 561	r = amdgpu_bo_reserve(q->mqd_obj, false);
 562	if (unlikely(r != 0))
 563		goto clean_up;
 564
 565	return 0;
 566
 567clean_up:
 568	amdgpu_bo_free_kernel(&q->mqd_obj,
 569			      &q->mqd_gpu_addr,
 570			      &q->mqd_cpu_ptr);
 571	return r;
 572}
 573
 574static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
 575				     struct amdgpu_mes_queue *q,
 576				     struct amdgpu_mes_queue_properties *p)
 577{
 578	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 579	struct amdgpu_mqd_prop mqd_prop = {0};
 580
 581	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
 582	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
 583	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
 584	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
 585	mqd_prop.queue_size = p->queue_size;
 586	mqd_prop.use_doorbell = true;
 587	mqd_prop.doorbell_index = p->doorbell_off;
 588	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
 589	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
 590	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
 591	mqd_prop.hqd_active = false;
 592
 593	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 594	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 595		mutex_lock(&adev->srbm_mutex);
 596		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
 597	}
 598
 599	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
 600
 601	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 602	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 603		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
 604		mutex_unlock(&adev->srbm_mutex);
 605	}
 606
 607	amdgpu_bo_unreserve(q->mqd_obj);
 608}
 609
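/*
 * Add a hardware queue: allocate its MQD, then under the MES lock
 * assign a queue id and a kernel doorbell, initialize the MQD, and
 * hand the whole description to the firmware with add_hw_queue().
 * The error path unwinds each step in reverse order.
 */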
 610int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
 611			    struct amdgpu_mes_queue_properties *qprops,
 612			    int *queue_id)
 613{
 614	struct amdgpu_mes_queue *queue;
 615	struct amdgpu_mes_gang *gang;
 616	struct mes_add_queue_input queue_input;
 617	unsigned long flags;
 618	int r;
 619
 620	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
 621
 622	/* allocate the mes queue buffer */
 623	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
 624	if (!queue) {
 625		DRM_ERROR("Failed to allocate memory for queue\n");
 626		return -ENOMEM;
 627	}
 628
 629	/* Allocate the queue mqd */
 630	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
 631	if (r)
 632		goto clean_up_memory;
 633
 634	/*
 635	 * Avoid taking any other locks under MES lock to avoid circular
 636	 * lock dependencies.
 637	 */
 638	amdgpu_mes_lock(&adev->mes);
 639
 640	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 641	if (!gang) {
 642		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 643		r = -EINVAL;
 644		goto clean_up_mqd;
 645	}
 646
 647	/* add the mes gang to idr list */
 648	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 649	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
 650		      GFP_ATOMIC);
 651	if (r < 0) {
 652		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 653		goto clean_up_mqd;
 654	}
 655	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 656	*queue_id = queue->queue_id = r;
 657
 658	/* allocate a doorbell index for the queue */
 659	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
 660					  qprops->queue_type,
 661					  &qprops->doorbell_off);
 662	if (r)
 663		goto clean_up_queue_id;
 664
 665	/* initialize the queue mqd */
 666	amdgpu_mes_queue_init_mqd(adev, queue, qprops);
 667
 668	/* add hw queue to mes */
 669	queue_input.process_id = gang->process->pasid;
 670
 671	queue_input.page_table_base_addr =
 672		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
 673		adev->gmc.vram_start;
 674
 675	queue_input.process_va_start = 0;
 676	queue_input.process_va_end =
 677		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
 678	queue_input.process_quantum = gang->process->process_quantum;
 679	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
 680	queue_input.gang_quantum = gang->gang_quantum;
 681	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 682	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
 683	queue_input.gang_global_priority_level = gang->global_priority_level;
 684	queue_input.doorbell_offset = qprops->doorbell_off;
 685	queue_input.mqd_addr = queue->mqd_gpu_addr;
 686	queue_input.wptr_addr = qprops->wptr_gpu_addr;
 687	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
 688	queue_input.queue_type = qprops->queue_type;
 689	queue_input.paging = qprops->paging;
 690	queue_input.is_kfd_process = 0;
 691
 692	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
 693	if (r) {
 694		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
 695			  qprops->doorbell_off);
 696		goto clean_up_doorbell;
 697	}
 698
 699	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
 700		  "queue type=%d, doorbell=0x%llx\n",
 701		  gang->process->pasid, gang_id, qprops->queue_type,
 702		  qprops->doorbell_off);
 703
 704	queue->ring = qprops->ring;
 705	queue->doorbell_off = qprops->doorbell_off;
 706	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
 707	queue->queue_type = qprops->queue_type;
 708	queue->paging = qprops->paging;
 709	queue->gang = gang;
 710	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
 711	list_add_tail(&queue->list, &gang->queue_list);
 712
 713	amdgpu_mes_unlock(&adev->mes);
 714	return 0;
 715
 716clean_up_doorbell:
 717	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
 718				       qprops->doorbell_off);
 719clean_up_queue_id:
 720	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 721	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 722	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 723clean_up_mqd:
 724	amdgpu_mes_unlock(&adev->mes);
 725	amdgpu_mes_queue_free_mqd(queue);
 726clean_up_memory:
 727	kfree(queue);
 728	return r;
 729}
 730
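/*
 * Remove a hardware queue: drop it from the queue-id IDR, ask the
 * firmware to unmap it, then release its doorbell and free its MQD.
 */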
 731int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
 732{
 733	unsigned long flags;
 734	struct amdgpu_mes_queue *queue;
 735	struct amdgpu_mes_gang *gang;
 736	struct mes_remove_queue_input queue_input;
 737	int r;
 738
 739	/*
 740	 * Avoid taking any other locks under MES lock to avoid circular
 741	 * lock dependencies.
 742	 */
 743	amdgpu_mes_lock(&adev->mes);
 744
 745	/* remove the mes gang from idr list */
 746	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 747
 748	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 749	if (!queue) {
 750		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 751		amdgpu_mes_unlock(&adev->mes);
 752		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 753		return -EINVAL;
 754	}
 755
 756	idr_remove(&adev->mes.queue_id_idr, queue_id);
 757	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 758
 759	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
 760		  queue->doorbell_off);
 761
 762	gang = queue->gang;
 763	queue_input.doorbell_offset = queue->doorbell_off;
 764	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 765
 766	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
 767	if (r)
 768		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
 769			  queue_id);
 770
 771	list_del(&queue->list);
 772	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
 773				       queue->doorbell_off);
 774	amdgpu_mes_unlock(&adev->mes);
 775
 776	amdgpu_mes_queue_free_mqd(queue);
 777	kfree(queue);
 778	return 0;
 779}
 780
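/*
 * Unmap one of the driver's own kernel rings through the MES firmware.
 * The trail fence address and sequence value let the firmware signal
 * once the unmap has actually completed.
 */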
 781int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 782				  struct amdgpu_ring *ring,
 783				  enum amdgpu_unmap_queues_action action,
 784				  u64 gpu_addr, u64 seq)
 785{
 786	struct mes_unmap_legacy_queue_input queue_input;
 787	int r;
 788
 789	queue_input.action = action;
 790	queue_input.queue_type = ring->funcs->type;
 791	queue_input.doorbell_offset = ring->doorbell_index;
 792	queue_input.pipe_id = ring->pipe;
 793	queue_input.queue_id = ring->queue;
 794	queue_input.trail_fence_addr = gpu_addr;
 795	queue_input.trail_fence_data = seq;
 796
 797	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
 798	if (r)
 799		DRM_ERROR("failed to unmap legacy queue\n");
 800
 801	return r;
 802}
 803
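/*
 * Read a register via the MES firmware: MES writes the value to the
 * read_val write-back slot and the CPU picks it up from there.
 * Returns 0 when the misc op is unsupported or fails.
 */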
 804uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
 805{
 806	struct mes_misc_op_input op_input;
 807	int r, val = 0;
 808
 809	op_input.op = MES_MISC_OP_READ_REG;
 810	op_input.read_reg.reg_offset = reg;
 811	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
 812
 813	if (!adev->mes.funcs->misc_op) {
 814		DRM_ERROR("mes rreg is not supported!\n");
 815		goto error;
 816	}
 817
 818	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 819	if (r)
 820		DRM_ERROR("failed to read reg (0x%x)\n", reg);
 821	else
 822		val = *(adev->mes.read_val_ptr);
 823
 824error:
 825	return val;
 826}
 827
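/*
 * Write a register via the MES firmware; -EINVAL when the firmware
 * has no misc op support.
 */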
 828int amdgpu_mes_wreg(struct amdgpu_device *adev,
 829		    uint32_t reg, uint32_t val)
 830{
 831	struct mes_misc_op_input op_input;
 832	int r;
 833
 834	op_input.op = MES_MISC_OP_WRITE_REG;
 835	op_input.write_reg.reg_offset = reg;
 836	op_input.write_reg.reg_value = val;
 837
 838	if (!adev->mes.funcs->misc_op) {
 839		DRM_ERROR("mes wreg is not supported!\n");
 840		r = -EINVAL;
 841		goto error;
 842	}
 843
 844	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 845	if (r)
 846		DRM_ERROR("failed to write reg (0x%x)\n", reg);
 847
 848error:
 849	return r;
 850}
 851
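/*
 * Firmware-side write-then-wait: reg0 is written and reg1 is then
 * polled against ref under mask.
 */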
 852int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
 853				  uint32_t reg0, uint32_t reg1,
 854				  uint32_t ref, uint32_t mask)
 855{
 856	struct mes_misc_op_input op_input;
 857	int r;
 858
 859	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
 860	op_input.wrm_reg.reg0 = reg0;
 861	op_input.wrm_reg.reg1 = reg1;
 862	op_input.wrm_reg.ref = ref;
 863	op_input.wrm_reg.mask = mask;
 864
 865	if (!adev->mes.funcs->misc_op) {
 866		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
 867		r = -EINVAL;
 868		goto error;
 869	}
 870
 871	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 872	if (r)
 873		DRM_ERROR("failed to reg_write_reg_wait\n");
 874
 875error:
 876	return r;
 877}
 878
 879int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
 880			uint32_t val, uint32_t mask)
 881{
 882	struct mes_misc_op_input op_input;
 883	int r;
 884
 885	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
 886	op_input.wrm_reg.reg0 = reg;
 887	op_input.wrm_reg.ref = val;
 888	op_input.wrm_reg.mask = mask;
 889
 890	if (!adev->mes.funcs->misc_op) {
 891		DRM_ERROR("mes reg wait is not supported!\n");
 892		r = -EINVAL;
 893		goto error;
 894	}
 895
 896	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 897	if (r)
  898		DRM_ERROR("failed to reg_wait\n");
 899
 900error:
 901	return r;
 902}
 903
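/*
 * Program the shader debugger state for a process. trap_en is only
 * forwarded on MES API version >= 14, and a process context flush must
 * go through amdgpu_mes_flush_shader_debugger() below instead.
 */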
 904int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
 905				uint64_t process_context_addr,
 906				uint32_t spi_gdbg_per_vmid_cntl,
 907				const uint32_t *tcp_watch_cntl,
 908				uint32_t flags,
 909				bool trap_en)
 910{
 911	struct mes_misc_op_input op_input = {0};
 912	int r;
 913
 914	if (!adev->mes.funcs->misc_op) {
 915		DRM_ERROR("mes set shader debugger is not supported!\n");
 916		return -EINVAL;
 917	}
 918
 919	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
 920	op_input.set_shader_debugger.process_context_addr = process_context_addr;
 921	op_input.set_shader_debugger.flags.u32all = flags;
 922
 923	/* use amdgpu mes_flush_shader_debugger instead */
 924	if (op_input.set_shader_debugger.flags.process_ctx_flush)
 925		return -EINVAL;
 926
 927	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
 928	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
 929			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
 930
 931	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
 932			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
 933		op_input.set_shader_debugger.trap_en = trap_en;
 934
 935	amdgpu_mes_lock(&adev->mes);
 936
 937	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 938	if (r)
 939		DRM_ERROR("failed to set_shader_debugger\n");
 940
 941	amdgpu_mes_unlock(&adev->mes);
 942
 943	return r;
 944}
 945
 946int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
 947				     uint64_t process_context_addr)
 948{
 949	struct mes_misc_op_input op_input = {0};
 950	int r;
 951
 952	if (!adev->mes.funcs->misc_op) {
 953		DRM_ERROR("mes flush shader debugger is not supported!\n");
 954		return -EINVAL;
 955	}
 956
 957	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
 958	op_input.set_shader_debugger.process_context_addr = process_context_addr;
 959	op_input.set_shader_debugger.flags.process_ctx_flush = true;
 960
 961	amdgpu_mes_lock(&adev->mes);
 962
 963	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 964	if (r)
 965		DRM_ERROR("failed to set_shader_debugger\n");
 966
 967	amdgpu_mes_unlock(&adev->mes);
 968
 969	return r;
 970}
 971
 972static void
 973amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
 974			       struct amdgpu_ring *ring,
 975			       struct amdgpu_mes_queue_properties *props)
 976{
 977	props->queue_type = ring->funcs->type;
 978	props->hqd_base_gpu_addr = ring->gpu_addr;
 979	props->rptr_gpu_addr = ring->rptr_gpu_addr;
 980	props->wptr_gpu_addr = ring->wptr_gpu_addr;
 981	props->wptr_mc_addr =
 982		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
 983	props->queue_size = ring->ring_size;
 984	props->eop_gpu_addr = ring->eop_gpu_addr;
 985	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 986	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
 987	props->paging = false;
 988	props->ring = ring;
 989}
 990
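/*
 * Map a slot id to its byte offset inside struct
 * amdgpu_mes_ctx_meta_data for the given engine array; used by
 * amdgpu_mes_ctx_get_offs() below.
 */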
 991#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
 992do {									\
 993       if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
 994		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
 995				_eng[ring->idx].slots[id_offs]);        \
 996       else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
 997		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
 998				_eng[ring->idx].ring);                  \
 999       else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
1000		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
1001				_eng[ring->idx].ib);                    \
1002       else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)			\
1003		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
1004				_eng[ring->idx].padding);               \
1005} while(0)
1006
1007int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
1008{
1009	switch (ring->funcs->type) {
1010	case AMDGPU_RING_TYPE_GFX:
1011		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
1012		break;
1013	case AMDGPU_RING_TYPE_COMPUTE:
1014		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
1015		break;
1016	case AMDGPU_RING_TYPE_SDMA:
1017		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
1018		break;
1019	default:
1020		break;
1021	}
1022
1023	WARN_ON(1);
1024	return -EINVAL;
1025}
1026
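/*
 * Create a standalone amdgpu_ring backed by a MES hardware queue. The
 * ring borrows funcs/me/pipe from the first ring of its type, keeps
 * its state in the ctx metadata BO, and is registered with the
 * firmware through amdgpu_mes_add_hw_queue().
 */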
1027int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
1028			int queue_type, int idx,
1029			struct amdgpu_mes_ctx_data *ctx_data,
1030			struct amdgpu_ring **out)
1031{
1032	struct amdgpu_ring *ring;
1033	struct amdgpu_mes_gang *gang;
1034	struct amdgpu_mes_queue_properties qprops = {0};
1035	int r, queue_id, pasid;
1036
1037	/*
1038	 * Avoid taking any other locks under MES lock to avoid circular
1039	 * lock dependencies.
1040	 */
1041	amdgpu_mes_lock(&adev->mes);
1042	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
1043	if (!gang) {
1044		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
1045		amdgpu_mes_unlock(&adev->mes);
1046		return -EINVAL;
1047	}
1048	pasid = gang->process->pasid;
1049
1050	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
1051	if (!ring) {
1052		amdgpu_mes_unlock(&adev->mes);
1053		return -ENOMEM;
1054	}
1055
1056	ring->ring_obj = NULL;
1057	ring->use_doorbell = true;
1058	ring->is_mes_queue = true;
1059	ring->mes_ctx = ctx_data;
1060	ring->idx = idx;
1061	ring->no_scheduler = true;
1062
1063	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
1064		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
1065				      compute[ring->idx].mec_hpd);
1066		ring->eop_gpu_addr =
1067			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1068	}
1069
1070	switch (queue_type) {
1071	case AMDGPU_RING_TYPE_GFX:
1072		ring->funcs = adev->gfx.gfx_ring[0].funcs;
1073		ring->me = adev->gfx.gfx_ring[0].me;
1074		ring->pipe = adev->gfx.gfx_ring[0].pipe;
1075		break;
1076	case AMDGPU_RING_TYPE_COMPUTE:
1077		ring->funcs = adev->gfx.compute_ring[0].funcs;
1078		ring->me = adev->gfx.compute_ring[0].me;
1079		ring->pipe = adev->gfx.compute_ring[0].pipe;
1080		break;
1081	case AMDGPU_RING_TYPE_SDMA:
1082		ring->funcs = adev->sdma.instance[0].ring.funcs;
1083		break;
1084	default:
1085		BUG();
1086	}
1087
1088	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1089			     AMDGPU_RING_PRIO_DEFAULT, NULL);
1090	if (r)
1091		goto clean_up_memory;
1092
1093	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
1094
1095	dma_fence_wait(gang->process->vm->last_update, false);
1096	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
1097	amdgpu_mes_unlock(&adev->mes);
1098
1099	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
1100	if (r)
1101		goto clean_up_ring;
1102
1103	ring->hw_queue_id = queue_id;
1104	ring->doorbell_index = qprops.doorbell_off;
1105
1106	if (queue_type == AMDGPU_RING_TYPE_GFX)
1107		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
1108	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
1109		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
1110			queue_id);
1111	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
1112		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
1113			queue_id);
1114	else
1115		BUG();
1116
1117	*out = ring;
1118	return 0;
1119
1120clean_up_ring:
1121	amdgpu_ring_fini(ring);
1122clean_up_memory:
1123	kfree(ring);
1124	amdgpu_mes_unlock(&adev->mes);
1125	return r;
1126}
1127
1128void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
1129			    struct amdgpu_ring *ring)
1130{
1131	if (!ring)
1132		return;
1133
1134	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
1135	del_timer_sync(&ring->fence_drv.fallback_timer);
1136	amdgpu_ring_fini(ring);
1137	kfree(ring);
1138}
1139
1140uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
1141						   enum amdgpu_mes_priority_level prio)
1142{
1143	return adev->mes.aggregated_doorbells[prio];
1144}
1145
1146int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
1147				   struct amdgpu_mes_ctx_data *ctx_data)
1148{
1149	int r;
1150
1151	r = amdgpu_bo_create_kernel(adev,
1152			    sizeof(struct amdgpu_mes_ctx_meta_data),
1153			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1154			    &ctx_data->meta_data_obj,
1155			    &ctx_data->meta_data_mc_addr,
1156			    &ctx_data->meta_data_ptr);
1157	if (r) {
1158		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
1159		return r;
1160	}
1161
1162	if (!ctx_data->meta_data_obj)
1163		return -ENOMEM;
1164
1165	memset(ctx_data->meta_data_ptr, 0,
1166	       sizeof(struct amdgpu_mes_ctx_meta_data));
1167
1168	return 0;
1169}
1170
1171void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
1172{
1173	if (ctx_data->meta_data_obj)
1174		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
1175				      &ctx_data->meta_data_mc_addr,
1176				      &ctx_data->meta_data_ptr);
1177}
1178
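/*
 * Map the ctx metadata BO into the process VM at the caller-chosen
 * address: lock the BO and the page directory with drm_exec, create
 * and validate the mapping, and wait for the page-table updates to
 * land before returning.
 */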
1179int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
1180				 struct amdgpu_vm *vm,
1181				 struct amdgpu_mes_ctx_data *ctx_data)
1182{
1183	struct amdgpu_bo_va *bo_va;
1184	struct amdgpu_sync sync;
1185	struct drm_exec exec;
1186	int r;
1187
1188	amdgpu_sync_create(&sync);
1189
1190	drm_exec_init(&exec, 0, 0);
1191	drm_exec_until_all_locked(&exec) {
1192		r = drm_exec_lock_obj(&exec,
1193				      &ctx_data->meta_data_obj->tbo.base);
1194		drm_exec_retry_on_contention(&exec);
1195		if (unlikely(r))
1196			goto error_fini_exec;
1197
1198		r = amdgpu_vm_lock_pd(vm, &exec, 0);
1199		drm_exec_retry_on_contention(&exec);
1200		if (unlikely(r))
1201			goto error_fini_exec;
1202	}
1203
1204	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
1205	if (!bo_va) {
1206		DRM_ERROR("failed to create bo_va for meta data BO\n");
1207		r = -ENOMEM;
1208		goto error_fini_exec;
1209	}
1210
1211	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
1212			     sizeof(struct amdgpu_mes_ctx_meta_data),
1213			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
1214			     AMDGPU_PTE_EXECUTABLE);
1215
1216	if (r) {
1217		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
1218		goto error_del_bo_va;
1219	}
1220
1221	r = amdgpu_vm_bo_update(adev, bo_va, false);
1222	if (r) {
1223		DRM_ERROR("failed to do vm_bo_update on meta data\n");
1224		goto error_del_bo_va;
1225	}
1226	amdgpu_sync_fence(&sync, bo_va->last_pt_update);
1227
1228	r = amdgpu_vm_update_pdes(adev, vm, false);
1229	if (r) {
1230		DRM_ERROR("failed to update pdes on meta data\n");
1231		goto error_del_bo_va;
1232	}
1233	amdgpu_sync_fence(&sync, vm->last_update);
1234
1235	amdgpu_sync_wait(&sync, false);
1236	drm_exec_fini(&exec);
1237
1238	amdgpu_sync_free(&sync);
1239	ctx_data->meta_data_va = bo_va;
1240	return 0;
1241
1242error_del_bo_va:
1243	amdgpu_vm_bo_del(adev, bo_va);
1244
1245error_fini_exec:
1246	drm_exec_fini(&exec);
1247	amdgpu_sync_free(&sync);
1248	return r;
1249}
1250
1251int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
1252				   struct amdgpu_mes_ctx_data *ctx_data)
1253{
1254	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
1255	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
1256	struct amdgpu_vm *vm = bo_va->base.vm;
1257	struct dma_fence *fence;
1258	struct drm_exec exec;
1259	long r;
1260
1261	drm_exec_init(&exec, 0, 0);
1262	drm_exec_until_all_locked(&exec) {
1263		r = drm_exec_lock_obj(&exec,
1264				      &ctx_data->meta_data_obj->tbo.base);
1265		drm_exec_retry_on_contention(&exec);
1266		if (unlikely(r))
1267			goto out_unlock;
1268
1269		r = amdgpu_vm_lock_pd(vm, &exec, 0);
1270		drm_exec_retry_on_contention(&exec);
1271		if (unlikely(r))
1272			goto out_unlock;
1273	}
1274
1275	amdgpu_vm_bo_del(adev, bo_va);
1276	if (!amdgpu_vm_ready(vm))
1277		goto out_unlock;
1278
1279	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
1280				   &fence);
1281	if (r)
1282		goto out_unlock;
1283	if (fence) {
1284		amdgpu_bo_fence(bo, fence, true);
1285		fence = NULL;
1286	}
1287
1288	r = amdgpu_vm_clear_freed(adev, vm, &fence);
1289	if (r || !fence)
1290		goto out_unlock;
1291
1292	dma_fence_wait(fence, false);
1293	amdgpu_bo_fence(bo, fence, true);
1294	dma_fence_put(fence);
1295
1296out_unlock:
1297	if (unlikely(r < 0))
1298		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
1299	drm_exec_fini(&exec);
1300
1301	return r;
1302}
1303
1304static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
1305					  int pasid, int *gang_id,
1306					  int queue_type, int num_queue,
1307					  struct amdgpu_ring **added_rings,
1308					  struct amdgpu_mes_ctx_data *ctx_data)
1309{
1310	struct amdgpu_ring *ring;
1311	struct amdgpu_mes_gang_properties gprops = {0};
1312	int r, j;
1313
1314	/* create a gang for the process */
1315	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1316	gprops.gang_quantum = adev->mes.default_gang_quantum;
1317	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1318	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1319	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1320
1321	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
1322	if (r) {
1323		DRM_ERROR("failed to add gang\n");
1324		return r;
1325	}
1326
1327	/* create queues for the gang */
1328	for (j = 0; j < num_queue; j++) {
1329		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
1330					ctx_data, &ring);
1331		if (r) {
1332			DRM_ERROR("failed to add ring\n");
1333			break;
1334		}
1335
1336		DRM_INFO("ring %s was added\n", ring->name);
1337		added_rings[j] = ring;
1338	}
1339
1340	return 0;
1341}
1342
1343static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1344{
1345	struct amdgpu_ring *ring;
1346	int i, r;
1347
1348	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1349		ring = added_rings[i];
1350		if (!ring)
1351			continue;
1352
1353		r = amdgpu_ring_test_helper(ring);
1354		if (r)
1355			return r;
1356
1357		r = amdgpu_ring_test_ib(ring, 1000 * 10);
1358		if (r) {
1359			DRM_DEV_ERROR(ring->adev->dev,
1360				      "ring %s ib test failed (%d)\n",
1361				      ring->name, r);
1362			return r;
1363		} else
1364			DRM_INFO("ring %s ib test pass\n", ring->name);
1365	}
1366
1367	return 0;
1368}
1369
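/*
 * Self test: build a throwaway VM and process, create one gfx, one
 * compute and one SDMA gang (SDMA is skipped on GFX 10.3.x, whose
 * firmware can't map SDMA queues), run ring and IB tests on every
 * queue, then tear it all down.
 */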
1370int amdgpu_mes_self_test(struct amdgpu_device *adev)
1371{
1372	struct amdgpu_vm *vm = NULL;
1373	struct amdgpu_mes_ctx_data ctx_data = {0};
1374	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1375	int gang_ids[3] = {0};
1376	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
1377				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
1378				 { AMDGPU_RING_TYPE_SDMA, 1} };
1379	int i, r, pasid, k = 0;
1380
1381	pasid = amdgpu_pasid_alloc(16);
1382	if (pasid < 0) {
1383		dev_warn(adev->dev, "No more PASIDs available!");
1384		pasid = 0;
1385	}
1386
1387	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1388	if (!vm) {
1389		r = -ENOMEM;
1390		goto error_pasid;
1391	}
1392
1393	r = amdgpu_vm_init(adev, vm, -1);
1394	if (r) {
1395		DRM_ERROR("failed to initialize vm\n");
1396		goto error_pasid;
1397	}
1398
1399	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1400	if (r) {
1401		DRM_ERROR("failed to alloc ctx meta data\n");
1402		goto error_fini;
1403	}
1404
1405	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
1406	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1407	if (r) {
1408		DRM_ERROR("failed to map ctx meta data\n");
1409		goto error_vm;
1410	}
1411
1412	r = amdgpu_mes_create_process(adev, pasid, vm);
1413	if (r) {
1414		DRM_ERROR("failed to create MES process\n");
1415		goto error_vm;
1416	}
1417
1418	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
 1419		/* On GFX v10.3, the firmware doesn't yet support mapping SDMA queues. */
1420		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
1421			    IP_VERSION(10, 3, 0) &&
1422		    amdgpu_ip_version(adev, GC_HWIP, 0) <
1423			    IP_VERSION(11, 0, 0) &&
1424		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1425			continue;
1426
1427		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1428							   &gang_ids[i],
1429							   queue_types[i][0],
1430							   queue_types[i][1],
1431							   &added_rings[k],
1432							   &ctx_data);
1433		if (r)
1434			goto error_queues;
1435
1436		k += queue_types[i][1];
1437	}
1438
1439	/* start ring test and ib test for MES queues */
1440	amdgpu_mes_test_queues(added_rings);
1441
1442error_queues:
1443	/* remove all queues */
1444	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1445		if (!added_rings[i])
1446			continue;
1447		amdgpu_mes_remove_ring(adev, added_rings[i]);
1448	}
1449
1450	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1451		if (!gang_ids[i])
1452			continue;
1453		amdgpu_mes_remove_gang(adev, gang_ids[i]);
1454	}
1455
1456	amdgpu_mes_destroy_process(adev, pasid);
1457
1458error_vm:
1459	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1460
1461error_fini:
1462	amdgpu_vm_fini(adev, vm);
1463
1464error_pasid:
1465	if (pasid)
1466		amdgpu_pasid_free(pasid);
1467
1468	amdgpu_mes_ctx_free_meta_data(&ctx_data);
1469	kfree(vm);
1470	return 0;
1471}
1472
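/*
 * Fetch the MES firmware for one pipe. GC 11+ uses the split naming
 * scheme (*_mes_2.bin for the scheduler pipe, *_mes1.bin for the KIQ
 * pipe) with a fallback to the legacy *_mes.bin for the scheduler
 * pipe; older parts use *_mes.bin / *_mes1.bin directly.
 */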
1473int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
1474{
1475	const struct mes_firmware_header_v1_0 *mes_hdr;
1476	struct amdgpu_firmware_info *info;
1477	char ucode_prefix[30];
1478	char fw_name[40];
1479	bool need_retry = false;
1480	int r;
1481
1482	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
1483				       sizeof(ucode_prefix));
1484	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1485		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1486			 ucode_prefix,
1487			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
1488		need_retry = true;
1489	} else {
1490		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1491			 ucode_prefix,
1492			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
1493	}
1494
1495	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
1496	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
1497		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
1498			 ucode_prefix);
1499		DRM_INFO("try to fall back to %s\n", fw_name);
1500		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
1501					 fw_name);
1502	}
1503
1504	if (r)
1505		goto out;
1506
1507	mes_hdr = (const struct mes_firmware_header_v1_0 *)
1508		adev->mes.fw[pipe]->data;
1509	adev->mes.uc_start_addr[pipe] =
1510		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
1511		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
1512	adev->mes.data_start_addr[pipe] =
1513		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
1514		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
1515
1516	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1517		int ucode, ucode_data;
1518
1519		if (pipe == AMDGPU_MES_SCHED_PIPE) {
1520			ucode = AMDGPU_UCODE_ID_CP_MES;
1521			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
1522		} else {
1523			ucode = AMDGPU_UCODE_ID_CP_MES1;
1524			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
1525		}
1526
1527		info = &adev->firmware.ucode[ucode];
1528		info->ucode_id = ucode;
1529		info->fw = adev->mes.fw[pipe];
1530		adev->firmware.fw_size +=
1531			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
1532			      PAGE_SIZE);
1533
1534		info = &adev->firmware.ucode[ucode_data];
1535		info->ucode_id = ucode_data;
1536		info->fw = adev->mes.fw[pipe];
1537		adev->firmware.fw_size +=
1538			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
1539			      PAGE_SIZE);
1540	}
1541
1542	return 0;
1543out:
1544	amdgpu_ucode_release(&adev->mes.fw[pipe]);
1545	return r;
1546}
1547
1548#if defined(CONFIG_DEBUG_FS)
1549
1550static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
1551{
1552	struct amdgpu_device *adev = m->private;
1553	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
1554
1555	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
1556		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
1557
1558	return 0;
1559}
1560
1561DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
1562
1563#endif
1564
1565void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
1566{
1567
1568#if defined(CONFIG_DEBUG_FS)
1569	struct drm_minor *minor = adev_to_drm(adev)->primary;
1570	struct dentry *root = minor->debugfs_root;
1571	if (adev->enable_mes && amdgpu_mes_log_enable)
1572		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
1573				    adev, &amdgpu_debugfs_mes_event_log_fops);
1574
1575#endif
1576}
v6.13.7: drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <drm/drm_exec.h>
  26
  27#include "amdgpu_mes.h"
  28#include "amdgpu.h"
  29#include "soc15_common.h"
  30#include "amdgpu_mes_ctx.h"
  31
  32#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
  33#define AMDGPU_ONE_DOORBELL_SIZE 8
  34
  35int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
  36{
  37	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
  38		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
  39		       PAGE_SIZE);
  40}
  41
  42static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
  43					 int ip_type, uint64_t *doorbell_index)
  44{
  45	unsigned int offset, found;
  46	struct amdgpu_mes *mes = &adev->mes;
  47
  48	if (ip_type == AMDGPU_RING_TYPE_SDMA)
  49		offset = adev->doorbell_index.sdma_engine[0];
  50	else
  51		offset = 0;
  52
  53	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
  54	if (found >= mes->num_mes_dbs) {
  55		DRM_WARN("No doorbell available\n");
  56		return -ENOSPC;
  57	}
  58
  59	set_bit(found, mes->doorbell_bitmap);
  60
  61	/* Get the absolute doorbell index on BAR */
  62	*doorbell_index = mes->db_start_dw_offset + found * 2;
  63	return 0;
  64}
  65
  66static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
  67					   uint32_t doorbell_index)
  68{
  69	unsigned int old, rel_index;
  70	struct amdgpu_mes *mes = &adev->mes;
  71
  72	/* Find the relative index of the doorbell in this object */
  73	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
  74	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
  75	WARN_ON(!old);
  76}
  77
  78static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
  79{
  80	int i;
  81	struct amdgpu_mes *mes = &adev->mes;
  82
  83	/* Bitmap for dynamic allocation of kernel doorbells */
  84	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
  85	if (!mes->doorbell_bitmap) {
  86		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
  87		return -ENOMEM;
  88	}
  89
  90	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
  91	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
  92		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
  93		set_bit(i, mes->doorbell_bitmap);
  94	}
  95
  96	return 0;
  97}
  98
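/* Compared with v6.9 above, the event log is now sized by
 * adev->mes.event_log_size and allocated in VRAM instead of GTT.
 */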
  99static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
 100{
 101	int r;
 102
 103	if (!amdgpu_mes_log_enable)
 104		return 0;
 105
 106	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
 107				    AMDGPU_GEM_DOMAIN_VRAM,
 108				    &adev->mes.event_log_gpu_obj,
 109				    &adev->mes.event_log_gpu_addr,
 110				    &adev->mes.event_log_cpu_addr);
 111	if (r) {
 112		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
 113		return r;
 114	}
 115
 116	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
 117
  118	return 0;
 119
 120}
 121
 122static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
 123{
 124	bitmap_free(adev->mes.doorbell_bitmap);
 125}
 126
 127int amdgpu_mes_init(struct amdgpu_device *adev)
 128{
 129	int i, r;
 130
 131	adev->mes.adev = adev;
 132
 133	idr_init(&adev->mes.pasid_idr);
 134	idr_init(&adev->mes.gang_id_idr);
 135	idr_init(&adev->mes.queue_id_idr);
 136	ida_init(&adev->mes.doorbell_ida);
 137	spin_lock_init(&adev->mes.queue_id_lock);
 138	mutex_init(&adev->mes.mutex_hidden);
 139
 140	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
 141		spin_lock_init(&adev->mes.ring_lock[i]);
 142
 143	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
 144	adev->mes.vmid_mask_mmhub = 0xffffff00;
 145	adev->mes.vmid_mask_gfxhub = 0xffffff00;
 146
 147	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
 148		/* use only 1st MEC pipes */
 149		if (i >= adev->gfx.mec.num_pipe_per_mec)
 150			continue;
 151		adev->mes.compute_hqd_mask[i] = 0xc;
 152	}
 153
 154	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
 155		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
 156
 157	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
 158		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
 159		    IP_VERSION(6, 0, 0))
 160			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
 161		/* zero sdma_hqd_mask for non-existent engine */
 162		else if (adev->sdma.num_instances == 1)
 163			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
 164		else
 165			adev->mes.sdma_hqd_mask[i] = 0xfc;
 166	}
 167
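/* Compared with v6.9 above, the scheduler-context and query-status
 * write-back slots are now allocated per MES pipe.
 */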
 168	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
 169		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
 170		if (r) {
 171			dev_err(adev->dev,
  172				"(%d) sch_ctx_offs wb alloc failed\n",
 173				r);
 174			goto error;
 175		}
 176		adev->mes.sch_ctx_gpu_addr[i] =
 177			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
 178		adev->mes.sch_ctx_ptr[i] =
 179			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
 180
 181		r = amdgpu_device_wb_get(adev,
 182				 &adev->mes.query_status_fence_offs[i]);
 183		if (r) {
 184			dev_err(adev->dev,
 185			      "(%d) query_status_fence_offs wb alloc failed\n",
 186			      r);
 187			goto error;
 188		}
 189		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
 190			(adev->mes.query_status_fence_offs[i] * 4);
 191		adev->mes.query_status_fence_ptr[i] =
 192			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
 193	}
 194
 195	r = amdgpu_mes_doorbell_init(adev);
 196	if (r)
 197		goto error;
 198
 199	r = amdgpu_mes_event_log_init(adev);
 200	if (r)
 201		goto error_doorbell;
 202
 203	return 0;
 204
 205error_doorbell:
 206	amdgpu_mes_doorbell_free(adev);
 207error:
 208	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
 209		if (adev->mes.sch_ctx_ptr[i])
 210			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
 211		if (adev->mes.query_status_fence_ptr[i])
 212			amdgpu_device_wb_free(adev,
 213				      adev->mes.query_status_fence_offs[i]);
 214	}
 215
 216	idr_destroy(&adev->mes.pasid_idr);
 217	idr_destroy(&adev->mes.gang_id_idr);
 218	idr_destroy(&adev->mes.queue_id_idr);
 219	ida_destroy(&adev->mes.doorbell_ida);
 220	mutex_destroy(&adev->mes.mutex_hidden);
 221	return r;
 222}
 223
 224void amdgpu_mes_fini(struct amdgpu_device *adev)
 225{
 226	int i;
 227
 228	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
 229			      &adev->mes.event_log_gpu_addr,
 230			      &adev->mes.event_log_cpu_addr);
 231
 232	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
 233		if (adev->mes.sch_ctx_ptr[i])
 234			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
 235		if (adev->mes.query_status_fence_ptr[i])
 236			amdgpu_device_wb_free(adev,
 237				      adev->mes.query_status_fence_offs[i]);
 238	}
 239
 240	amdgpu_mes_doorbell_free(adev);
 241
 242	idr_destroy(&adev->mes.pasid_idr);
 243	idr_destroy(&adev->mes.gang_id_idr);
 244	idr_destroy(&adev->mes.queue_id_idr);
 245	ida_destroy(&adev->mes.doorbell_ida);
 246	mutex_destroy(&adev->mes.mutex_hidden);
 247}
 248
 249static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
 250{
 251	amdgpu_bo_free_kernel(&q->mqd_obj,
 252			      &q->mqd_gpu_addr,
 253			      &q->mqd_cpu_ptr);
 254}
 255
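     	/*
     	 * amdgpu_mes_create_process - create a MES process for @pasid backed
     	 * by @vm: allocate and zero the per-process context BO in GTT, then
     	 * publish the process in the pasid IDR under the MES lock.
     	 */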
 256int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
 257			      struct amdgpu_vm *vm)
 258{
 259	struct amdgpu_mes_process *process;
 260	int r;
 261
 262	/* allocate the mes process buffer */
 263	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
 264	if (!process) {
 265		DRM_ERROR("no more memory to create mes process\n");
 266		return -ENOMEM;
 267	}
 268
 269	/* allocate the process context bo and map it */
 270	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
 271				    AMDGPU_GEM_DOMAIN_GTT,
 272				    &process->proc_ctx_bo,
 273				    &process->proc_ctx_gpu_addr,
 274				    &process->proc_ctx_cpu_ptr);
 275	if (r) {
 276		DRM_ERROR("failed to allocate process context bo\n");
 277		goto clean_up_memory;
 278	}
 279	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
 280
 281	/*
 282	 * Avoid taking any other locks under MES lock to avoid circular
 283	 * lock dependencies.
 284	 */
 285	amdgpu_mes_lock(&adev->mes);
 286
 287	/* add the mes process to idr list */
 288	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
 289		      GFP_KERNEL);
 290	if (r < 0) {
  291		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
 292		goto clean_up_ctx;
 293	}
 294
 295	INIT_LIST_HEAD(&process->gang_list);
 296	process->vm = vm;
 297	process->pasid = pasid;
 298	process->process_quantum = adev->mes.default_process_quantum;
 299	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
 300
 301	amdgpu_mes_unlock(&adev->mes);
 302	return 0;
 303
 304clean_up_ctx:
 305	amdgpu_mes_unlock(&adev->mes);
 306	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 307			      &process->proc_ctx_gpu_addr,
 308			      &process->proc_ctx_cpu_ptr);
 309clean_up_memory:
 310	kfree(process);
 311	return r;
 312}
 313
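     	/*
     	 * amdgpu_mes_destroy_process - remove every queue of every gang of the
     	 * process from the MES firmware under the MES lock, then free the
     	 * queue, gang and process memory once nothing can look them up anymore.
     	 */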
 314void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
 315{
 316	struct amdgpu_mes_process *process;
 317	struct amdgpu_mes_gang *gang, *tmp1;
 318	struct amdgpu_mes_queue *queue, *tmp2;
 319	struct mes_remove_queue_input queue_input;
 320	unsigned long flags;
 321	int r;
 322
 323	/*
 324	 * Avoid taking any other locks under MES lock to avoid circular
 325	 * lock dependencies.
 326	 */
 327	amdgpu_mes_lock(&adev->mes);
 328
 329	process = idr_find(&adev->mes.pasid_idr, pasid);
 330	if (!process) {
 331		DRM_WARN("pasid %d doesn't exist\n", pasid);
 332		amdgpu_mes_unlock(&adev->mes);
 333		return;
 334	}
 335
 336	/* Remove all queues from hardware */
 337	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 338		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 339			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 340			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 341			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 342
 343			queue_input.doorbell_offset = queue->doorbell_off;
 344			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 345
 346			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
 347							     &queue_input);
 348			if (r)
 349				DRM_WARN("failed to remove hardware queue\n");
 350		}
 351
 352		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 353	}
 354
 355	idr_remove(&adev->mes.pasid_idr, pasid);
 356	amdgpu_mes_unlock(&adev->mes);
 357
 358	/* free all memory allocated by the process */
 359	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
 360		/* free all queues in the gang */
 361		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
 362			amdgpu_mes_queue_free_mqd(queue);
 363			list_del(&queue->list);
 364			kfree(queue);
 365		}
 366		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 367				      &gang->gang_ctx_gpu_addr,
 368				      &gang->gang_ctx_cpu_ptr);
 369		list_del(&gang->list);
 370		kfree(gang);
 371
 372	}
 373	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
 374			      &process->proc_ctx_gpu_addr,
 375			      &process->proc_ctx_cpu_ptr);
 376	kfree(process);
 377}
 378
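     	/*
     	 * amdgpu_mes_add_gang - create a scheduling gang inside an existing
     	 * process: allocate and zero the gang context BO, take a gang id from
     	 * the gang IDR and link the gang into the process's gang list.  The
     	 * returned *gang_id is what amdgpu_mes_add_hw_queue() expects.
     	 */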
 379int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
 380			struct amdgpu_mes_gang_properties *gprops,
 381			int *gang_id)
 382{
 383	struct amdgpu_mes_process *process;
 384	struct amdgpu_mes_gang *gang;
 385	int r;
 386
 387	/* allocate the mes gang buffer */
 388	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
 389	if (!gang) {
 390		return -ENOMEM;
 391	}
 392
 393	/* allocate the gang context bo and map it to cpu space */
 394	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
 395				    AMDGPU_GEM_DOMAIN_GTT,
 396				    &gang->gang_ctx_bo,
 397				    &gang->gang_ctx_gpu_addr,
 398				    &gang->gang_ctx_cpu_ptr);
 399	if (r) {
  400		DRM_ERROR("failed to allocate gang context bo\n");
 401		goto clean_up_mem;
 402	}
 403	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
 404
 405	/*
 406	 * Avoid taking any other locks under MES lock to avoid circular
 407	 * lock dependencies.
 408	 */
 409	amdgpu_mes_lock(&adev->mes);
 410
 411	process = idr_find(&adev->mes.pasid_idr, pasid);
 412	if (!process) {
 413		DRM_ERROR("pasid %d doesn't exist\n", pasid);
 414		r = -EINVAL;
 415		goto clean_up_ctx;
 416	}
 417
 418	/* add the mes gang to idr list */
 419	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
 420		      GFP_KERNEL);
 421	if (r < 0) {
 422		DRM_ERROR("failed to allocate idr for gang\n");
 423		goto clean_up_ctx;
 424	}
 425
 426	gang->gang_id = r;
 427	*gang_id = r;
 428
 429	INIT_LIST_HEAD(&gang->queue_list);
 430	gang->process = process;
 431	gang->priority = gprops->priority;
 432	gang->gang_quantum = gprops->gang_quantum ?
 433		gprops->gang_quantum : adev->mes.default_gang_quantum;
 434	gang->global_priority_level = gprops->global_priority_level;
 435	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
 436	list_add_tail(&gang->list, &process->gang_list);
 437
 438	amdgpu_mes_unlock(&adev->mes);
 439	return 0;
 440
 441clean_up_ctx:
 442	amdgpu_mes_unlock(&adev->mes);
 443	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 444			      &gang->gang_ctx_gpu_addr,
 445			      &gang->gang_ctx_cpu_ptr);
 446clean_up_mem:
 447	kfree(gang);
 448	return r;
 449}
 450
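     	/*
     	 * amdgpu_mes_remove_gang - unlink and free an empty gang; returns
     	 * -EBUSY while any hardware queue is still attached to it.
     	 */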
 451int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
 452{
 453	struct amdgpu_mes_gang *gang;
 454
 455	/*
 456	 * Avoid taking any other locks under MES lock to avoid circular
 457	 * lock dependencies.
 458	 */
 459	amdgpu_mes_lock(&adev->mes);
 460
 461	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 462	if (!gang) {
 463		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 464		amdgpu_mes_unlock(&adev->mes);
 465		return -EINVAL;
 466	}
 467
 468	if (!list_empty(&gang->queue_list)) {
 469		DRM_ERROR("queue list is not empty\n");
 470		amdgpu_mes_unlock(&adev->mes);
 471		return -EBUSY;
 472	}
 473
 474	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
 475	list_del(&gang->list);
 476	amdgpu_mes_unlock(&adev->mes);
 477
 478	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
 479			      &gang->gang_ctx_gpu_addr,
 480			      &gang->gang_ctx_cpu_ptr);
 481
 482	kfree(gang);
 483
 484	return 0;
 485}
 486
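     	/*
     	 * amdgpu_mes_suspend - ask the MES firmware to suspend all gangs in a
     	 * single call (suspend_all_gangs = 1); a no-op on firmware without
     	 * suspend/resume-all support, see
     	 * amdgpu_mes_suspend_resume_all_supported().
     	 */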
 487int amdgpu_mes_suspend(struct amdgpu_device *adev)
 488{
 489	struct mes_suspend_gang_input input;
 490	int r;
 491
 492	if (!amdgpu_mes_suspend_resume_all_supported(adev))
 493		return 0;
 494
 495	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
 496	input.suspend_all_gangs = 1;
 497
 498	/*
 499	 * Avoid taking any other locks under MES lock to avoid circular
 500	 * lock dependencies.
 501	 */
 502	amdgpu_mes_lock(&adev->mes);
 503	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
 504	amdgpu_mes_unlock(&adev->mes);
 505	if (r)
  506		DRM_ERROR("failed to suspend all gangs\n");
 507
 508	return r;
 509}
 510
 511int amdgpu_mes_resume(struct amdgpu_device *adev)
 512{
 513	struct mes_resume_gang_input input;
 514	int r;
 515
 516	if (!amdgpu_mes_suspend_resume_all_supported(adev))
 517		return 0;
 518
 519	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
 520	input.resume_all_gangs = 1;
 521
 522	/*
 523	 * Avoid taking any other locks under MES lock to avoid circular
 524	 * lock dependencies.
 525	 */
 526	amdgpu_mes_lock(&adev->mes);
 527	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
 528	amdgpu_mes_unlock(&adev->mes);
 529	if (r)
  530		DRM_ERROR("failed to resume all gangs\n");
 531
 532	return r;
 533}
 534
 535static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
 536				     struct amdgpu_mes_queue *q,
 537				     struct amdgpu_mes_queue_properties *p)
 538{
 539	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 540	u32 mqd_size = mqd_mgr->mqd_size;
 541	int r;
 542
 543	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
 544				    AMDGPU_GEM_DOMAIN_GTT,
 545				    &q->mqd_obj,
 546				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
 547	if (r) {
  548		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
 549		return r;
 550	}
 551	memset(q->mqd_cpu_ptr, 0, mqd_size);
 552
 553	r = amdgpu_bo_reserve(q->mqd_obj, false);
 554	if (unlikely(r != 0))
 555		goto clean_up;
 556
 557	return 0;
 558
 559clean_up:
 560	amdgpu_bo_free_kernel(&q->mqd_obj,
 561			      &q->mqd_gpu_addr,
 562			      &q->mqd_cpu_ptr);
 563	return r;
 564}
 565
 566static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
 567				     struct amdgpu_mes_queue *q,
 568				     struct amdgpu_mes_queue_properties *p)
 569{
 570	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 571	struct amdgpu_mqd_prop mqd_prop = {0};
 572
 573	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
 574	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
 575	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
 576	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
 577	mqd_prop.queue_size = p->queue_size;
 578	mqd_prop.use_doorbell = true;
 579	mqd_prop.doorbell_index = p->doorbell_off;
 580	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
 581	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
 582	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
 583	mqd_prop.hqd_active = false;
 584
 585	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 586	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 587		mutex_lock(&adev->srbm_mutex);
 588		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
 589	}
 590
 591	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
 592
 593	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 594	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 595		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
 596		mutex_unlock(&adev->srbm_mutex);
 597	}
 598
 599	amdgpu_bo_unreserve(q->mqd_obj);
 600}
 601
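     	/*
     	 * amdgpu_mes_add_hw_queue - allocate a queue id and a kernel doorbell,
     	 * initialize the MQD and hand the new queue to the MES firmware.  The
     	 * expected call order (a sketch, mirroring amdgpu_mes_self_test()
     	 * further below) is:
     	 *
     	 *	amdgpu_mes_create_process(adev, pasid, vm);
     	 *	amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
     	 *	amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
     	 */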
 602int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
 603			    struct amdgpu_mes_queue_properties *qprops,
 604			    int *queue_id)
 605{
 606	struct amdgpu_mes_queue *queue;
 607	struct amdgpu_mes_gang *gang;
 608	struct mes_add_queue_input queue_input;
 609	unsigned long flags;
 610	int r;
 611
 612	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
 613
 614	/* allocate the mes queue buffer */
 615	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
 616	if (!queue) {
 617		DRM_ERROR("Failed to allocate memory for queue\n");
 618		return -ENOMEM;
 619	}
 620
 621	/* Allocate the queue mqd */
 622	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
 623	if (r)
 624		goto clean_up_memory;
 625
 626	/*
 627	 * Avoid taking any other locks under MES lock to avoid circular
 628	 * lock dependencies.
 629	 */
 630	amdgpu_mes_lock(&adev->mes);
 631
 632	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 633	if (!gang) {
 634		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 635		r = -EINVAL;
 636		goto clean_up_mqd;
 637	}
 638
  639	/* add the mes queue to the idr list */
 640	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 641	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
 642		      GFP_ATOMIC);
 643	if (r < 0) {
 644		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 645		goto clean_up_mqd;
 646	}
 647	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 648	*queue_id = queue->queue_id = r;
 649
 650	/* allocate a doorbell index for the queue */
  651	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
  652					   qprops->queue_type,
  653					   &qprops->doorbell_off);
 654	if (r)
 655		goto clean_up_queue_id;
 656
 657	/* initialize the queue mqd */
 658	amdgpu_mes_queue_init_mqd(adev, queue, qprops);
 659
 660	/* add hw queue to mes */
 661	queue_input.process_id = gang->process->pasid;
 662
 663	queue_input.page_table_base_addr =
 664		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
 665		adev->gmc.vram_start;
 666
 667	queue_input.process_va_start = 0;
 668	queue_input.process_va_end =
 669		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
 670	queue_input.process_quantum = gang->process->process_quantum;
 671	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
 672	queue_input.gang_quantum = gang->gang_quantum;
 673	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 674	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
 675	queue_input.gang_global_priority_level = gang->global_priority_level;
 676	queue_input.doorbell_offset = qprops->doorbell_off;
 677	queue_input.mqd_addr = queue->mqd_gpu_addr;
 678	queue_input.wptr_addr = qprops->wptr_gpu_addr;
 679	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
 680	queue_input.queue_type = qprops->queue_type;
 681	queue_input.paging = qprops->paging;
 682	queue_input.is_kfd_process = 0;
 683
 684	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
 685	if (r) {
 686		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
 687			  qprops->doorbell_off);
 688		goto clean_up_doorbell;
 689	}
 690
 691	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
 692		  "queue type=%d, doorbell=0x%llx\n",
 693		  gang->process->pasid, gang_id, qprops->queue_type,
 694		  qprops->doorbell_off);
 695
 696	queue->ring = qprops->ring;
 697	queue->doorbell_off = qprops->doorbell_off;
 698	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
 699	queue->queue_type = qprops->queue_type;
 700	queue->paging = qprops->paging;
 701	queue->gang = gang;
 702	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
 703	list_add_tail(&queue->list, &gang->queue_list);
 704
 705	amdgpu_mes_unlock(&adev->mes);
 706	return 0;
 707
 708clean_up_doorbell:
  709	amdgpu_mes_kernel_doorbell_free(adev, gang->process, qprops->doorbell_off);
 710clean_up_queue_id:
 711	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 712	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 713	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 714clean_up_mqd:
 715	amdgpu_mes_unlock(&adev->mes);
 716	amdgpu_mes_queue_free_mqd(queue);
 717clean_up_memory:
 718	kfree(queue);
 719	return r;
 720}
 721
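     	/*
     	 * amdgpu_mes_remove_hw_queue - the inverse of amdgpu_mes_add_hw_queue():
     	 * drop the queue id, ask the firmware to remove the hardware queue,
     	 * then release the doorbell and free the MQD.
     	 */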
 722int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
 723{
 724	unsigned long flags;
 725	struct amdgpu_mes_queue *queue;
 726	struct amdgpu_mes_gang *gang;
 727	struct mes_remove_queue_input queue_input;
 728	int r;
 729
 730	/*
 731	 * Avoid taking any other locks under MES lock to avoid circular
 732	 * lock dependencies.
 733	 */
 734	amdgpu_mes_lock(&adev->mes);
 735
  736	/* remove the mes queue from the idr list */
 737	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 738
 739	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 740	if (!queue) {
 741		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 742		amdgpu_mes_unlock(&adev->mes);
 743		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 744		return -EINVAL;
 745	}
 746
 747	idr_remove(&adev->mes.queue_id_idr, queue_id);
 748	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 749
 750	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
 751		  queue->doorbell_off);
 752
 753	gang = queue->gang;
 754	queue_input.doorbell_offset = queue->doorbell_off;
 755	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 756
 757	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
 758	if (r)
 759		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
 760			  queue_id);
 761
 762	list_del(&queue->list);
  763	amdgpu_mes_kernel_doorbell_free(adev, gang->process, queue->doorbell_off);
 764	amdgpu_mes_unlock(&adev->mes);
 765
 766	amdgpu_mes_queue_free_mqd(queue);
 767	kfree(queue);
 768	return 0;
 769}
 770
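     	/*
     	 * amdgpu_mes_reset_hw_queue - ask the firmware to reset a queue by
     	 * queue id; unlike a remove, the queue stays registered and keeps its
     	 * doorbell and MQD.
     	 */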
 771int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
 772{
 773	unsigned long flags;
 774	struct amdgpu_mes_queue *queue;
 775	struct amdgpu_mes_gang *gang;
 776	struct mes_reset_queue_input queue_input;
 777	int r;
 778
 779	/*
 780	 * Avoid taking any other locks under MES lock to avoid circular
 781	 * lock dependencies.
 782	 */
 783	amdgpu_mes_lock(&adev->mes);
 784
 785	/* remove the mes gang from idr list */
 786	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 787
 788	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 789	if (!queue) {
 790		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 791		amdgpu_mes_unlock(&adev->mes);
 792		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 793		return -EINVAL;
 794	}
 795	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 796
 797	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
 798		  queue->doorbell_off);
 799
 800	gang = queue->gang;
 801	queue_input.doorbell_offset = queue->doorbell_off;
 802	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 803
 804	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
 805	if (r)
 806		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
 807			  queue_id);
 808
 809	amdgpu_mes_unlock(&adev->mes);
 810
 811	return 0;
 812}
 813
 814int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
 815				   int me_id, int pipe_id, int queue_id, int vmid)
 816{
 817	struct mes_reset_queue_input queue_input;
 818	int r;
 819
 820	queue_input.queue_type = queue_type;
 821	queue_input.use_mmio = true;
 822	queue_input.me_id = me_id;
 823	queue_input.pipe_id = pipe_id;
 824	queue_input.queue_id = queue_id;
 825	queue_input.vmid = vmid;
 826	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
 827	if (r)
 828		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
 829			  queue_id);
 830	return r;
 831}
 832
 833int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
 834				struct amdgpu_ring *ring)
 835{
 836	struct mes_map_legacy_queue_input queue_input;
 837	int r;
 838
 839	memset(&queue_input, 0, sizeof(queue_input));
 840
 841	queue_input.queue_type = ring->funcs->type;
 842	queue_input.doorbell_offset = ring->doorbell_index;
 843	queue_input.pipe_id = ring->pipe;
 844	queue_input.queue_id = ring->queue;
 845	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
 846	queue_input.wptr_addr = ring->wptr_gpu_addr;
 847
 848	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
 849	if (r)
 850		DRM_ERROR("failed to map legacy queue\n");
 851
 852	return r;
 853}
 854
 855int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 856				  struct amdgpu_ring *ring,
 857				  enum amdgpu_unmap_queues_action action,
 858				  u64 gpu_addr, u64 seq)
 859{
 860	struct mes_unmap_legacy_queue_input queue_input;
 861	int r;
 862
 863	queue_input.action = action;
 864	queue_input.queue_type = ring->funcs->type;
 865	queue_input.doorbell_offset = ring->doorbell_index;
 866	queue_input.pipe_id = ring->pipe;
 867	queue_input.queue_id = ring->queue;
 868	queue_input.trail_fence_addr = gpu_addr;
 869	queue_input.trail_fence_data = seq;
 870
 871	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
 872	if (r)
 873		DRM_ERROR("failed to unmap legacy queue\n");
 874
 875	return r;
 876}
 877
 878int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
 879				  struct amdgpu_ring *ring,
 880				  unsigned int vmid,
 881				  bool use_mmio)
 882{
 883	struct mes_reset_legacy_queue_input queue_input;
 884	int r;
 885
 886	memset(&queue_input, 0, sizeof(queue_input));
 887
 888	queue_input.queue_type = ring->funcs->type;
 889	queue_input.doorbell_offset = ring->doorbell_index;
 890	queue_input.me_id = ring->me;
 891	queue_input.pipe_id = ring->pipe;
 892	queue_input.queue_id = ring->queue;
 893	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
 894	queue_input.wptr_addr = ring->wptr_gpu_addr;
 895	queue_input.vmid = vmid;
 896	queue_input.use_mmio = use_mmio;
 897
 898	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
 899	if (r)
 900		DRM_ERROR("failed to reset legacy queue\n");
 901
 902	return r;
 903}
 904
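     	/*
     	 * amdgpu_mes_rreg/amdgpu_mes_wreg - register access routed through the
     	 * MES firmware via MES_MISC_OP_READ_REG/WRITE_REG.  Reads land in a
     	 * temporary writeback slot that the firmware fills before the misc op
     	 * completes.
     	 */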
 905uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
 906{
 907	struct mes_misc_op_input op_input;
 908	int r, val = 0;
 909	uint32_t addr_offset = 0;
 910	uint64_t read_val_gpu_addr;
 911	uint32_t *read_val_ptr;
 912
 913	if (amdgpu_device_wb_get(adev, &addr_offset)) {
 914		DRM_ERROR("critical bug! too many mes readers\n");
 915		goto error;
 916	}
 917	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
 918	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
 919	op_input.op = MES_MISC_OP_READ_REG;
 920	op_input.read_reg.reg_offset = reg;
 921	op_input.read_reg.buffer_addr = read_val_gpu_addr;
 922
 923	if (!adev->mes.funcs->misc_op) {
 924		DRM_ERROR("mes rreg is not supported!\n");
 925		goto error;
 926	}
 927
 928	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 929	if (r)
 930		DRM_ERROR("failed to read reg (0x%x)\n", reg);
 931	else
 932		val = *(read_val_ptr);
 933
 934error:
 935	if (addr_offset)
 936		amdgpu_device_wb_free(adev, addr_offset);
 937	return val;
 938}
 939
 940int amdgpu_mes_wreg(struct amdgpu_device *adev,
 941		    uint32_t reg, uint32_t val)
 942{
 943	struct mes_misc_op_input op_input;
 944	int r;
 945
 946	op_input.op = MES_MISC_OP_WRITE_REG;
 947	op_input.write_reg.reg_offset = reg;
 948	op_input.write_reg.reg_value = val;
 949
 950	if (!adev->mes.funcs->misc_op) {
 951		DRM_ERROR("mes wreg is not supported!\n");
 952		r = -EINVAL;
 953		goto error;
 954	}
 955
 956	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 957	if (r)
 958		DRM_ERROR("failed to write reg (0x%x)\n", reg);
 959
 960error:
 961	return r;
 962}
 963
 964int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
 965				  uint32_t reg0, uint32_t reg1,
 966				  uint32_t ref, uint32_t mask)
 967{
 968	struct mes_misc_op_input op_input;
 969	int r;
 970
 971	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
 972	op_input.wrm_reg.reg0 = reg0;
 973	op_input.wrm_reg.reg1 = reg1;
 974	op_input.wrm_reg.ref = ref;
 975	op_input.wrm_reg.mask = mask;
 976
 977	if (!adev->mes.funcs->misc_op) {
 978		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
 979		r = -EINVAL;
 980		goto error;
 981	}
 982
 983	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 984	if (r)
 985		DRM_ERROR("failed to reg_write_reg_wait\n");
 986
 987error:
 988	return r;
 989}
 990
 991int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
 992			uint32_t val, uint32_t mask)
 993{
 994	struct mes_misc_op_input op_input;
 995	int r;
 996
 997	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
 998	op_input.wrm_reg.reg0 = reg;
 999	op_input.wrm_reg.ref = val;
1000	op_input.wrm_reg.mask = mask;
1001
1002	if (!adev->mes.funcs->misc_op) {
1003		DRM_ERROR("mes reg wait is not supported!\n");
1004		r = -EINVAL;
1005		goto error;
1006	}
1007
1008	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
1009	if (r)
 1010		DRM_ERROR("failed to reg_wait\n");
1011
1012error:
1013	return r;
1014}
1015
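     	/*
     	 * amdgpu_mes_set_shader_debugger - program per-process shader debugger
     	 * state (SPI per-VMID control, TCP watch registers, trap enable) with
     	 * a single MES misc op.  Flushing the process context must go through
     	 * amdgpu_mes_flush_shader_debugger() instead, which is why a set call
     	 * carrying the process_ctx_flush flag is rejected here.
     	 */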
1016int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
1017				uint64_t process_context_addr,
1018				uint32_t spi_gdbg_per_vmid_cntl,
1019				const uint32_t *tcp_watch_cntl,
1020				uint32_t flags,
1021				bool trap_en)
1022{
1023	struct mes_misc_op_input op_input = {0};
1024	int r;
1025
1026	if (!adev->mes.funcs->misc_op) {
1027		DRM_ERROR("mes set shader debugger is not supported!\n");
1028		return -EINVAL;
1029	}
1030
1031	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
1032	op_input.set_shader_debugger.process_context_addr = process_context_addr;
1033	op_input.set_shader_debugger.flags.u32all = flags;
1034
 1035	/* use amdgpu_mes_flush_shader_debugger instead */
1036	if (op_input.set_shader_debugger.flags.process_ctx_flush)
1037		return -EINVAL;
1038
1039	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
1040	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
1041			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
1042
1043	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
1044			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
1045		op_input.set_shader_debugger.trap_en = trap_en;
1046
1047	amdgpu_mes_lock(&adev->mes);
1048
1049	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
1050	if (r)
1051		DRM_ERROR("failed to set_shader_debugger\n");
1052
1053	amdgpu_mes_unlock(&adev->mes);
1054
1055	return r;
1056}
1057
1058int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
1059				     uint64_t process_context_addr)
1060{
1061	struct mes_misc_op_input op_input = {0};
1062	int r;
1063
1064	if (!adev->mes.funcs->misc_op) {
1065		DRM_ERROR("mes flush shader debugger is not supported!\n");
1066		return -EINVAL;
1067	}
1068
1069	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
1070	op_input.set_shader_debugger.process_context_addr = process_context_addr;
1071	op_input.set_shader_debugger.flags.process_ctx_flush = true;
1072
1073	amdgpu_mes_lock(&adev->mes);
1074
1075	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
1076	if (r)
 1077		DRM_ERROR("failed to flush_shader_debugger\n");
1078
1079	amdgpu_mes_unlock(&adev->mes);
1080
1081	return r;
1082}
1083
1084static void
1085amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
1086			       struct amdgpu_ring *ring,
1087			       struct amdgpu_mes_queue_properties *props)
1088{
1089	props->queue_type = ring->funcs->type;
1090	props->hqd_base_gpu_addr = ring->gpu_addr;
1091	props->rptr_gpu_addr = ring->rptr_gpu_addr;
1092	props->wptr_gpu_addr = ring->wptr_gpu_addr;
1093	props->wptr_mc_addr =
1094		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
1095	props->queue_size = ring->ring_size;
1096	props->eop_gpu_addr = ring->eop_gpu_addr;
1097	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
1098	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
1099	props->paging = false;
1100	props->ring = ring;
1101}
1102
1103#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
1104do {									\
1105       if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
1106		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
1107				_eng[ring->idx].slots[id_offs]);        \
1108       else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
1109		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
1110				_eng[ring->idx].ring);                  \
1111       else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
1112		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
1113				_eng[ring->idx].ib);                    \
1114       else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)			\
1115		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
1116				_eng[ring->idx].padding);               \
 1117	} while (0)
1118
1119int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
1120{
1121	switch (ring->funcs->type) {
1122	case AMDGPU_RING_TYPE_GFX:
1123		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
1124		break;
1125	case AMDGPU_RING_TYPE_COMPUTE:
1126		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
1127		break;
1128	case AMDGPU_RING_TYPE_SDMA:
1129		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
1130		break;
1131	default:
1132		break;
1133	}
1134
1135	WARN_ON(1);
1136	return -EINVAL;
1137}
1138
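     	/*
     	 * amdgpu_mes_add_ring - build a kernel amdgpu_ring on top of a MES hw
     	 * queue: borrow the funcs/me/pipe of the first ring of the requested
     	 * type, back the ring with the per-context meta data BO and register
     	 * it via amdgpu_mes_add_hw_queue().
     	 */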
1139int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
1140			int queue_type, int idx,
1141			struct amdgpu_mes_ctx_data *ctx_data,
1142			struct amdgpu_ring **out)
1143{
1144	struct amdgpu_ring *ring;
1145	struct amdgpu_mes_gang *gang;
1146	struct amdgpu_mes_queue_properties qprops = {0};
1147	int r, queue_id, pasid;
1148
1149	/*
1150	 * Avoid taking any other locks under MES lock to avoid circular
1151	 * lock dependencies.
1152	 */
1153	amdgpu_mes_lock(&adev->mes);
1154	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
1155	if (!gang) {
1156		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
1157		amdgpu_mes_unlock(&adev->mes);
1158		return -EINVAL;
1159	}
1160	pasid = gang->process->pasid;
1161
1162	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
1163	if (!ring) {
1164		amdgpu_mes_unlock(&adev->mes);
1165		return -ENOMEM;
1166	}
1167
1168	ring->ring_obj = NULL;
1169	ring->use_doorbell = true;
1170	ring->is_mes_queue = true;
1171	ring->mes_ctx = ctx_data;
1172	ring->idx = idx;
1173	ring->no_scheduler = true;
1174
1175	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
1176		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
1177				      compute[ring->idx].mec_hpd);
1178		ring->eop_gpu_addr =
1179			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1180	}
1181
1182	switch (queue_type) {
1183	case AMDGPU_RING_TYPE_GFX:
1184		ring->funcs = adev->gfx.gfx_ring[0].funcs;
1185		ring->me = adev->gfx.gfx_ring[0].me;
1186		ring->pipe = adev->gfx.gfx_ring[0].pipe;
1187		break;
1188	case AMDGPU_RING_TYPE_COMPUTE:
1189		ring->funcs = adev->gfx.compute_ring[0].funcs;
1190		ring->me = adev->gfx.compute_ring[0].me;
1191		ring->pipe = adev->gfx.compute_ring[0].pipe;
1192		break;
1193	case AMDGPU_RING_TYPE_SDMA:
1194		ring->funcs = adev->sdma.instance[0].ring.funcs;
1195		break;
1196	default:
1197		BUG();
1198	}
1199
1200	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1201			     AMDGPU_RING_PRIO_DEFAULT, NULL);
1202	if (r) {
1203		amdgpu_mes_unlock(&adev->mes);
1204		goto clean_up_memory;
1205	}
1206
1207	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
1208
1209	dma_fence_wait(gang->process->vm->last_update, false);
1210	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
1211	amdgpu_mes_unlock(&adev->mes);
1212
1213	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
1214	if (r)
1215		goto clean_up_ring;
1216
1217	ring->hw_queue_id = queue_id;
1218	ring->doorbell_index = qprops.doorbell_off;
1219
1220	if (queue_type == AMDGPU_RING_TYPE_GFX)
1221		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
1222	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
1223		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
1224			queue_id);
1225	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
1226		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
1227			queue_id);
1228	else
1229		BUG();
1230
1231	*out = ring;
1232	return 0;
1233
1234clean_up_ring:
1235	amdgpu_ring_fini(ring);
1236clean_up_memory:
1237	kfree(ring);
1238	return r;
1239}
1240
1241void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
1242			    struct amdgpu_ring *ring)
1243{
1244	if (!ring)
1245		return;
1246
1247	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
1248	del_timer_sync(&ring->fence_drv.fallback_timer);
1249	amdgpu_ring_fini(ring);
1250	kfree(ring);
1251}
1252
1253uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
1254						   enum amdgpu_mes_priority_level prio)
1255{
1256	return adev->mes.aggregated_doorbells[prio];
1257}
1258
1259int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
1260				   struct amdgpu_mes_ctx_data *ctx_data)
1261{
1262	int r;
1263
1264	r = amdgpu_bo_create_kernel(adev,
1265			    sizeof(struct amdgpu_mes_ctx_meta_data),
1266			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1267			    &ctx_data->meta_data_obj,
1268			    &ctx_data->meta_data_mc_addr,
1269			    &ctx_data->meta_data_ptr);
1270	if (r) {
1271		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
1272		return r;
1273	}
1274
1275	if (!ctx_data->meta_data_obj)
1276		return -ENOMEM;
1277
1278	memset(ctx_data->meta_data_ptr, 0,
1279	       sizeof(struct amdgpu_mes_ctx_meta_data));
1280
1281	return 0;
1282}
1283
1284void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
1285{
1286	if (ctx_data->meta_data_obj)
1287		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
1288				      &ctx_data->meta_data_mc_addr,
1289				      &ctx_data->meta_data_ptr);
1290}
1291
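     	/*
     	 * amdgpu_mes_ctx_map_meta_data - map the context meta data BO at
     	 * ctx_data->meta_data_gpu_addr in @vm (readable, writeable and
     	 * executable) and wait for the page table updates to land before the
     	 * address is handed to hardware.
     	 */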
1292int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
1293				 struct amdgpu_vm *vm,
1294				 struct amdgpu_mes_ctx_data *ctx_data)
1295{
1296	struct amdgpu_bo_va *bo_va;
1297	struct amdgpu_sync sync;
1298	struct drm_exec exec;
1299	int r;
1300
1301	amdgpu_sync_create(&sync);
1302
1303	drm_exec_init(&exec, 0, 0);
1304	drm_exec_until_all_locked(&exec) {
1305		r = drm_exec_lock_obj(&exec,
1306				      &ctx_data->meta_data_obj->tbo.base);
1307		drm_exec_retry_on_contention(&exec);
1308		if (unlikely(r))
1309			goto error_fini_exec;
1310
1311		r = amdgpu_vm_lock_pd(vm, &exec, 0);
1312		drm_exec_retry_on_contention(&exec);
1313		if (unlikely(r))
1314			goto error_fini_exec;
1315	}
1316
1317	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
1318	if (!bo_va) {
1319		DRM_ERROR("failed to create bo_va for meta data BO\n");
1320		r = -ENOMEM;
1321		goto error_fini_exec;
1322	}
1323
1324	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
1325			     sizeof(struct amdgpu_mes_ctx_meta_data),
1326			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
1327			     AMDGPU_PTE_EXECUTABLE);
1328
1329	if (r) {
1330		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
1331		goto error_del_bo_va;
1332	}
1333
1334	r = amdgpu_vm_bo_update(adev, bo_va, false);
1335	if (r) {
1336		DRM_ERROR("failed to do vm_bo_update on meta data\n");
1337		goto error_del_bo_va;
1338	}
1339	amdgpu_sync_fence(&sync, bo_va->last_pt_update);
1340
1341	r = amdgpu_vm_update_pdes(adev, vm, false);
1342	if (r) {
1343		DRM_ERROR("failed to update pdes on meta data\n");
1344		goto error_del_bo_va;
1345	}
1346	amdgpu_sync_fence(&sync, vm->last_update);
1347
1348	amdgpu_sync_wait(&sync, false);
1349	drm_exec_fini(&exec);
1350
1351	amdgpu_sync_free(&sync);
1352	ctx_data->meta_data_va = bo_va;
1353	return 0;
1354
1355error_del_bo_va:
1356	amdgpu_vm_bo_del(adev, bo_va);
1357
1358error_fini_exec:
1359	drm_exec_fini(&exec);
1360	amdgpu_sync_free(&sync);
1361	return r;
1362}
1363
1364int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
1365				   struct amdgpu_mes_ctx_data *ctx_data)
1366{
1367	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
1368	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
1369	struct amdgpu_vm *vm = bo_va->base.vm;
1370	struct dma_fence *fence;
1371	struct drm_exec exec;
1372	long r;
1373
1374	drm_exec_init(&exec, 0, 0);
1375	drm_exec_until_all_locked(&exec) {
1376		r = drm_exec_lock_obj(&exec,
1377				      &ctx_data->meta_data_obj->tbo.base);
1378		drm_exec_retry_on_contention(&exec);
1379		if (unlikely(r))
1380			goto out_unlock;
1381
1382		r = amdgpu_vm_lock_pd(vm, &exec, 0);
1383		drm_exec_retry_on_contention(&exec);
1384		if (unlikely(r))
1385			goto out_unlock;
1386	}
1387
1388	amdgpu_vm_bo_del(adev, bo_va);
1389	if (!amdgpu_vm_ready(vm))
1390		goto out_unlock;
1391
1392	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
1393				   &fence);
1394	if (r)
1395		goto out_unlock;
1396	if (fence) {
1397		amdgpu_bo_fence(bo, fence, true);
1398		fence = NULL;
1399	}
1400
1401	r = amdgpu_vm_clear_freed(adev, vm, &fence);
1402	if (r || !fence)
1403		goto out_unlock;
1404
1405	dma_fence_wait(fence, false);
1406	amdgpu_bo_fence(bo, fence, true);
1407	dma_fence_put(fence);
1408
1409out_unlock:
1410	if (unlikely(r < 0))
1411		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
1412	drm_exec_fini(&exec);
1413
1414	return r;
1415}
1416
1417static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
1418					  int pasid, int *gang_id,
1419					  int queue_type, int num_queue,
1420					  struct amdgpu_ring **added_rings,
1421					  struct amdgpu_mes_ctx_data *ctx_data)
1422{
1423	struct amdgpu_ring *ring;
1424	struct amdgpu_mes_gang_properties gprops = {0};
1425	int r, j;
1426
1427	/* create a gang for the process */
1428	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1429	gprops.gang_quantum = adev->mes.default_gang_quantum;
1430	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1431	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1432	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1433
1434	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
1435	if (r) {
1436		DRM_ERROR("failed to add gang\n");
1437		return r;
1438	}
1439
1440	/* create queues for the gang */
1441	for (j = 0; j < num_queue; j++) {
1442		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
1443					ctx_data, &ring);
1444		if (r) {
1445			DRM_ERROR("failed to add ring\n");
1446			break;
1447		}
1448
1449		DRM_INFO("ring %s was added\n", ring->name);
1450		added_rings[j] = ring;
1451	}
1452
1453	return 0;
1454}
1455
1456static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1457{
1458	struct amdgpu_ring *ring;
1459	int i, r;
1460
1461	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1462		ring = added_rings[i];
1463		if (!ring)
1464			continue;
1465
1466		r = amdgpu_ring_test_helper(ring);
1467		if (r)
1468			return r;
1469
1470		r = amdgpu_ring_test_ib(ring, 1000 * 10);
1471		if (r) {
1472			DRM_DEV_ERROR(ring->adev->dev,
1473				      "ring %s ib test failed (%d)\n",
1474				      ring->name, r);
1475			return r;
1476		} else
1477			DRM_INFO("ring %s ib test pass\n", ring->name);
1478	}
1479
1480	return 0;
1481}
1482
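     	/*
     	 * amdgpu_mes_self_test - smoke test that doubles as a usage example:
     	 * create a process and one gang per queue type, add rings, run ring
     	 * and IB tests, then tear everything down again.
     	 */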
1483int amdgpu_mes_self_test(struct amdgpu_device *adev)
1484{
1485	struct amdgpu_vm *vm = NULL;
1486	struct amdgpu_mes_ctx_data ctx_data = {0};
1487	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1488	int gang_ids[3] = {0};
1489	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
1490				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
1491				 { AMDGPU_RING_TYPE_SDMA, 1} };
1492	int i, r, pasid, k = 0;
1493
1494	pasid = amdgpu_pasid_alloc(16);
1495	if (pasid < 0) {
 1496		dev_warn(adev->dev, "No more PASIDs available!\n");
1497		pasid = 0;
1498	}
1499
1500	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1501	if (!vm) {
1502		r = -ENOMEM;
1503		goto error_pasid;
1504	}
1505
1506	r = amdgpu_vm_init(adev, vm, -1);
1507	if (r) {
1508		DRM_ERROR("failed to initialize vm\n");
1509		goto error_pasid;
1510	}
1511
1512	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1513	if (r) {
1514		DRM_ERROR("failed to alloc ctx meta data\n");
1515		goto error_fini;
1516	}
1517
1518	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
1519	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1520	if (r) {
1521		DRM_ERROR("failed to map ctx meta data\n");
1522		goto error_vm;
1523	}
1524
1525	r = amdgpu_mes_create_process(adev, pasid, vm);
1526	if (r) {
1527		DRM_ERROR("failed to create MES process\n");
1528		goto error_vm;
1529	}
1530
1531	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
 1532		/* On GFX v10.3, firmware doesn't yet support mapping SDMA queues. */
1533		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
1534			    IP_VERSION(10, 3, 0) &&
1535		    amdgpu_ip_version(adev, GC_HWIP, 0) <
1536			    IP_VERSION(11, 0, 0) &&
1537		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1538			continue;
1539
1540		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1541							   &gang_ids[i],
1542							   queue_types[i][0],
1543							   queue_types[i][1],
1544							   &added_rings[k],
1545							   &ctx_data);
1546		if (r)
1547			goto error_queues;
1548
1549		k += queue_types[i][1];
1550	}
1551
1552	/* start ring test and ib test for MES queues */
1553	amdgpu_mes_test_queues(added_rings);
1554
1555error_queues:
1556	/* remove all queues */
1557	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1558		if (!added_rings[i])
1559			continue;
1560		amdgpu_mes_remove_ring(adev, added_rings[i]);
1561	}
1562
1563	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1564		if (!gang_ids[i])
1565			continue;
1566		amdgpu_mes_remove_gang(adev, gang_ids[i]);
1567	}
1568
1569	amdgpu_mes_destroy_process(adev, pasid);
1570
1571error_vm:
1572	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1573
1574error_fini:
1575	amdgpu_vm_fini(adev, vm);
1576
1577error_pasid:
1578	if (pasid)
1579		amdgpu_pasid_free(pasid);
1580
1581	amdgpu_mes_ctx_free_meta_data(&ctx_data);
1582	kfree(vm);
1583	return 0;
1584}
1585
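     	/*
     	 * amdgpu_mes_init_microcode - request the MES firmware image for
     	 * @pipe, parse the start addresses and version from its header and,
     	 * for PSP front-door loading, register the ucode and data blobs with
     	 * the firmware list.
     	 */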
1586int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
1587{
1588	const struct mes_firmware_header_v1_0 *mes_hdr;
1589	struct amdgpu_firmware_info *info;
1590	char ucode_prefix[30];
1591	char fw_name[50];
1592	bool need_retry = false;
1593	u32 *ucode_ptr;
1594	int r;
1595
1596	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
1597				       sizeof(ucode_prefix));
1598	if (adev->enable_uni_mes) {
1599		snprintf(fw_name, sizeof(fw_name),
1600			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
1601	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
1602	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
1603		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1604			 ucode_prefix,
1605			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
1606		need_retry = true;
1607	} else {
1608		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1609			 ucode_prefix,
1610			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
1611	}
1612
1613	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
1614	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
1615		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
1616		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
1617					 "amdgpu/%s_mes.bin", ucode_prefix);
1618	}
1619
1620	if (r)
1621		goto out;
1622
1623	mes_hdr = (const struct mes_firmware_header_v1_0 *)
1624		adev->mes.fw[pipe]->data;
1625	adev->mes.uc_start_addr[pipe] =
1626		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
1627		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
1628	adev->mes.data_start_addr[pipe] =
1629		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
1630		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
1631	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
1632			  sizeof(union amdgpu_firmware_header));
1633	adev->mes.fw_version[pipe] =
1634		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;
1635
1636	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1637		int ucode, ucode_data;
1638
1639		if (pipe == AMDGPU_MES_SCHED_PIPE) {
1640			ucode = AMDGPU_UCODE_ID_CP_MES;
1641			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
1642		} else {
1643			ucode = AMDGPU_UCODE_ID_CP_MES1;
1644			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
1645		}
1646
1647		info = &adev->firmware.ucode[ucode];
1648		info->ucode_id = ucode;
1649		info->fw = adev->mes.fw[pipe];
1650		adev->firmware.fw_size +=
1651			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
1652			      PAGE_SIZE);
1653
1654		info = &adev->firmware.ucode[ucode_data];
1655		info->ucode_id = ucode_data;
1656		info->fw = adev->mes.fw[pipe];
1657		adev->firmware.fw_size +=
1658			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
1659			      PAGE_SIZE);
1660	}
1661
1662	return 0;
1663out:
1664	amdgpu_ucode_release(&adev->mes.fw[pipe]);
1665	return r;
1666}
1667
1668bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
1669{
1670	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
1671	bool is_supported = false;
1672
1673	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
1674	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
1675	    mes_rev >= 0x63)
1676		is_supported = true;
1677
1678	return is_supported;
1679}
1680
 1681	/* FIXME: node_id will be used to identify the correct MES instance in the future */
1682static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
1683					    uint32_t node_id, bool enable)
1684{
1685	struct mes_misc_op_input op_input = {0};
1686	int r;
1687
1688	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
1689	op_input.change_config.option.limit_single_process = enable ? 1 : 0;
1690
1691	if (!adev->mes.funcs->misc_op) {
1692		dev_err(adev->dev, "mes change config is not supported!\n");
1693		r = -EINVAL;
1694		goto error;
1695	}
1696
1697	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
1698	if (r)
1699		dev_err(adev->dev, "failed to change_config.\n");
1700
1701error:
1702	return r;
1703}
1704
1705int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
1706{
1707	int i, r = 0;
1708
1709	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
1710		mutex_lock(&adev->enforce_isolation_mutex);
1711		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1712			if (adev->enforce_isolation[i])
1713				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
1714			else
1715				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
1716		}
1717		mutex_unlock(&adev->enforce_isolation_mutex);
1718	}
1719	return r;
1720}
1721
1722#if defined(CONFIG_DEBUG_FS)
1723
1724static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
1725{
1726	struct amdgpu_device *adev = m->private;
1727	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
1728
1729	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
1730		     mem, adev->mes.event_log_size, false);
1731
1732	return 0;
1733}
1734
1735DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
1736
1737#endif
1738
1739void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
1740{
1741
1742#if defined(CONFIG_DEBUG_FS)
1743	struct drm_minor *minor = adev_to_drm(adev)->primary;
1744	struct dentry *root = minor->debugfs_root;
1745	if (adev->enable_mes && amdgpu_mes_log_enable)
1746		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
1747				    adev, &amdgpu_debugfs_mes_event_log_fops);
1748
1749#endif
1750}