// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"

#include "amdgpu_umsch_mm.h"
#include "umsch_mm_4_0_api_def.h"
#include "umsch_mm_v4_0.h"

#define regUVD_IPX_DLDO_CONFIG                             0x0064
#define regUVD_IPX_DLDO_CONFIG_BASE_IDX                    1
#define regUVD_IPX_DLDO_STATUS                             0x0065
#define regUVD_IPX_DLDO_STATUS_BASE_IDX                    1

#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT        0x00000002
#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK          0x0000000cUL
#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT        0x00000001
#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK          0x00000002UL

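/*
 * Load the UMSCH firmware: allocate GPU buffers for the ucode and data
 * images, program the MES instruction/data cache and boot registers,
 * release the core from reset and wait for the firmware to post its boot
 * status in VCN_MES_MSTATUS_LO.
 */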
static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint64_t data;
	int r;

	r = amdgpu_umsch_mm_allocate_ucode_buffer(umsch);
	if (r)
		return r;

	r = amdgpu_umsch_mm_allocate_ucode_data_buffer(umsch);
	if (r)
		goto err_free_ucode_bo;

	umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;

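	/*
	 * On VCN 4.0.5 and later, power up the ONO0 power island and wait for
	 * the power status bit to clear before touching the UMSCH registers.
	 */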
	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
			0 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK);
	}

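	/*
	 * Clear the MES core soft reset, then halt the scheduler core,
	 * invalidate its instruction cache and hold pipe 0 in reset while the
	 * caches and firmware addresses are programmed.
	 */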
	data = RREG32_SOC15(VCN, 0, regUMSCH_MES_RESET_CTRL);
	data = REG_SET_FIELD(data, UMSCH_MES_RESET_CTRL, MES_CORE_SOFT_RESET, 0);
	WREG32_SOC15_UMSCH(regUMSCH_MES_RESET_CTRL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, VMID, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, EXE_DISABLE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_CNTL, data);

	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START,
		lower_32_bits(adev->umsch_mm.irq_start_addr >> 2));
	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START_HI,
		upper_32_bits(adev->umsch_mm.irq_start_addr >> 2));

	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START,
		lower_32_bits(adev->umsch_mm.uc_start_addr >> 2));
	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START_HI,
		upper_32_bits(adev->umsch_mm.uc_start_addr >> 2));

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_HI, 0);

	data = adev->umsch_mm.uc_start_addr + adev->umsch_mm.ucode_size - 1;
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_HI, upper_32_bits(data));

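	/*
	 * The instruction cache base (and the data cache base below) is
	 * programmed as 0 when the firmware is loaded through the PSP;
	 * otherwise it points at the driver-allocated firmware buffer.
	 */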
	data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ?
	       0 : adev->umsch_mm.ucode_fw_gpu_addr;
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_HI, upper_32_bits(data));

	WREG32_SOC15_UMSCH(regVCN_MES_MIBOUND_LO, 0x1FFFFF);

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_LO,
		lower_32_bits(adev->umsch_mm.data_start_addr));
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_HI,
		upper_32_bits(adev->umsch_mm.data_start_addr));

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_LO,
		adev->umsch_mm.data_size - 1);
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_HI, 0);

	data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ?
	       0 : adev->umsch_mm.data_fw_gpu_addr;
	WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_HI, upper_32_bits(data));

	WREG32_SOC15_UMSCH(regVCN_MES_MDBOUND_LO, 0x3FFFF);

	data = RREG32_SOC15(VCN, 0, regUVD_UMSCH_FORCE);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, IC_FORCE_GPUVM, 1);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, DC_FORCE_GPUVM, 1);
	WREG32_SOC15_UMSCH(regUVD_UMSCH_FORCE, data);

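	/* Invalidate the MES instruction cache, then prime it with the new image. */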
	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data);

	WREG32_SOC15_UMSCH(regVCN_MES_GP0_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_HI, 0);

#if defined(CONFIG_DEBUG_FS)
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_LO, lower_32_bits(umsch->log_gpu_addr));
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_HI, upper_32_bits(umsch->log_gpu_addr));
#endif

	WREG32_SOC15_UMSCH(regVCN_MES_GP1_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_GP1_HI, 0);

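	/*
	 * Release the core: clear halt and pipe 0 reset and mark pipe 0
	 * active. With PSP loading, amdgpu_umsch_mm_psp_execute_cmd_buf()
	 * submits the prepared command buffer before the driver waits for the
	 * firmware to report ready in VCN_MES_MSTATUS_LO.
	 */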
	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		amdgpu_umsch_mm_psp_execute_cmd_buf(umsch);

	r = SOC15_WAIT_ON_RREG(VCN, 0, regVCN_MES_MSTATUS_LO, 0xAAAAAAAA, 0xFFFFFFFF);
	if (r) {
		dev_err(adev->dev, "UMSCH FW Load: Failed, regVCN_MES_MSTATUS_LO: 0x%08x\n",
			RREG32_SOC15(VCN, 0, regVCN_MES_MSTATUS_LO));
		goto err_free_data_bo;
	}

	return 0;

err_free_data_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
			      &adev->umsch_mm.data_fw_gpu_addr,
			      (void **)&adev->umsch_mm.data_fw_ptr);
err_free_ucode_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
			      &adev->umsch_mm.ucode_fw_gpu_addr,
			      (void **)&adev->umsch_mm.ucode_fw_ptr);
	return r;
}

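/*
 * Program the four aggregated doorbell control registers, one per context
 * priority level (realtime, focus, normal, idle), and enable each of them.
 */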
static void umsch_mm_v4_0_aggregated_doorbell_init(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, OFFSET,
	       umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_REALTIME]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, OFFSET,
	       umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_FOCUS]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, OFFSET,
	       umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, OFFSET,
	       umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_IDLE]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3, data);
}

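/*
 * Start the UMSCH ring: route and enable its doorbell, program the ring
 * buffer base and size, reset the write pointer and set up the aggregated
 * doorbells.
 */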
static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, OFFSET, ring->doorbell_index);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_HI, upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);

	ring->wptr = 0;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	umsch_mm_v4_0_aggregated_doorbell_init(umsch);

	return 0;
}

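/*
 * Stop the UMSCH ring: disable the ring buffer and its doorbell, then on
 * VCN 4.0.5 and later power the ONO0 island back down and wait for the
 * power status bit to assert.
 */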
static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data = REG_SET_FIELD(data, VCN_RB_ENABLE, UMSCH_RB_EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);

	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK);
	}

	return 0;
}

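/*
 * Issue the SET_HW_RESOURCES packet to the scheduler firmware: VMID and HQD
 * masks, engine mask, the global scheduler context address, and register
 * bases plus IP versions for MMHUB, OSSSYS, VCN and VPE. Completion is
 * tracked through the ring's fence sequence number.
 */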
static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
{
	union UMSCHAPI__SET_HW_RESOURCES set_hw_resources = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	set_hw_resources.header.type = UMSCH_API_TYPE_SCHEDULER;
	set_hw_resources.header.opcode = UMSCH_API_SET_HW_RSRC;
	set_hw_resources.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
	set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
	set_hw_resources.collaboration_mask_vpe =
		adev->vpe.collaborate_mode ? 0x3 : 0x0;
	set_hw_resources.engine_mask = umsch->engine_mask;

	set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
	set_hw_resources.vcn1_hqd_mask[0] = umsch->vcn1_hqd_mask;
	set_hw_resources.vcn_hqd_mask[0] = umsch->vcn_hqd_mask[0];
	set_hw_resources.vcn_hqd_mask[1] = umsch->vcn_hqd_mask[1];
	set_hw_resources.vpe_hqd_mask[0] = umsch->vpe_hqd_mask;

	set_hw_resources.g_sch_ctx_gpu_mc_ptr = umsch->sch_ctx_gpu_addr;

	set_hw_resources.enable_level_process_quantum_check = 1;

	memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.mmhub_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, MMHUB_HWIP, 0));

	memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.osssys_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, OSSSYS_HWIP, 0));

	set_hw_resources.vcn_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCN_HWIP, 0));
	set_hw_resources.vpe_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0));

	set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &set_hw_resources.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH SET_HW_RESOURCES: Failed\n");
		return r;
	}

	return 0;
}

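/*
 * Translate the driver's add-queue parameters into an ADD_QUEUE API packet,
 * submit it to the scheduler firmware and wait for the completion fence.
 */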
static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
				   struct umsch_mm_add_queue_input *input_ptr)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	union UMSCHAPI__ADD_QUEUE add_queue = {};
	int r;

	add_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	add_queue.header.opcode = UMSCH_API_ADD_QUEUE;
	add_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	add_queue.process_id = input_ptr->process_id;
	add_queue.page_table_base_addr = input_ptr->page_table_base_addr;
	add_queue.process_va_start = input_ptr->process_va_start;
	add_queue.process_va_end = input_ptr->process_va_end;
	add_queue.process_quantum = input_ptr->process_quantum;
	add_queue.process_csa_addr = input_ptr->process_csa_addr;
	add_queue.context_quantum = input_ptr->context_quantum;
	add_queue.context_csa_addr = input_ptr->context_csa_addr;
	add_queue.inprocess_context_priority = input_ptr->inprocess_context_priority;
	add_queue.context_global_priority_level =
		(enum UMSCH_AMD_PRIORITY_LEVEL)input_ptr->context_global_priority_level;
	add_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	add_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	add_queue.affinity.u32All = input_ptr->affinity;
	add_queue.mqd_addr = input_ptr->mqd_addr;
	add_queue.engine_type = (enum UMSCH_ENGINE_TYPE)input_ptr->engine_type;
	add_queue.h_context = input_ptr->h_context;
	add_queue.h_queue = input_ptr->h_queue;
	add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
	add_queue.is_context_suspended = input_ptr->is_context_suspended;
	add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;

	add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &add_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH ADD_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

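/*
 * Build a REMOVE_QUEUE API packet from the doorbell offsets and context CSA
 * address, submit it and wait for the completion fence.
 */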
static int umsch_mm_v4_0_remove_queue(struct amdgpu_umsch_mm *umsch,
				      struct umsch_mm_remove_queue_input *input_ptr)
{
	union UMSCHAPI__REMOVE_QUEUE remove_queue = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	remove_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	remove_queue.header.opcode = UMSCH_API_REMOVE_QUEUE;
	remove_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	remove_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	remove_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	remove_queue.context_csa_addr = input_ptr->context_csa_addr;

	remove_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	remove_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &remove_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH REMOVE_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

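/* Cache the UMSCH ring buffer read/write pointer register offsets. */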
static int umsch_mm_v4_0_set_regs(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);

	umsch->rb_wptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_WPTR);
	umsch->rb_rptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_RPTR);

	return 0;
}

static const struct umsch_mm_funcs umsch_mm_v4_0_funcs = {
	.set_hw_resources = umsch_mm_v4_0_set_hw_resources,
	.add_queue = umsch_mm_v4_0_add_queue,
	.remove_queue = umsch_mm_v4_0_remove_queue,
	.set_regs = umsch_mm_v4_0_set_regs,
	.init_microcode = amdgpu_umsch_mm_init_microcode,
	.load_microcode = umsch_mm_v4_0_load_microcode,
	.ring_init = amdgpu_umsch_mm_ring_init,
	.ring_start = umsch_mm_v4_0_ring_start,
	.ring_stop = umsch_mm_v4_0_ring_stop,
};

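/* Install the v4.0 function table on the UMSCH instance. */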
void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch)
{
	umsch->funcs = &umsch_mm_v4_0_funcs;
}