/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};
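
/*
 * vcn_reg_list_5_0 above is the per-instance register set captured by
 * vcn_v5_0_dump_ip_state()/vcn_v5_0_print_ip_state(). regUVD_POWER_STATUS is
 * intentionally first: it is always readable and tells the dump code whether
 * the instance is powered before the remaining registers are read.
 */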

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t *ptr;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
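		/*
		 * Doorbell assignment: the vcn_ring0_1 base index is doubled
		 * (64-bit doorbell index to 32-bit slots) and each instance
		 * gets its own block of 8 slots, with the unified ring placed
		 * at offset 2 inside that block.
		 */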
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

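	/*
	 * drm_dev_enter() fails if the device has already been unplugged;
	 * only touch the fw_shared CPU mapping while the device is still
	 * present.
	 */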
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	amdgpu_vcn_sysfs_reset_mask_fini(adev);
	r = amdgpu_vcn_sw_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
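		/*
		 * The NBIO doorbell range above must point at this instance's
		 * doorbell block before the ring test below submits work
		 * through the doorbell.
		 */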

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);
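	/*
	 * The delayed idle work cancelled above can change the power state on
	 * its own; it must not race with the power gating done below.
	 */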

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
				(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
				RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vcn_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev);
	if (r)
		return r;

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
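
	/*
	 * Cache window 0 maps the firmware image: either out of the PSP TMR
	 * region when the firmware is loaded by PSP, or out of the VCN BO
	 * when it is loaded directly by the driver, in which case the stack
	 * and context windows follow the firmware in the same BO.
	 */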

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
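
	/*
	 * Same window layout as vcn_v5_0_0_mc_resume(), but written through
	 * WREG32_SOC24_DPG_MODE: with @indirect set the writes are staged in
	 * the DPG SRAM buffer instead of hitting the registers directly.
	 */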

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);

	return;
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;
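
	/*
	 * Static power gating is controlled per ONO power island: a value is
	 * written to the ONOn_PWR_CONFIG field of UVD_IPX_DLDO_CONFIG and the
	 * matching ONOn_PWR_STATUS field of UVD_IPX_DLDO_STATUS is polled
	 * until the island has reached the requested state.
	 */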

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
	return;
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
	return;
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	return;
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
	int inst_idx, uint8_t indirect)
{
	return;
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	return;
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
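
	/*
	 * In indirect mode the WREG32_SOC24_DPG_MODE writes below are staged
	 * in the DPG SRAM buffer starting at dpg_sram_curr_addr and are only
	 * committed to the hardware by amdgpu_vcn_psp_update_sram() once the
	 * whole sequence has been built.
	 */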

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
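	/*
	 * FW_QUEUE_RING_RESET tells firmware the ring is being reset: it is
	 * set while RB1 is disabled and the ring pointers are reprogrammed,
	 * and cleared (together with FW_QUEUE_DPG_HOLD_OFF) once RB1 is
	 * enabled again below.
	 */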
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v5_0_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		vcn_v5_0_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
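
		/*
		 * Boot poll: wait for UVD_STATUS bit 1, which the VCPU raises
		 * once it has booted. Up to 10 attempts are made, each polling
		 * for roughly a second, with a VCPU block reset pulsed between
		 * attempts if the firmware has not come up.
		 */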

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev,
					"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
							UVD_VCPU_CNTL__BLK_RST_MASK,
							~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
							~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
				UVD_MASTINT_EN__VCPU_EN_MASK,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return;
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
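		/*
		 * FW_QUEUE_DPG_HOLD_OFF set above asks firmware not to enter
		 * dynamic power gating on its own while the instance is being
		 * stopped.
		 */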

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v5_0_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__READ_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* enable VCN power gating */
		vcn_v5_0_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
	struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,  new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(adev, i);
		} else {
			vcn_v5_0_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(adev);
	else
		ret = vcn_v5_0_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_4_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

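		/*
		 * One interrupt type per enc ring plus one for the poison
		 * event registered in sw_init.
		 */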
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
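		/*
		 * A power-status field value of 1 is treated as power gated;
		 * anything else counts as powered and the captured registers
		 * for that instance are printed.
		 */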

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_v5_0_0_set_powergating_state,
	.dump_ip_state = vcn_v5_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};