/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;
	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

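/**
 * uvd_v7_0_early_init - set early driver state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Detect harvested UVD instances on Vega20, pick the number of
 * encode rings and wire up the ring, encode ring and irq callbacks.
 */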
static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

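/**
 * uvd_v7_0_sw_init - driver software init
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Register the interrupt sources, set up the UVD firmware and
 * initialize the decode and encode rings for each non-harvested
 * instance.
 */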
static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = ip_block->adev;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encode ring
				 * for SR-IOV, so set unused locations for the
				 * other unused rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

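/**
 * uvd_v7_0_sw_fini - driver software teardown
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Free the SR-IOV mm table, finalize the encode rings and tear down
 * the common UVD software state again.
 */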
static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, j, r;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

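/**
 * uvd_v7_0_prepare_suspend - prepare UVD for suspend
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Run the common UVD suspend preparation before the engine is halted.
 */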
static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return amdgpu_uvd_prepare_suspend(adev);
}

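/**
 * uvd_v7_0_suspend - suspend the UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Idle the hardware, gate the clocks and power, then stop the block
 * and save its state.
 */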
static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(ip_block);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

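/**
 * uvd_v7_0_resume - resume the UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Restore the saved UVD state and re-run hardware init.
 */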
static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_uvd_resume(ip_block->adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(ip_block);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

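/**
 * uvd_v7_0_mmsch_start - kick off MMSCH initialization
 *
 * @adev: amdgpu_device pointer
 * @table: mm table holding the init descriptor
 *
 * Point the MM scheduler at the descriptor table built by
 * uvd_v7_0_sriov_start(), ring the host mailbox and poll the response
 * register until the scheduler acknowledges (up to 1000 * 10us).
 */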
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

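/**
 * uvd_v7_0_sriov_start - start UVD under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Instead of programming registers directly, build an MMSCH init table
 * of direct-write/read-modify-write/poll commands mirroring
 * uvd_v7_0_start() and hand it to the MM scheduler via
 * uvd_v7_0_mmsch_start().
 */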
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @job: which job this ib is in
 * @ib: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_ib_set_value(ib, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

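/**
 * uvd_v7_0_ring_emit_wreg - emit a register write
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a GPCOM packet sequence that makes the VCPU write @val to @reg.
 */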
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

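/**
 * uvd_v7_0_ring_emit_reg_wait - emit a register wait
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: bits to compare
 *
 * Emit a GPCOM packet sequence that polls @reg until
 * (@reg & @mask) == @val.
 */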
static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

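/**
 * uvd_v7_0_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush for
 * @pd_addr: page directory address
 *
 * Flush the GPU TLBs and wait until the page table base address
 * write has landed before continuing.
 */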
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

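/**
 * uvd_v7_0_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with
 *
 * Pad the ring with NO_OP register writes; each packet is two dwords,
 * so both the write pointer and @count must be even.
 */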
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

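/**
 * uvd_v7_0_set_interrupt_state - toggle interrupt delivery
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a stub; the UVD interrupt state is left untouched.
 */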
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

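/**
 * uvd_v7_0_process_interrupt - dispatch a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Map the client id to a UVD instance and signal fence completion on
 * the decode or encode ring that raised the trap.
 */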
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

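/**
 * uvd_v7_0_set_clockgating_state - set UVD clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Stub; kept so the common IP code has something to call during
 * driver unload.
 */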
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.prepare_suspend = uvd_v7_0_prepare_suspend,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

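/* Hook up the decode ring callbacks for each non-harvested instance. */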
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

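/* Hook up the encode ring callbacks for each non-harvested instance. */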
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

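/* One irq type per encode ring plus one for the decode ring. */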
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
1645		.type = AMD_IP_BLOCK_TYPE_UVD,
1646		.major = 7,
1647		.minor = 0,
1648		.rev = 0,
1649		.funcs = &uvd_v7_0_ip_funcs,
1650};
v5.14.15
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_uvd.h"
  28#include "soc15.h"
  29#include "soc15d.h"
  30#include "soc15_common.h"
  31#include "mmsch_v1_0.h"
  32
  33#include "uvd/uvd_7_0_offset.h"
  34#include "uvd/uvd_7_0_sh_mask.h"
  35#include "vce/vce_4_0_offset.h"
  36#include "vce/vce_4_0_default.h"
  37#include "vce/vce_4_0_sh_mask.h"
  38#include "nbif/nbif_6_1_offset.h"
  39#include "mmhub/mmhub_1_0_offset.h"
  40#include "mmhub/mmhub_1_0_sh_mask.h"
  41#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
  42
  43#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
  44#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
  45//UVD_PG0_CC_UVD_HARVESTING
  46#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
  47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
  48
  49#define UVD7_MAX_HW_INSTANCES_VEGA20			2
  50
  51static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  52static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  53static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  54static int uvd_v7_0_start(struct amdgpu_device *adev);
  55static void uvd_v7_0_stop(struct amdgpu_device *adev);
  56static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
  57
  58static int amdgpu_ih_clientid_uvds[] = {
  59	SOC15_IH_CLIENTID_UVD,
  60	SOC15_IH_CLIENTID_UVD1
  61};
  62
  63/**
  64 * uvd_v7_0_ring_get_rptr - get read pointer
  65 *
  66 * @ring: amdgpu_ring pointer
  67 *
  68 * Returns the current hardware read pointer
  69 */
  70static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
  71{
  72	struct amdgpu_device *adev = ring->adev;
  73
  74	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
  75}
  76
  77/**
  78 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
  79 *
  80 * @ring: amdgpu_ring pointer
  81 *
  82 * Returns the current hardware enc read pointer
  83 */
  84static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
  85{
  86	struct amdgpu_device *adev = ring->adev;
  87
  88	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
  89		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
  90	else
  91		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
  92}
  93
  94/**
  95 * uvd_v7_0_ring_get_wptr - get write pointer
  96 *
  97 * @ring: amdgpu_ring pointer
  98 *
  99 * Returns the current hardware write pointer
 100 */
 101static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 102{
 103	struct amdgpu_device *adev = ring->adev;
 104
 105	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
 106}
 107
 108/**
 109 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 110 *
 111 * @ring: amdgpu_ring pointer
 112 *
 113 * Returns the current hardware enc write pointer
 114 */
 115static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 116{
 117	struct amdgpu_device *adev = ring->adev;
 118
 119	if (ring->use_doorbell)
 120		return adev->wb.wb[ring->wptr_offs];
 121
 122	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 123		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
 124	else
 125		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
 126}
 127
 128/**
 129 * uvd_v7_0_ring_set_wptr - set write pointer
 130 *
 131 * @ring: amdgpu_ring pointer
 132 *
 133 * Commits the write pointer to the hardware
 134 */
 135static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 136{
 137	struct amdgpu_device *adev = ring->adev;
 138
 139	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 140}
 141
 142/**
 143 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 144 *
 145 * @ring: amdgpu_ring pointer
 146 *
 147 * Commits the enc write pointer to the hardware
 148 */
 149static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 150{
 151	struct amdgpu_device *adev = ring->adev;
 152
 153	if (ring->use_doorbell) {
 154		/* XXX check if swapping is necessary on BE */
 155		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 156		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 157		return;
 158	}
 159
 160	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 161		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
 162			lower_32_bits(ring->wptr));
 163	else
 164		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
 165			lower_32_bits(ring->wptr));
 166}
 167
 168/**
 169 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 170 *
 171 * @ring: the engine to test on
 172 *
 173 */
 174static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 175{
 176	struct amdgpu_device *adev = ring->adev;
 177	uint32_t rptr;
 178	unsigned i;
 179	int r;
 180
 181	if (amdgpu_sriov_vf(adev))
 182		return 0;
 183
 184	r = amdgpu_ring_alloc(ring, 16);
 185	if (r)
 186		return r;
 187
 188	rptr = amdgpu_ring_get_rptr(ring);
 189
 190	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 191	amdgpu_ring_commit(ring);
 192
 193	for (i = 0; i < adev->usec_timeout; i++) {
 194		if (amdgpu_ring_get_rptr(ring) != rptr)
 195			break;
 196		udelay(1);
 197	}
 198
 199	if (i >= adev->usec_timeout)
 200		r = -ETIMEDOUT;
 201
 202	return r;
 203}
 204
 205/**
 206 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 207 *
 208 * @ring: ring we should submit the msg to
 209 * @handle: session handle to use
 210 * @bo: amdgpu object for which we query the offset
 211 * @fence: optional fence to return
 212 *
 213 * Open up a stream for HW test
 214 */
 215static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 216				       struct amdgpu_bo *bo,
 217				       struct dma_fence **fence)
 218{
 219	const unsigned ib_size_dw = 16;
 220	struct amdgpu_job *job;
 221	struct amdgpu_ib *ib;
 222	struct dma_fence *f = NULL;
 223	uint64_t addr;
 224	int i, r;
 225
 226	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 227					AMDGPU_IB_POOL_DIRECT, &job);
 228	if (r)
 229		return r;
 230
 231	ib = &job->ibs[0];
 232	addr = amdgpu_bo_gpu_offset(bo);
 233
 234	ib->length_dw = 0;
 235	ib->ptr[ib->length_dw++] = 0x00000018;
 236	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 237	ib->ptr[ib->length_dw++] = handle;
 238	ib->ptr[ib->length_dw++] = 0x00000000;
 239	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 240	ib->ptr[ib->length_dw++] = addr;
 241
 242	ib->ptr[ib->length_dw++] = 0x00000014;
 243	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 244	ib->ptr[ib->length_dw++] = 0x0000001c;
 245	ib->ptr[ib->length_dw++] = 0x00000000;
 246	ib->ptr[ib->length_dw++] = 0x00000000;
 247
 248	ib->ptr[ib->length_dw++] = 0x00000008;
 249	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 250
 251	for (i = ib->length_dw; i < ib_size_dw; ++i)
 252		ib->ptr[i] = 0x0;
 253
 254	r = amdgpu_job_submit_direct(job, ring, &f);
 255	if (r)
 256		goto err;
 257
 258	if (fence)
 259		*fence = dma_fence_get(f);
 260	dma_fence_put(f);
 261	return 0;
 262
 263err:
 264	amdgpu_job_free(job);
 265	return r;
 266}
 267
 268/**
 269 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 270 *
 271 * @ring: ring we should submit the msg to
 272 * @handle: session handle to use
 273 * @bo: amdgpu object for which we query the offset
 274 * @fence: optional fence to return
 275 *
 276 * Close up a stream for HW test or if userspace failed to do so
 277 */
 278static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 279					struct amdgpu_bo *bo,
 280					struct dma_fence **fence)
 281{
 282	const unsigned ib_size_dw = 16;
 283	struct amdgpu_job *job;
 284	struct amdgpu_ib *ib;
 285	struct dma_fence *f = NULL;
 286	uint64_t addr;
 287	int i, r;
 288
 289	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 290					AMDGPU_IB_POOL_DIRECT, &job);
 291	if (r)
 292		return r;
 293
 294	ib = &job->ibs[0];
 295	addr = amdgpu_bo_gpu_offset(bo);
 296
 297	ib->length_dw = 0;
 298	ib->ptr[ib->length_dw++] = 0x00000018;
 299	ib->ptr[ib->length_dw++] = 0x00000001;
 300	ib->ptr[ib->length_dw++] = handle;
 301	ib->ptr[ib->length_dw++] = 0x00000000;
 302	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 303	ib->ptr[ib->length_dw++] = addr;
 304
 305	ib->ptr[ib->length_dw++] = 0x00000014;
 306	ib->ptr[ib->length_dw++] = 0x00000002;
 307	ib->ptr[ib->length_dw++] = 0x0000001c;
 308	ib->ptr[ib->length_dw++] = 0x00000000;
 309	ib->ptr[ib->length_dw++] = 0x00000000;
 310
 311	ib->ptr[ib->length_dw++] = 0x00000008;
 312	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 313
 314	for (i = ib->length_dw; i < ib_size_dw; ++i)
 315		ib->ptr[i] = 0x0;
 316
 317	r = amdgpu_job_submit_direct(job, ring, &f);
 318	if (r)
 319		goto err;
 320
 321	if (fence)
 322		*fence = dma_fence_get(f);
 323	dma_fence_put(f);
 324	return 0;
 325
 326err:
 327	amdgpu_job_free(job);
 328	return r;
 329}
 330
 331/**
 332 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 333 *
 334 * @ring: the engine to test on
 335 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 336 *
 337 */
 338static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 339{
 340	struct dma_fence *fence = NULL;
 341	struct amdgpu_bo *bo = NULL;
 342	long r;
 343
 344	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 345				      AMDGPU_GEM_DOMAIN_VRAM,
 346				      &bo, NULL, NULL);
 347	if (r)
 348		return r;
 349
 350	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
 351	if (r)
 352		goto error;
 353
 354	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 355	if (r)
 356		goto error;
 357
 358	r = dma_fence_wait_timeout(fence, false, timeout);
 359	if (r == 0)
 360		r = -ETIMEDOUT;
 361	else if (r > 0)
 362		r = 0;
 363
 364error:
 365	dma_fence_put(fence);
 366	amdgpu_bo_unpin(bo);
 367	amdgpu_bo_unreserve(bo);
 368	amdgpu_bo_unref(&bo);
 369	return r;
 370}
 371
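/**
 * uvd_v7_0_early_init - early init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Work out how many UVD instances are present (two on Vega20, one
 * everywhere else), read back the harvest configuration, choose the
 * number of encode rings (one under SR-IOV, two otherwise) and
 * install the ring and interrupt callbacks.  Returns -ENOENT when
 * every instance has been harvested.
 */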
 372static int uvd_v7_0_early_init(void *handle)
 373{
 374	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 375
 376	if (adev->asic_type == CHIP_VEGA20) {
 377		u32 harvest;
 378		int i;
 379
 380		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
 381		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
 382			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
 383			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
 384				adev->uvd.harvest_config |= 1 << i;
 385			}
 386		}
 387		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
 388						 AMDGPU_UVD_HARVEST_UVD1))
 389			/* both instances are harvested, disable the block */
 390			return -ENOENT;
 391	} else {
 392		adev->uvd.num_uvd_inst = 1;
 393	}
 394
 395	if (amdgpu_sriov_vf(adev))
 396		adev->uvd.num_enc_rings = 1;
 397	else
 398		adev->uvd.num_enc_rings = 2;
 399	uvd_v7_0_set_ring_funcs(adev);
 400	uvd_v7_0_set_enc_ring_funcs(adev);
 401	uvd_v7_0_set_irq_funcs(adev);
 402
 403	return 0;
 404}
 405
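/**
 * uvd_v7_0_sw_init - software init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD interrupt sources, set up firmware handling,
 * initialize the decode and encode rings of every active instance
 * and allocate the MM table used under SR-IOV.
 */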
 406static int uvd_v7_0_sw_init(void *handle)
 407{
 408	struct amdgpu_ring *ring;
 409
 410	int i, j, r;
 411	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 412
 413	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 414		if (adev->uvd.harvest_config & (1 << j))
 415			continue;
 416		/* UVD TRAP */
 417		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 418		if (r)
 419			return r;
 420
 421		/* UVD ENC TRAP */
 422		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 423			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
 424			if (r)
 425				return r;
 426		}
 427	}
 428
 429	r = amdgpu_uvd_sw_init(adev);
 430	if (r)
 431		return r;
 432
 433	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 434		const struct common_firmware_header *hdr;
 435		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 436		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
 437		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 438		adev->firmware.fw_size +=
 439			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 440
 441		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
 442			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
 443			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
 444			adev->firmware.fw_size +=
 445				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 446		}
 447		DRM_INFO("PSP loading UVD firmware\n");
 448	}
 449
 450	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 451		if (adev->uvd.harvest_config & (1 << j))
 452			continue;
 453		if (!amdgpu_sriov_vf(adev)) {
 454			ring = &adev->uvd.inst[j].ring;
 455			sprintf(ring->name, "uvd_%d", ring->me);
 456			r = amdgpu_ring_init(adev, ring, 512,
 457					     &adev->uvd.inst[j].irq, 0,
 458					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 459			if (r)
 460				return r;
 461		}
 462
 463		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 464			ring = &adev->uvd.inst[j].ring_enc[i];
 465			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 466			if (amdgpu_sriov_vf(adev)) {
 467				ring->use_doorbell = true;
 468
 469				/* currently only use the first encoding ring for
 470				 * sriov, so set an unused location for the other rings.
 471				 */
 472				if (i == 0)
 473					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 474				else
 475					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 476			}
 477			r = amdgpu_ring_init(adev, ring, 512,
 478					     &adev->uvd.inst[j].irq, 0,
 479					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 480			if (r)
 481				return r;
 482		}
 483	}
 484
 485	r = amdgpu_uvd_resume(adev);
 486	if (r)
 487		return r;
 488
 489	r = amdgpu_uvd_entity_init(adev);
 490	if (r)
 491		return r;
 492
 493	r = amdgpu_virt_alloc_mm_table(adev);
 494	if (r)
 495		return r;
 496
 497	return r;
 498}
 499
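/**
 * uvd_v7_0_sw_fini - software teardown
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Free the SR-IOV MM table, suspend UVD, finalize the encode rings
 * of each active instance and release the common UVD state.
 */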
 500static int uvd_v7_0_sw_fini(void *handle)
 501{
 502	int i, j, r;
 503	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 504
 505	amdgpu_virt_free_mm_table(adev);
 506
 507	r = amdgpu_uvd_suspend(adev);
 508	if (r)
 509		return r;
 510
 511	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 512		if (adev->uvd.harvest_config & (1 << j))
 513			continue;
 514		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 515			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 516	}
 517	return amdgpu_uvd_sw_fini(adev);
 518}
 519
 520/**
 521 * uvd_v7_0_hw_init - start and test UVD block
 522 *
 523 * @handle: handle used to pass amdgpu_device pointer
 524 *
 525 * Initialize the hardware, boot up the VCPU and do some testing
 526 */
 527static int uvd_v7_0_hw_init(void *handle)
 528{
 529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 530	struct amdgpu_ring *ring;
 531	uint32_t tmp;
 532	int i, j, r;
 533
 534	if (amdgpu_sriov_vf(adev))
 535		r = uvd_v7_0_sriov_start(adev);
 536	else
 537		r = uvd_v7_0_start(adev);
 538	if (r)
 539		goto done;
 540
 541	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 542		if (adev->uvd.harvest_config & (1 << j))
 543			continue;
 544		ring = &adev->uvd.inst[j].ring;
 545
 546		if (!amdgpu_sriov_vf(adev)) {
 547			r = amdgpu_ring_test_helper(ring);
 548			if (r)
 549				goto done;
 550
 551			r = amdgpu_ring_alloc(ring, 10);
 552			if (r) {
 553				DRM_ERROR("amdgpu: UVD(%d) failed to lock ring (%d).\n", j, r);
 554				goto done;
 555			}
 556
 557			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 558				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
 559			amdgpu_ring_write(ring, tmp);
 560			amdgpu_ring_write(ring, 0xFFFFF);
 561
 562			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 563				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
 564			amdgpu_ring_write(ring, tmp);
 565			amdgpu_ring_write(ring, 0xFFFFF);
 566
 567			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 568				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
 569			amdgpu_ring_write(ring, tmp);
 570			amdgpu_ring_write(ring, 0xFFFFF);
 571
 572			/* Clear timeout status bits */
 573			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 574				mmUVD_SEMA_TIMEOUT_STATUS), 0));
 575			amdgpu_ring_write(ring, 0x8);
 576
 577			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 578				mmUVD_SEMA_CNTL), 0));
 579			amdgpu_ring_write(ring, 3);
 580
 581			amdgpu_ring_commit(ring);
 582		}
 583
 584		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 585			ring = &adev->uvd.inst[j].ring_enc[i];
 586			r = amdgpu_ring_test_helper(ring);
 587			if (r)
 588				goto done;
 589		}
 590	}
 591done:
 592	if (!r)
 593		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
 594
 595	return r;
 596}
 597
 598/**
 599 * uvd_v7_0_hw_fini - stop the hardware block
 600 *
 601 * @handle: handle used to pass amdgpu_device pointer
 602 *
 603 * Stop the UVD block, mark ring as not ready any more
 604 */
 605static int uvd_v7_0_hw_fini(void *handle)
 606{
 607	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 608
 609	if (!amdgpu_sriov_vf(adev))
 610		uvd_v7_0_stop(adev);
 611	else {
 612		/* full access mode, so don't touch any UVD register */
 613		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 614	}
 615
 616	return 0;
 617}
 618
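/**
 * uvd_v7_0_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the hardware, then let the common UVD code save its state.
 */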
 619static int uvd_v7_0_suspend(void *handle)
 620{
 621	int r;
 622	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 623
 624	r = uvd_v7_0_hw_fini(adev);
 625	if (r)
 626		return r;
 627
 628	return amdgpu_uvd_suspend(adev);
 629}
 630
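/**
 * uvd_v7_0_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the saved UVD state, then bring the hardware back up.
 */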
 631static int uvd_v7_0_resume(void *handle)
 632{
 633	int r;
 634	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 635
 636	r = amdgpu_uvd_resume(adev);
 637	if (r)
 638		return r;
 639
 640	return uvd_v7_0_hw_init(adev);
 641}
 642
 643/**
 644 * uvd_v7_0_mc_resume - memory controller programming
 645 *
 646 * @adev: amdgpu_device pointer
 647 *
 648 * Let the UVD memory controller know its offsets
 649 */
 650static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 651{
 652	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 653	uint32_t offset;
 654	int i;
 655
 656	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 657		if (adev->uvd.harvest_config & (1 << i))
 658			continue;
 659		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 660			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 661				i == 0 ?
 662				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
 663				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
 664			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 665				i == 0 ?
 666				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
 667				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
 668			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 669			offset = 0;
 670		} else {
 671			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 672				lower_32_bits(adev->uvd.inst[i].gpu_addr));
 673			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 674				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 675			offset = size;
 676			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
 677					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 678		}
 679
 680		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
 681
 682		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 683				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 684		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 685				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 686		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 687		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 688
 689		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 690				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 691		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 692				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 693		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 694		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
 695				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 696
 697		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
 698				adev->gfx.config.gb_addr_config);
 699		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
 700				adev->gfx.config.gb_addr_config);
 701		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
 702				adev->gfx.config.gb_addr_config);
 703
 704		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 705	}
 706}
 707
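/**
 * uvd_v7_0_mmsch_start - kick off the MM scheduler
 *
 * @adev: amdgpu_device pointer
 * @table: MM table holding the MMSCH init descriptors
 *
 * Hand the table's GPU address and total size to the MMSCH mailbox,
 * zero the encode ring pointers, then poll the response register
 * until the scheduler acknowledges (or time out after ~10ms).
 */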
 708static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 709				struct amdgpu_mm_table *table)
 710{
 711	uint32_t data = 0, loop;
 712	uint64_t addr = table->gpu_addr;
 713	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 714	uint32_t size;
 715	int i;
 716
 717	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 718
 719	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
 720	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
 721	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 722
 723	/* 2, update vmid of descriptor */
 724	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 725	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 726	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
 727	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 728
 729	/* 3, notify mmsch about the size of this descriptor */
 730	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 731
 732	/* 4, set resp to zero */
 733	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 734
 735	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 736		if (adev->uvd.harvest_config & (1 << i))
 737			continue;
 738		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 739		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 740		adev->uvd.inst[i].ring_enc[0].wptr = 0;
 741		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
 742	}
 743	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 744	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 745
 746	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 747	loop = 1000;
 748	while ((data & 0x10000002) != 0x10000002) {
 749		udelay(10);
 750		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 751		loop--;
 752		if (!loop)
 753			break;
 754	}
 755
 756	if (!loop) {
 757		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 758		return -EBUSY;
 759	}
 760
 761	return 0;
 762}
 763
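/**
 * uvd_v7_0_sriov_start - start UVD block under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table for each active instance: program the
 * memory controller offsets, reset and boot the VCPU and set up the
 * ring buffers, then let uvd_v7_0_mmsch_start() execute the table.
 */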
 764static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 765{
 766	struct amdgpu_ring *ring;
 767	uint32_t offset, size, tmp;
 768	uint32_t table_size = 0;
 769	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
 770	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
 771	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
 772	struct mmsch_v1_0_cmd_end end = { {0} };
 773	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 774	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
 775	uint8_t i = 0;
 776
 777	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 778	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
 779	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
 780	end.cmd_header.command_type = MMSCH_COMMAND__END;
 781
 782	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
 783		header->version = MMSCH_VERSION;
 784		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
 785
 786		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
 787			header->uvd_table_offset = header->header_size;
 788		else
 789			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
 790
 791		init_table += header->uvd_table_offset;
 792
 793		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 794			if (adev->uvd.harvest_config & (1 << i))
 795				continue;
 796			ring = &adev->uvd.inst[i].ring;
 797			ring->wptr = 0;
 798			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 799
 800			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 801							   0xFFFFFFFF, 0x00000004);
 802			/* mc resume */
 803			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 804				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 805							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 806							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
 807				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 808							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 809							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
 810				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 811				offset = 0;
 812			} else {
 813				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 814							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
 815				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 816							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
 817				offset = size;
 818				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 819							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 820
 821			}
 822
 823			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 824
 825			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 826						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 827			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 828						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 829			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 830			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 831
 832			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 833						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 834			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 835						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 836			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 837			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
 838						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 839
 840			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
 841			/* mc resume end */
 842
 843			/* disable clock gating */
 844			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
 845							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
 846
 847			/* disable interrupt */
 848			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 849							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 850
 851			/* stall UMC and register bus before resetting VCPU */
 852			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 853							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 854							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 855
 856			/* put LMI, VCPU, RBC etc... into reset */
 857			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 858						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 859							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 860							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 861							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 862							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 863							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 864							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 865							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
 866
 867			/* initialize UVD memory controller */
 868			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
 869						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 870							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 871							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 872							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 873							       UVD_LMI_CTRL__REQ_MODE_MASK |
 874							       0x00100000L));
 875
 876			/* take all subblocks out of reset, except VCPU */
 877			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 878						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 879
 880			/* enable VCPU clock */
 881			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
 882						    UVD_VCPU_CNTL__CLK_EN_MASK);
 883
 884			/* enable master interrupt */
 885			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 886							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 887							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 888
 889			/* clear bit 4 of UVD_STATUS */
 890			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 891							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
 892
 893			/* force RBC into idle state */
 894			size = order_base_2(ring->ring_size);
 895			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
 896			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 897			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
 898
 899			ring = &adev->uvd.inst[i].ring_enc[0];
 900			ring->wptr = 0;
 901			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
 902			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
 903			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
 904
 905			/* boot up the VCPU */
 906			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
 907
 908			/* enable UMC */
 909			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 910							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
 911
 912			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
 913		}
 914		/* add end packet */
 915		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 916		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 917		header->uvd_table_size = table_size;
 918
 919	}
 920	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
 921}
 922
 923/**
 924 * uvd_v7_0_start - start UVD block
 925 *
 926 * @adev: amdgpu_device pointer
 927 *
 928 * Setup and start the UVD block
 929 */
 930static int uvd_v7_0_start(struct amdgpu_device *adev)
 931{
 932	struct amdgpu_ring *ring;
 933	uint32_t rb_bufsz, tmp;
 934	uint32_t lmi_swap_cntl;
 935	uint32_t mp_swap_cntl;
 936	int i, j, k, r;
 937
 938	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 939		if (adev->uvd.harvest_config & (1 << k))
 940			continue;
 941		/* disable DPG */
 942		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 943				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 944	}
 945
 946	/* disable byte swapping */
 947	lmi_swap_cntl = 0;
 948	mp_swap_cntl = 0;
 949
 950	uvd_v7_0_mc_resume(adev);
 951
 952	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 953		if (adev->uvd.harvest_config & (1 << k))
 954			continue;
 955		ring = &adev->uvd.inst[k].ring;
 956		/* disable clock gating */
 957		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
 958				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 959
 960		/* disable interrupt */
 961		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
 962				~UVD_MASTINT_EN__VCPU_EN_MASK);
 963
 964		/* stall UMC and register bus before resetting VCPU */
 965		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
 966				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 967				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 968		mdelay(1);
 969
 970		/* put LMI, VCPU, RBC etc... into reset */
 971		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
 972			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 973			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 974			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 975			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 976			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 977			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 978			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 979			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
 980		mdelay(5);
 981
 982		/* initialize UVD memory controller */
 983		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
 984			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 985			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 986			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 987			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 988			UVD_LMI_CTRL__REQ_MODE_MASK |
 989			0x00100000L);
 990
 991#ifdef __BIG_ENDIAN
 992		/* swap (8 in 32) RB and IB */
 993		lmi_swap_cntl = 0xa;
 994		mp_swap_cntl = 0;
 995#endif
 996		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 997		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 998
 999		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1000		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1001		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1002		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1003		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1004		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1005
1006		/* take all subblocks out of reset, except VCPU */
1007		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1008				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1009		mdelay(5);
1010
1011		/* enable VCPU clock */
1012		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1013				UVD_VCPU_CNTL__CLK_EN_MASK);
1014
1015		/* enable UMC */
1016		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1017				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1018
1019		/* boot up the VCPU */
1020		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1021		mdelay(10);
1022
1023		for (i = 0; i < 10; ++i) {
1024			uint32_t status;
1025
1026			for (j = 0; j < 100; ++j) {
1027				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1028				if (status & 2)
1029					break;
1030				mdelay(10);
1031			}
1032			r = 0;
1033			if (status & 2)
1034				break;
1035
1036			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1037			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1038					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1039					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1040			mdelay(10);
1041			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1042					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1043			mdelay(10);
1044			r = -1;
1045		}
1046
1047		if (r) {
1048			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1049			return r;
1050		}
1051		/* enable master interrupt */
1052		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1053			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1054			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1055
1056		/* clear bit 4 of UVD_STATUS */
1057		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1058				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1059
1060		/* force RBC into idle state */
1061		rb_bufsz = order_base_2(ring->ring_size);
1062		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1063		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1064		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1065		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1066		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1067		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1068		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1069
1070		/* set the write pointer delay */
1071		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1072
1073		/* set the wb address */
1074		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1075				(upper_32_bits(ring->gpu_addr) >> 2));
1076
1077		/* program the RB_BASE for ring buffer */
1078		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1079				lower_32_bits(ring->gpu_addr));
1080		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1081				upper_32_bits(ring->gpu_addr));
1082
1083		/* Initialize the ring buffer's read and write pointers */
1084		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1085
1086		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1087		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1088				lower_32_bits(ring->wptr));
1089
1090		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1091				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1092
1093		ring = &adev->uvd.inst[k].ring_enc[0];
1094		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1095		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1096		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1097		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1098		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1099
1100		ring = &adev->uvd.inst[k].ring_enc[1];
1101		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1102		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1103		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1104		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1105		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1106	}
1107	return 0;
1108}
1109
1110/**
1111 * uvd_v7_0_stop - stop UVD block
1112 *
1113 * @adev: amdgpu_device pointer
1114 *
1115 * stop the UVD block
1116 */
1117static void uvd_v7_0_stop(struct amdgpu_device *adev)
1118{
1119	uint8_t i = 0;
1120
1121	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1122		if (adev->uvd.harvest_config & (1 << i))
1123			continue;
1124		/* force RBC into idle state */
1125		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1126
1127		/* Stall UMC and register bus before resetting VCPU */
1128		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1129				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1130				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1131		mdelay(1);
1132
1133		/* put VCPU into reset */
1134		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1135				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1136		mdelay(5);
1137
1138		/* disable VCPU clock */
1139		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1140
1141		/* Unstall UMC and register bus */
1142		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1143				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1144	}
1145}
1146
1147/**
1148 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1149 *
1150 * @ring: amdgpu_ring pointer
1151 * @addr: address
1152 * @seq: sequence number
1153 * @flags: fence related flags
1154 *
1155 * Write a fence and a trap command to the ring.
1156 */
1157static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1158				     unsigned flags)
1159{
1160	struct amdgpu_device *adev = ring->adev;
1161
1162	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1163
1164	amdgpu_ring_write(ring,
1165		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1166	amdgpu_ring_write(ring, seq);
1167	amdgpu_ring_write(ring,
1168		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1169	amdgpu_ring_write(ring, addr & 0xffffffff);
1170	amdgpu_ring_write(ring,
1171		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1172	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1173	amdgpu_ring_write(ring,
1174		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1175	amdgpu_ring_write(ring, 0);
1176
1177	amdgpu_ring_write(ring,
1178		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1179	amdgpu_ring_write(ring, 0);
1180	amdgpu_ring_write(ring,
1181		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1182	amdgpu_ring_write(ring, 0);
1183	amdgpu_ring_write(ring,
1184		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1185	amdgpu_ring_write(ring, 2);
1186}
1187
1188/**
1189 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1190 *
1191 * @ring: amdgpu_ring pointer
1192 * @addr: address
1193 * @seq: sequence number
1194 * @flags: fence related flags
1195 *
1196 * Write an enc fence and a trap command to the ring.
1197 */
1198static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1199			u64 seq, unsigned flags)
1200{
1201
1202	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1203
1204	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1205	amdgpu_ring_write(ring, addr);
1206	amdgpu_ring_write(ring, upper_32_bits(addr));
1207	amdgpu_ring_write(ring, seq);
1208	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1209}
1210
1211/**
1212 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1213 *
1214 * @ring: amdgpu_ring pointer
1215 */
1216static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1217{
1218	/* The firmware doesn't seem to like touching registers at this point. */
1219}
1220
1221/**
1222 * uvd_v7_0_ring_test_ring - register write test
1223 *
1224 * @ring: amdgpu_ring pointer
1225 *
1226 * Test if we can successfully write to the context register
1227 */
1228static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1229{
1230	struct amdgpu_device *adev = ring->adev;
1231	uint32_t tmp = 0;
1232	unsigned i;
1233	int r;
1234
1235	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1236	r = amdgpu_ring_alloc(ring, 3);
1237	if (r)
1238		return r;
1239
1240	amdgpu_ring_write(ring,
1241		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1242	amdgpu_ring_write(ring, 0xDEADBEEF);
1243	amdgpu_ring_commit(ring);
1244	for (i = 0; i < adev->usec_timeout; i++) {
1245		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1246		if (tmp == 0xDEADBEEF)
1247			break;
1248		udelay(1);
1249	}
1250
1251	if (i >= adev->usec_timeout)
1252		r = -ETIMEDOUT;
1253
1254	return r;
1255}
1256
1257/**
1258 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1259 *
1260 * @p: the CS parser with the IBs
1261 * @ib_idx: which IB to patch
1262 * For rings on the second UVD instance, rebase each register offset
 * in the IB from instance 0's register space to instance 1's.
1263 */
1264static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1265					   uint32_t ib_idx)
1266{
1267	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1268	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1269	unsigned i;
1270
1271	/* No patching necessary for the first instance */
1272	if (!ring->me)
1273		return 0;
1274
1275	for (i = 0; i < ib->length_dw; i += 2) {
1276		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1277
1278		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1279		reg += p->adev->reg_offset[UVD_HWIP][1][1];
1280
1281		amdgpu_set_ib_value(p, ib_idx, i, reg);
1282	}
1283	return 0;
1284}
1285
1286/**
1287 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1288 *
1289 * @ring: amdgpu_ring pointer
1290 * @job: job to retrieve vmid from
1291 * @ib: indirect buffer to execute
1292 * @flags: unused
1293 *
1294 * Write ring commands to execute the indirect buffer
1295 */
1296static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1297				  struct amdgpu_job *job,
1298				  struct amdgpu_ib *ib,
1299				  uint32_t flags)
1300{
1301	struct amdgpu_device *adev = ring->adev;
1302	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1303
1304	amdgpu_ring_write(ring,
1305		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1306	amdgpu_ring_write(ring, vmid);
1307
1308	amdgpu_ring_write(ring,
1309		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1310	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1311	amdgpu_ring_write(ring,
1312		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1313	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1314	amdgpu_ring_write(ring,
1315		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1316	amdgpu_ring_write(ring, ib->length_dw);
1317}
1318
1319/**
1320 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1321 *
1322 * @ring: amdgpu_ring pointer
1323 * @job: job to retrieve vmid from
1324 * @ib: indirect buffer to execute
1325 * @flags: unused
1326 *
1327 * Write enc ring commands to execute the indirect buffer
1328 */
1329static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1330					struct amdgpu_job *job,
1331					struct amdgpu_ib *ib,
1332					uint32_t flags)
1333{
1334	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1335
1336	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1337	amdgpu_ring_write(ring, vmid);
1338	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1339	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1340	amdgpu_ring_write(ring, ib->length_dw);
1341}
1342
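/**
 * uvd_v7_0_ring_emit_wreg - emit a register write on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register dword offset
 * @val: value to write
 *
 * Place the byte address of @reg and @val in the GPCOM_VCPU data
 * registers and issue GPCOM command 8 to perform the register write.
 */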
1343static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1344				    uint32_t reg, uint32_t val)
1345{
1346	struct amdgpu_device *adev = ring->adev;
1347
1348	amdgpu_ring_write(ring,
1349		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1350	amdgpu_ring_write(ring, reg << 2);
1351	amdgpu_ring_write(ring,
1352		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1353	amdgpu_ring_write(ring, val);
1354	amdgpu_ring_write(ring,
1355		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1356	amdgpu_ring_write(ring, 8);
1357}
1358
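/**
 * uvd_v7_0_ring_emit_reg_wait - emit a register wait on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register dword offset
 * @val: value to wait for
 * @mask: bits to compare under
 *
 * Place the byte address of @reg, @val and @mask in the GPCOM data
 * and scratch registers and issue GPCOM command 12 to wait until
 * @reg, masked with @mask, reads back @val.
 */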
1359static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1360					uint32_t val, uint32_t mask)
1361{
1362	struct amdgpu_device *adev = ring->adev;
1363
1364	amdgpu_ring_write(ring,
1365		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1366	amdgpu_ring_write(ring, reg << 2);
1367	amdgpu_ring_write(ring,
1368		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1369	amdgpu_ring_write(ring, val);
1370	amdgpu_ring_write(ring,
1371		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1372	amdgpu_ring_write(ring, mask);
1373	amdgpu_ring_write(ring,
1374		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1375	amdgpu_ring_write(ring, 12);
1376}
1377
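/**
 * uvd_v7_0_ring_emit_vm_flush - emit a VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID the flush is for
 * @pd_addr: page directory base address
 *
 * Emit the GPU TLB flush and then wait until the hub's page table
 * base register for @vmid reads back the new page directory address.
 */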
1378static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1379					unsigned vmid, uint64_t pd_addr)
1380{
1381	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1382	uint32_t data0, data1, mask;
1383
1384	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1385
1386	/* wait for reg writes */
1387	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1388	data1 = lower_32_bits(pd_addr);
1389	mask = 0xffffffff;
1390	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1391}
1392
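/**
 * uvd_v7_0_ring_insert_nop - insert NO_OP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of padding dwords, must be even
 *
 * Pad the decode ring with writes to the UVD_NO_OP register, two
 * dwords per packet.
 */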
1393static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1394{
1395	struct amdgpu_device *adev = ring->adev;
1396	int i;
1397
1398	WARN_ON(ring->wptr % 2 || count % 2);
1399
1400	for (i = 0; i < count / 2; i++) {
1401		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1402		amdgpu_ring_write(ring, 0);
1403	}
1404}
1405
1406static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1407{
1408	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1409}
1410
1411static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1412					    uint32_t reg, uint32_t val,
1413					    uint32_t mask)
1414{
1415	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1416	amdgpu_ring_write(ring, reg << 2);
1417	amdgpu_ring_write(ring, mask);
1418	amdgpu_ring_write(ring, val);
1419}
1420
1421static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1422					    unsigned int vmid, uint64_t pd_addr)
1423{
1424	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1425
1426	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1427
1428	/* wait for reg writes */
1429	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1430					vmid * hub->ctx_addr_distance,
1431					lower_32_bits(pd_addr), 0xffffffff);
1432}
1433
1434static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1435					uint32_t reg, uint32_t val)
1436{
1437	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1438	amdgpu_ring_write(ring, reg << 2);
1439	amdgpu_ring_write(ring, val);
1440}
1441
1442#if 0
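/*
 * Note: the block below is compiled out.  Several of these soft-reset
 * helpers still reference ring->me without a ring in scope, so they
 * would need an explicit instance parameter before being re-enabled.
 */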
1443static bool uvd_v7_0_is_idle(void *handle)
1444{
1445	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1446
1447	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1448}
1449
1450static int uvd_v7_0_wait_for_idle(void *handle)
1451{
1452	unsigned i;
1453	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1454
1455	for (i = 0; i < adev->usec_timeout; i++) {
1456		if (uvd_v7_0_is_idle(handle))
1457			return 0;
1458	}
1459	return -ETIMEDOUT;
1460}
1461
1462#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1463static bool uvd_v7_0_check_soft_reset(void *handle)
1464{
1465	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1466	u32 srbm_soft_reset = 0;
1467	u32 tmp = RREG32(mmSRBM_STATUS);
1468
1469	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1470	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1471	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1472		    AMDGPU_UVD_STATUS_BUSY_MASK))
1473		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1474				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1475
1476	if (srbm_soft_reset) {
1477		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1478		return true;
1479	} else {
1480		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1481		return false;
1482	}
1483}
1484
1485static int uvd_v7_0_pre_soft_reset(void *handle)
1486{
1487	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488
1489	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1490		return 0;
1491
1492	uvd_v7_0_stop(adev);
1493	return 0;
1494}
1495
1496static int uvd_v7_0_soft_reset(void *handle)
1497{
1498	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1499	u32 srbm_soft_reset;
1500
1501	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1502		return 0;
1503	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1504
1505	if (srbm_soft_reset) {
1506		u32 tmp;
1507
1508		tmp = RREG32(mmSRBM_SOFT_RESET);
1509		tmp |= srbm_soft_reset;
1510		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1511		WREG32(mmSRBM_SOFT_RESET, tmp);
1512		tmp = RREG32(mmSRBM_SOFT_RESET);
1513
1514		udelay(50);
1515
1516		tmp &= ~srbm_soft_reset;
1517		WREG32(mmSRBM_SOFT_RESET, tmp);
1518		tmp = RREG32(mmSRBM_SOFT_RESET);
1519
1520		/* Wait a little for things to settle down */
1521		udelay(50);
1522	}
1523
1524	return 0;
1525}
1526
1527static int uvd_v7_0_post_soft_reset(void *handle)
1528{
1529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
1531	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1532		return 0;
1533
1534	mdelay(5);
1535
1536	return uvd_v7_0_start(adev);
1537}
1538#endif
1539
1540static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1541					struct amdgpu_irq_src *source,
1542					unsigned type,
1543					enum amdgpu_interrupt_state state)
1544{
1545	// TODO
1546	return 0;
1547}
1548
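/**
 * uvd_v7_0_process_interrupt - route a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: IV ring entry
 *
 * Map the IH client id to a UVD instance and signal fence completion
 * on the decode or encode ring identified by the source id.
 */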
1549static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1550				      struct amdgpu_irq_src *source,
1551				      struct amdgpu_iv_entry *entry)
1552{
1553	uint32_t ip_instance;
1554
1555	switch (entry->client_id) {
1556	case SOC15_IH_CLIENTID_UVD:
1557		ip_instance = 0;
1558		break;
1559	case SOC15_IH_CLIENTID_UVD1:
1560		ip_instance = 1;
1561		break;
1562	default:
1563		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1564		return 0;
1565	}
1566
1567	DRM_DEBUG("IH: UVD TRAP\n");
1568
1569	switch (entry->src_id) {
1570	case 124:
1571		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1572		break;
1573	case 119:
1574		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1575		break;
1576	case 120:
1577		if (!amdgpu_sriov_vf(adev))
1578			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1579		break;
1580	default:
1581		DRM_ERROR("Unhandled interrupt: %d %d\n",
1582			  entry->src_id, entry->src_data[0]);
1583		break;
1584	}
1585
1586	return 0;
1587}
1588
1589#if 0
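/*
 * Compiled-out clock- and powergating helpers.  Like the soft-reset
 * code above, they reference ring->me without a ring in scope and
 * would need per-instance plumbing before being re-enabled.
 */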
1590static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1591{
1592	uint32_t data, data1, data2, suvd_flags;
1593
1594	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1595	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1596	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1597
1598	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1599		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1600
1601	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1602		     UVD_SUVD_CGC_GATE__SIT_MASK |
1603		     UVD_SUVD_CGC_GATE__SMP_MASK |
1604		     UVD_SUVD_CGC_GATE__SCM_MASK |
1605		     UVD_SUVD_CGC_GATE__SDB_MASK;
1606
1607	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1608		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1609		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1610
1611	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1612			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1613			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1614			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1615			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1616			UVD_CGC_CTRL__SYS_MODE_MASK |
1617			UVD_CGC_CTRL__UDEC_MODE_MASK |
1618			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1619			UVD_CGC_CTRL__REGS_MODE_MASK |
1620			UVD_CGC_CTRL__RBC_MODE_MASK |
1621			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1622			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1623			UVD_CGC_CTRL__IDCT_MODE_MASK |
1624			UVD_CGC_CTRL__MPRD_MODE_MASK |
1625			UVD_CGC_CTRL__MPC_MODE_MASK |
1626			UVD_CGC_CTRL__LBSI_MODE_MASK |
1627			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1628			UVD_CGC_CTRL__WCB_MODE_MASK |
1629			UVD_CGC_CTRL__VCPU_MODE_MASK |
1630			UVD_CGC_CTRL__JPEG_MODE_MASK |
1631			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1632			UVD_CGC_CTRL__SCPU_MODE_MASK);
1633	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1634			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1635			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1636			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1637			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1638	data1 |= suvd_flags;
1639
1640	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1641	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1642	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1643	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1644}
1645
1646static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1647{
1648	uint32_t data, data1, cgc_flags, suvd_flags;
1649
1650	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1651	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1652
	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

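/*
 * Toggle DFS bypass for the UVD clocks: with bypass enabled, DCLK and
 * VCLK bypass the DFS divider outputs (an assumption based on the
 * GCK_DFS_BYPASS_CNTL bit names, not confirmed by this file).
 */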
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the SMC and the HW blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* no ring in scope here either; assume the first UVD instance */
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* no-op, but the callback must exist for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

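/*
 * Decode ring callbacks.  The emit_frame_size sum below accounts for
 * the worst-case number of ring dwords emitted per frame: HDP
 * invalidate, GPU TLB flush writes and waits, the VM flush, and two
 * fences, as the per-term comments note.
 */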
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* one interrupt type for the decode ring plus one per enc ring */
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};
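
/*
 * Usage sketch (illustrative, not part of this file): the IP block
 * version above is registered by the SoC setup code, e.g. in soc15.c:
 *
 *	amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
 */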